> (nbits - rembits))
- dst[len(dst)-1] |= t
- }
-
- return dst
-}
-
-// HuffmanEncodeLength returns the number of bytes required to encode
-// s in Huffman codes. The result is rounded up to a byte boundary.
-func HuffmanEncodeLength(s string) uint64 {
- n := uint64(0)
- for i := 0; i < len(s); i++ {
- n += uint64(huffmanCodeLen[s[i]])
- }
- return (n + 7) / 8
-}
-
-// appendByteToHuffmanCode appends Huffman code for c to dst and
-// returns the extended buffer and the remaining bits in the last
-// element. The appending is not byte aligned and the remaining bits
-// in the last element of dst are given in rembits.
-func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
- code := huffmanCodes[c]
- nbits := huffmanCodeLen[c]
-
- for {
- if rembits > nbits {
- t := uint8(code << (rembits - nbits))
- dst[len(dst)-1] |= t
- rembits -= nbits
- break
- }
-
- t := uint8(code >> (nbits - rembits))
- dst[len(dst)-1] |= t
-
- nbits -= rembits
- rembits = 8
-
- if nbits == 0 {
- break
- }
-
- dst = append(dst, 0)
- }
-
- return dst, rembits
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/tables.go
deleted file mode 100644
index b9283a0233..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/tables.go
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package hpack
-
-func pair(name, value string) HeaderField {
- return HeaderField{Name: name, Value: value}
-}
-
-// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
-var staticTable = [...]HeaderField{
- pair(":authority", ""), // index 1 (1-based)
- pair(":method", "GET"),
- pair(":method", "POST"),
- pair(":path", "/"),
- pair(":path", "/index.html"),
- pair(":scheme", "http"),
- pair(":scheme", "https"),
- pair(":status", "200"),
- pair(":status", "204"),
- pair(":status", "206"),
- pair(":status", "304"),
- pair(":status", "400"),
- pair(":status", "404"),
- pair(":status", "500"),
- pair("accept-charset", ""),
- pair("accept-encoding", "gzip, deflate"),
- pair("accept-language", ""),
- pair("accept-ranges", ""),
- pair("accept", ""),
- pair("access-control-allow-origin", ""),
- pair("age", ""),
- pair("allow", ""),
- pair("authorization", ""),
- pair("cache-control", ""),
- pair("content-disposition", ""),
- pair("content-encoding", ""),
- pair("content-language", ""),
- pair("content-length", ""),
- pair("content-location", ""),
- pair("content-range", ""),
- pair("content-type", ""),
- pair("cookie", ""),
- pair("date", ""),
- pair("etag", ""),
- pair("expect", ""),
- pair("expires", ""),
- pair("from", ""),
- pair("host", ""),
- pair("if-match", ""),
- pair("if-modified-since", ""),
- pair("if-none-match", ""),
- pair("if-range", ""),
- pair("if-unmodified-since", ""),
- pair("last-modified", ""),
- pair("link", ""),
- pair("location", ""),
- pair("max-forwards", ""),
- pair("proxy-authenticate", ""),
- pair("proxy-authorization", ""),
- pair("range", ""),
- pair("referer", ""),
- pair("refresh", ""),
- pair("retry-after", ""),
- pair("server", ""),
- pair("set-cookie", ""),
- pair("strict-transport-security", ""),
- pair("transfer-encoding", ""),
- pair("user-agent", ""),
- pair("vary", ""),
- pair("via", ""),
- pair("www-authenticate", ""),
-}
-
-var huffmanCodes = [256]uint32{
- 0x1ff8,
- 0x7fffd8,
- 0xfffffe2,
- 0xfffffe3,
- 0xfffffe4,
- 0xfffffe5,
- 0xfffffe6,
- 0xfffffe7,
- 0xfffffe8,
- 0xffffea,
- 0x3ffffffc,
- 0xfffffe9,
- 0xfffffea,
- 0x3ffffffd,
- 0xfffffeb,
- 0xfffffec,
- 0xfffffed,
- 0xfffffee,
- 0xfffffef,
- 0xffffff0,
- 0xffffff1,
- 0xffffff2,
- 0x3ffffffe,
- 0xffffff3,
- 0xffffff4,
- 0xffffff5,
- 0xffffff6,
- 0xffffff7,
- 0xffffff8,
- 0xffffff9,
- 0xffffffa,
- 0xffffffb,
- 0x14,
- 0x3f8,
- 0x3f9,
- 0xffa,
- 0x1ff9,
- 0x15,
- 0xf8,
- 0x7fa,
- 0x3fa,
- 0x3fb,
- 0xf9,
- 0x7fb,
- 0xfa,
- 0x16,
- 0x17,
- 0x18,
- 0x0,
- 0x1,
- 0x2,
- 0x19,
- 0x1a,
- 0x1b,
- 0x1c,
- 0x1d,
- 0x1e,
- 0x1f,
- 0x5c,
- 0xfb,
- 0x7ffc,
- 0x20,
- 0xffb,
- 0x3fc,
- 0x1ffa,
- 0x21,
- 0x5d,
- 0x5e,
- 0x5f,
- 0x60,
- 0x61,
- 0x62,
- 0x63,
- 0x64,
- 0x65,
- 0x66,
- 0x67,
- 0x68,
- 0x69,
- 0x6a,
- 0x6b,
- 0x6c,
- 0x6d,
- 0x6e,
- 0x6f,
- 0x70,
- 0x71,
- 0x72,
- 0xfc,
- 0x73,
- 0xfd,
- 0x1ffb,
- 0x7fff0,
- 0x1ffc,
- 0x3ffc,
- 0x22,
- 0x7ffd,
- 0x3,
- 0x23,
- 0x4,
- 0x24,
- 0x5,
- 0x25,
- 0x26,
- 0x27,
- 0x6,
- 0x74,
- 0x75,
- 0x28,
- 0x29,
- 0x2a,
- 0x7,
- 0x2b,
- 0x76,
- 0x2c,
- 0x8,
- 0x9,
- 0x2d,
- 0x77,
- 0x78,
- 0x79,
- 0x7a,
- 0x7b,
- 0x7ffe,
- 0x7fc,
- 0x3ffd,
- 0x1ffd,
- 0xffffffc,
- 0xfffe6,
- 0x3fffd2,
- 0xfffe7,
- 0xfffe8,
- 0x3fffd3,
- 0x3fffd4,
- 0x3fffd5,
- 0x7fffd9,
- 0x3fffd6,
- 0x7fffda,
- 0x7fffdb,
- 0x7fffdc,
- 0x7fffdd,
- 0x7fffde,
- 0xffffeb,
- 0x7fffdf,
- 0xffffec,
- 0xffffed,
- 0x3fffd7,
- 0x7fffe0,
- 0xffffee,
- 0x7fffe1,
- 0x7fffe2,
- 0x7fffe3,
- 0x7fffe4,
- 0x1fffdc,
- 0x3fffd8,
- 0x7fffe5,
- 0x3fffd9,
- 0x7fffe6,
- 0x7fffe7,
- 0xffffef,
- 0x3fffda,
- 0x1fffdd,
- 0xfffe9,
- 0x3fffdb,
- 0x3fffdc,
- 0x7fffe8,
- 0x7fffe9,
- 0x1fffde,
- 0x7fffea,
- 0x3fffdd,
- 0x3fffde,
- 0xfffff0,
- 0x1fffdf,
- 0x3fffdf,
- 0x7fffeb,
- 0x7fffec,
- 0x1fffe0,
- 0x1fffe1,
- 0x3fffe0,
- 0x1fffe2,
- 0x7fffed,
- 0x3fffe1,
- 0x7fffee,
- 0x7fffef,
- 0xfffea,
- 0x3fffe2,
- 0x3fffe3,
- 0x3fffe4,
- 0x7ffff0,
- 0x3fffe5,
- 0x3fffe6,
- 0x7ffff1,
- 0x3ffffe0,
- 0x3ffffe1,
- 0xfffeb,
- 0x7fff1,
- 0x3fffe7,
- 0x7ffff2,
- 0x3fffe8,
- 0x1ffffec,
- 0x3ffffe2,
- 0x3ffffe3,
- 0x3ffffe4,
- 0x7ffffde,
- 0x7ffffdf,
- 0x3ffffe5,
- 0xfffff1,
- 0x1ffffed,
- 0x7fff2,
- 0x1fffe3,
- 0x3ffffe6,
- 0x7ffffe0,
- 0x7ffffe1,
- 0x3ffffe7,
- 0x7ffffe2,
- 0xfffff2,
- 0x1fffe4,
- 0x1fffe5,
- 0x3ffffe8,
- 0x3ffffe9,
- 0xffffffd,
- 0x7ffffe3,
- 0x7ffffe4,
- 0x7ffffe5,
- 0xfffec,
- 0xfffff3,
- 0xfffed,
- 0x1fffe6,
- 0x3fffe9,
- 0x1fffe7,
- 0x1fffe8,
- 0x7ffff3,
- 0x3fffea,
- 0x3fffeb,
- 0x1ffffee,
- 0x1ffffef,
- 0xfffff4,
- 0xfffff5,
- 0x3ffffea,
- 0x7ffff4,
- 0x3ffffeb,
- 0x7ffffe6,
- 0x3ffffec,
- 0x3ffffed,
- 0x7ffffe7,
- 0x7ffffe8,
- 0x7ffffe9,
- 0x7ffffea,
- 0x7ffffeb,
- 0xffffffe,
- 0x7ffffec,
- 0x7ffffed,
- 0x7ffffee,
- 0x7ffffef,
- 0x7fffff0,
- 0x3ffffee,
-}
-
-var huffmanCodeLen = [256]uint8{
- 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
- 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
- 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
- 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
- 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
- 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
- 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
- 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
- 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
- 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
- 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
- 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
- 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
- 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
- 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/http2.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/http2.go
deleted file mode 100644
index 0529b63e2a..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/http2.go
+++ /dev/null
@@ -1,463 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package http2 implements the HTTP/2 protocol.
-//
-// This package is low-level and intended to be used directly by very
-// few people. Most users will use it indirectly through the automatic
-// use by the net/http package (from Go 1.6 and later).
-// For use in earlier Go versions see ConfigureServer. (Transport support
-// requires Go 1.6 or later)
-//
-// See https://http2.github.io/ for more information on HTTP/2.
-//
-// See https://http2.golang.org/ for a test server running this code.
-package http2
-
-import (
- "bufio"
- "crypto/tls"
- "errors"
- "fmt"
- "io"
- "net/http"
- "os"
- "sort"
- "strconv"
- "strings"
- "sync"
-)
-
-var (
- VerboseLogs bool
- logFrameWrites bool
- logFrameReads bool
-)
-
-func init() {
- e := os.Getenv("GODEBUG")
- if strings.Contains(e, "http2debug=1") {
- VerboseLogs = true
- }
- if strings.Contains(e, "http2debug=2") {
- VerboseLogs = true
- logFrameWrites = true
- logFrameReads = true
- }
-}
-
-const (
- // ClientPreface is the string that must be sent by new
- // connections from clients.
- ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
-
- // SETTINGS_MAX_FRAME_SIZE default
- // http://http2.github.io/http2-spec/#rfc.section.6.5.2
- initialMaxFrameSize = 16384
-
- // NextProtoTLS is the NPN/ALPN protocol negotiated during
- // HTTP/2's TLS setup.
- NextProtoTLS = "h2"
-
- // http://http2.github.io/http2-spec/#SettingValues
- initialHeaderTableSize = 4096
-
- initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
-
- defaultMaxReadFrameSize = 1 << 20
-)
-
-var (
- clientPreface = []byte(ClientPreface)
-)
-
-type streamState int
-
-const (
- stateIdle streamState = iota
- stateOpen
- stateHalfClosedLocal
- stateHalfClosedRemote
- stateResvLocal
- stateResvRemote
- stateClosed
-)
-
-var stateName = [...]string{
- stateIdle: "Idle",
- stateOpen: "Open",
- stateHalfClosedLocal: "HalfClosedLocal",
- stateHalfClosedRemote: "HalfClosedRemote",
- stateResvLocal: "ResvLocal",
- stateResvRemote: "ResvRemote",
- stateClosed: "Closed",
-}
-
-func (st streamState) String() string {
- return stateName[st]
-}
-
-// Setting is a setting parameter: which setting it is, and its value.
-type Setting struct {
- // ID is which setting is being set.
- // See http://http2.github.io/http2-spec/#SettingValues
- ID SettingID
-
- // Val is the value.
- Val uint32
-}
-
-func (s Setting) String() string {
- return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
-}
-
-// Valid reports whether the setting is valid.
-func (s Setting) Valid() error {
- // Limits and error codes from 6.5.2 Defined SETTINGS Parameters
- switch s.ID {
- case SettingEnablePush:
- if s.Val != 1 && s.Val != 0 {
- return ConnectionError(ErrCodeProtocol)
- }
- case SettingInitialWindowSize:
- if s.Val > 1<<31-1 {
- return ConnectionError(ErrCodeFlowControl)
- }
- case SettingMaxFrameSize:
- if s.Val < 16384 || s.Val > 1<<24-1 {
- return ConnectionError(ErrCodeProtocol)
- }
- }
- return nil
-}
-
-// A SettingID is an HTTP/2 setting as defined in
-// http://http2.github.io/http2-spec/#iana-settings
-type SettingID uint16
-
-const (
- SettingHeaderTableSize SettingID = 0x1
- SettingEnablePush SettingID = 0x2
- SettingMaxConcurrentStreams SettingID = 0x3
- SettingInitialWindowSize SettingID = 0x4
- SettingMaxFrameSize SettingID = 0x5
- SettingMaxHeaderListSize SettingID = 0x6
-)
-
-var settingName = map[SettingID]string{
- SettingHeaderTableSize: "HEADER_TABLE_SIZE",
- SettingEnablePush: "ENABLE_PUSH",
- SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
- SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
- SettingMaxFrameSize: "MAX_FRAME_SIZE",
- SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
-}
-
-func (s SettingID) String() string {
- if v, ok := settingName[s]; ok {
- return v
- }
- return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
-}
-
-var (
- errInvalidHeaderFieldName = errors.New("http2: invalid header field name")
- errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
-)
-
-// validHeaderFieldName reports whether v is a valid header field name (key).
-// RFC 7230 says:
-// header-field = field-name ":" OWS field-value OWS
-// field-name = token
-// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
-// "^" / "_" / "
-// Further, http2 says:
-// "Just as in HTTP/1.x, header field names are strings of ASCII
-// characters that are compared in a case-insensitive
-// fashion. However, header field names MUST be converted to
-// lowercase prior to their encoding in HTTP/2. "
-func validHeaderFieldName(v string) bool {
- if len(v) == 0 {
- return false
- }
- for _, r := range v {
- if int(r) >= len(isTokenTable) || ('A' <= r && r <= 'Z') {
- return false
- }
- if !isTokenTable[byte(r)] {
- return false
- }
- }
- return true
-}
-
-// validHeaderFieldValue reports whether v is a valid header field value.
-//
-// RFC 7230 says:
-// field-value = *( field-content / obs-fold )
-// obs-fold = N/A to http2, and deprecated
-// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
-// field-vchar = VCHAR / obs-text
-// obs-text = %x80-FF
-// VCHAR = "any visible [USASCII] character"
-//
-// http2 further says: "Similarly, HTTP/2 allows header field values
-// that are not valid. While most of the values that can be encoded
-// will not alter header field parsing, carriage return (CR, ASCII
-// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
-// 0x0) might be exploited by an attacker if they are translated
-// verbatim. Any request or response that contains a character not
-// permitted in a header field value MUST be treated as malformed
-// (Section 8.1.2.6). Valid characters are defined by the
-// field-content ABNF rule in Section 3.2 of [RFC7230]."
-//
-// This function does not (yet?) properly handle the rejection of
-// strings that begin or end with SP or HTAB.
-func validHeaderFieldValue(v string) bool {
- for i := 0; i < len(v); i++ {
- if b := v[i]; b < ' ' && b != '\t' || b == 0x7f {
- return false
- }
- }
- return true
-}
-
-var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
-
-func init() {
- for i := 100; i <= 999; i++ {
- if v := http.StatusText(i); v != "" {
- httpCodeStringCommon[i] = strconv.Itoa(i)
- }
- }
-}
-
-func httpCodeString(code int) string {
- if s, ok := httpCodeStringCommon[code]; ok {
- return s
- }
- return strconv.Itoa(code)
-}
-
-// from pkg io
-type stringWriter interface {
- WriteString(s string) (n int, err error)
-}
-
-// A gate lets two goroutines coordinate their activities.
-type gate chan struct{}
-
-func (g gate) Done() { g <- struct{}{} }
-func (g gate) Wait() { <-g }
-
-// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
-type closeWaiter chan struct{}
-
-// Init makes a closeWaiter usable.
-// It exists so that a closeWaiter value can be placed inside a
-// larger struct and have the Mutex and Cond's memory in the same
-// allocation.
-func (cw *closeWaiter) Init() {
- *cw = make(chan struct{})
-}
-
-// Close marks the closeWaiter as closed and unblocks any waiters.
-func (cw closeWaiter) Close() {
- close(cw)
-}
-
-// Wait waits for the closeWaiter to become closed.
-func (cw closeWaiter) Wait() {
- <-cw
-}
-
-// bufferedWriter is a buffered writer that writes to w.
-// Its buffered writer is lazily allocated as needed, to minimize
-// idle memory usage with many connections.
-type bufferedWriter struct {
- w io.Writer // immutable
- bw *bufio.Writer // non-nil when data is buffered
-}
-
-func newBufferedWriter(w io.Writer) *bufferedWriter {
- return &bufferedWriter{w: w}
-}
-
-var bufWriterPool = sync.Pool{
- New: func() interface{} {
- // TODO: pick something better? this is a bit under
- // (3 x typical 1500 byte MTU) at least.
- return bufio.NewWriterSize(nil, 4<<10)
- },
-}
-
-func (w *bufferedWriter) Write(p []byte) (n int, err error) {
- if w.bw == nil {
- bw := bufWriterPool.Get().(*bufio.Writer)
- bw.Reset(w.w)
- w.bw = bw
- }
- return w.bw.Write(p)
-}
-
-func (w *bufferedWriter) Flush() error {
- bw := w.bw
- if bw == nil {
- return nil
- }
- err := bw.Flush()
- bw.Reset(nil)
- bufWriterPool.Put(bw)
- w.bw = nil
- return err
-}
-
-func mustUint31(v int32) uint32 {
- if v < 0 || v > 2147483647 {
- panic("out of range")
- }
- return uint32(v)
-}
-
-// bodyAllowedForStatus reports whether a given response status code
-// permits a body. See RFC2616, section 4.4.
-func bodyAllowedForStatus(status int) bool {
- switch {
- case status >= 100 && status <= 199:
- return false
- case status == 204:
- return false
- case status == 304:
- return false
- }
- return true
-}
-
-type httpError struct {
- msg string
- timeout bool
-}
-
-func (e *httpError) Error() string { return e.msg }
-func (e *httpError) Timeout() bool { return e.timeout }
-func (e *httpError) Temporary() bool { return true }
-
-var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
-
-var isTokenTable = [127]bool{
- '!': true,
- '#': true,
- '$': true,
- '%': true,
- '&': true,
- '\'': true,
- '*': true,
- '+': true,
- '-': true,
- '.': true,
- '0': true,
- '1': true,
- '2': true,
- '3': true,
- '4': true,
- '5': true,
- '6': true,
- '7': true,
- '8': true,
- '9': true,
- 'A': true,
- 'B': true,
- 'C': true,
- 'D': true,
- 'E': true,
- 'F': true,
- 'G': true,
- 'H': true,
- 'I': true,
- 'J': true,
- 'K': true,
- 'L': true,
- 'M': true,
- 'N': true,
- 'O': true,
- 'P': true,
- 'Q': true,
- 'R': true,
- 'S': true,
- 'T': true,
- 'U': true,
- 'W': true,
- 'V': true,
- 'X': true,
- 'Y': true,
- 'Z': true,
- '^': true,
- '_': true,
- '`': true,
- 'a': true,
- 'b': true,
- 'c': true,
- 'd': true,
- 'e': true,
- 'f': true,
- 'g': true,
- 'h': true,
- 'i': true,
- 'j': true,
- 'k': true,
- 'l': true,
- 'm': true,
- 'n': true,
- 'o': true,
- 'p': true,
- 'q': true,
- 'r': true,
- 's': true,
- 't': true,
- 'u': true,
- 'v': true,
- 'w': true,
- 'x': true,
- 'y': true,
- 'z': true,
- '|': true,
- '~': true,
-}
-
-type connectionStater interface {
- ConnectionState() tls.ConnectionState
-}
-
-var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}
-
-type sorter struct {
- v []string // owned by sorter
-}
-
-func (s *sorter) Len() int { return len(s.v) }
-func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] }
-func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
-
-// Keys returns the sorted keys of h.
-//
-// The returned slice is only valid until s is used again or returned to
-// its pool.
-func (s *sorter) Keys(h http.Header) []string {
- keys := s.v[:0]
- for k := range h {
- keys = append(keys, k)
- }
- s.v = keys
- sort.Sort(s)
- return keys
-}
-
-func (s *sorter) SortStrings(ss []string) {
- // Our sorter works on s.v, which sorter owns, so
- // stash it away while we sort the user's buffer.
- save := s.v
- s.v = ss
- sort.Sort(s)
- s.v = save
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go15.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go15.go
deleted file mode 100644
index d0fa5c8906..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go15.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.5
-
-package http2
-
-import "net/http"
-
-func requestCancel(req *http.Request) <-chan struct{} { return nil }
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go16.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go16.go
deleted file mode 100644
index db53c5b8cb..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go16.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.6
-
-package http2
-
-import "net/http"
-
-func configureTransport(t1 *http.Transport) (*Transport, error) {
- return nil, errTransportVersion
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/pipe.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/pipe.go
deleted file mode 100644
index 69446e7a37..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/pipe.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "errors"
- "io"
- "sync"
-)
-
-// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
-// io.Pipe except there are no PipeReader/PipeWriter halves, and the
-// underlying buffer is an interface. (io.Pipe is always unbuffered)
-type pipe struct {
- mu sync.Mutex
- c sync.Cond // c.L lazily initialized to &p.mu
- b pipeBuffer
- err error // read error once empty. non-nil means closed.
- breakErr error // immediate read error (caller doesn't see rest of b)
- donec chan struct{} // closed on error
- readFn func() // optional code to run in Read before error
-}
-
-type pipeBuffer interface {
- Len() int
- io.Writer
- io.Reader
-}
-
-// Read waits until data is available and copies bytes
-// from the buffer into p.
-func (p *pipe) Read(d []byte) (n int, err error) {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.c.L == nil {
- p.c.L = &p.mu
- }
- for {
- if p.breakErr != nil {
- return 0, p.breakErr
- }
- if p.b.Len() > 0 {
- return p.b.Read(d)
- }
- if p.err != nil {
- if p.readFn != nil {
- p.readFn() // e.g. copy trailers
- p.readFn = nil // not sticky like p.err
- }
- return 0, p.err
- }
- p.c.Wait()
- }
-}
-
-var errClosedPipeWrite = errors.New("write on closed buffer")
-
-// Write copies bytes from p into the buffer and wakes a reader.
-// It is an error to write more data than the buffer can hold.
-func (p *pipe) Write(d []byte) (n int, err error) {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.c.L == nil {
- p.c.L = &p.mu
- }
- defer p.c.Signal()
- if p.err != nil {
- return 0, errClosedPipeWrite
- }
- return p.b.Write(d)
-}
-
-// CloseWithError causes the next Read (waking up a current blocked
-// Read if needed) to return the provided err after all data has been
-// read.
-//
-// The error must be non-nil.
-func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
-
-// BreakWithError causes the next Read (waking up a current blocked
-// Read if needed) to return the provided err immediately, without
-// waiting for unread data.
-func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
-
-// closeWithErrorAndCode is like CloseWithError but also sets some code to run
-// in the caller's goroutine before returning the error.
-func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
-
-func (p *pipe) closeWithError(dst *error, err error, fn func()) {
- if err == nil {
- panic("err must be non-nil")
- }
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.c.L == nil {
- p.c.L = &p.mu
- }
- defer p.c.Signal()
- if *dst != nil {
- // Already been done.
- return
- }
- p.readFn = fn
- *dst = err
- p.closeDoneLocked()
-}
-
-// requires p.mu be held.
-func (p *pipe) closeDoneLocked() {
- if p.donec == nil {
- return
- }
- // Close if unclosed. This isn't racy since we always
- // hold p.mu while closing.
- select {
- case <-p.donec:
- default:
- close(p.donec)
- }
-}
-
-// Err returns the error (if any) first set by BreakWithError or CloseWithError.
-func (p *pipe) Err() error {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.breakErr != nil {
- return p.breakErr
- }
- return p.err
-}
-
-// Done returns a channel which is closed if and when this pipe is closed
-// with CloseWithError.
-func (p *pipe) Done() <-chan struct{} {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.donec == nil {
- p.donec = make(chan struct{})
- if p.err != nil || p.breakErr != nil {
- // Already hit an error.
- p.closeDoneLocked()
- }
- }
- return p.donec
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/server.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/server.go
deleted file mode 100644
index 1e6980c319..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/server.go
+++ /dev/null
@@ -1,2178 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// TODO: replace all <-sc.doneServing with reads from the stream's cw
-// instead, and make sure that on close we close all open
-// streams. then remove doneServing?
-
-// TODO: re-audit GOAWAY support. Consider each incoming frame type and
-// whether it should be ignored during graceful shutdown.
-
-// TODO: disconnect idle clients. GFE seems to do 4 minutes. make
-// configurable? or maximum number of idle clients and remove the
-// oldest?
-
-// TODO: turn off the serve goroutine when idle, so
-// an idle conn only has the readFrames goroutine active. (which could
-// also be optimized probably to pin less memory in crypto/tls). This
-// would involve tracking when the serve goroutine is active (atomic
-// int32 read/CAS probably?) and starting it up when frames arrive,
-// and shutting it down when all handlers exit. the occasional PING
-// packets could use time.AfterFunc to call sc.wakeStartServeLoop()
-// (which is a no-op if already running) and then queue the PING write
-// as normal. The serve loop would then exit in most cases (if no
-// Handlers running) and not be woken up again until the PING packet
-// returns.
-
-// TODO (maybe): add a mechanism for Handlers to go into
-// half-closed-local mode (rw.(io.Closer) test?) but not exit their
-// handler, and continue to be able to read from the
-// Request.Body. This would be a somewhat semantic change from HTTP/1
-// (or at least what we expose in net/http), so I'd probably want to
-// add it there too. For now, this package says that returning from
-// the Handler ServeHTTP function means you're both done reading and
-// done writing, without a way to stop just one or the other.
-
-package http2
-
-import (
- "bufio"
- "bytes"
- "crypto/tls"
- "errors"
- "fmt"
- "io"
- "log"
- "net"
- "net/http"
- "net/textproto"
- "net/url"
- "os"
- "reflect"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/http2/hpack"
-)
-
-const (
- prefaceTimeout = 10 * time.Second
- firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
- handlerChunkWriteSize = 4 << 10
- defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
-)
-
-var (
- errClientDisconnected = errors.New("client disconnected")
- errClosedBody = errors.New("body closed by handler")
- errHandlerComplete = errors.New("http2: request body closed due to handler exiting")
- errStreamClosed = errors.New("http2: stream closed")
-)
-
-var responseWriterStatePool = sync.Pool{
- New: func() interface{} {
- rws := &responseWriterState{}
- rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
- return rws
- },
-}
-
-// Test hooks.
-var (
- testHookOnConn func()
- testHookGetServerConn func(*serverConn)
- testHookOnPanicMu *sync.Mutex // nil except in tests
- testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool)
-)
-
-// Server is an HTTP/2 server.
-type Server struct {
- // MaxHandlers limits the number of http.Handler ServeHTTP goroutines
- // which may run at a time over all connections.
- // Negative or zero means no limit.
- // TODO: implement
- MaxHandlers int
-
- // MaxConcurrentStreams optionally specifies the number of
- // concurrent streams that each client may have open at a
- // time. This is unrelated to the number of http.Handler goroutines
- // which may be active globally, which is MaxHandlers.
- // If zero, MaxConcurrentStreams defaults to at least 100, per
- // the HTTP/2 spec's recommendations.
- MaxConcurrentStreams uint32
-
- // MaxReadFrameSize optionally specifies the largest frame
- // this server is willing to read. A valid value is between
- // 16k and 16M, inclusive. If zero or otherwise invalid, a
- // default value is used.
- MaxReadFrameSize uint32
-
- // PermitProhibitedCipherSuites, if true, permits the use of
- // cipher suites prohibited by the HTTP/2 spec.
- PermitProhibitedCipherSuites bool
-}
-
-func (s *Server) maxReadFrameSize() uint32 {
- if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
- return v
- }
- return defaultMaxReadFrameSize
-}
-
-func (s *Server) maxConcurrentStreams() uint32 {
- if v := s.MaxConcurrentStreams; v > 0 {
- return v
- }
- return defaultMaxStreams
-}
-
-// ConfigureServer adds HTTP/2 support to a net/http Server.
-//
-// The configuration conf may be nil.
-//
-// ConfigureServer must be called before s begins serving.
-func ConfigureServer(s *http.Server, conf *Server) error {
- if conf == nil {
- conf = new(Server)
- }
-
- if s.TLSConfig == nil {
- s.TLSConfig = new(tls.Config)
- } else if s.TLSConfig.CipherSuites != nil {
- // If they already provided a CipherSuite list, return
- // an error if it has a bad order or is missing
- // ECDHE_RSA_WITH_AES_128_GCM_SHA256.
- const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- haveRequired := false
- sawBad := false
- for i, cs := range s.TLSConfig.CipherSuites {
- if cs == requiredCipher {
- haveRequired = true
- }
- if isBadCipher(cs) {
- sawBad = true
- } else if sawBad {
- return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs)
- }
- }
- if !haveRequired {
- return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")
- }
- }
-
- // Note: not setting MinVersion to tls.VersionTLS12,
- // as we don't want to interfere with HTTP/1.1 traffic
- // on the user's server. We enforce TLS 1.2 later once
- // we accept a connection. Ideally this should be done
- // during next-proto selection, but using TLS <1.2 with
- // HTTP/2 is still the client's bug.
-
- s.TLSConfig.PreferServerCipherSuites = true
-
- haveNPN := false
- for _, p := range s.TLSConfig.NextProtos {
- if p == NextProtoTLS {
- haveNPN = true
- break
- }
- }
- if !haveNPN {
- s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
- }
- // h2-14 is temporary (as of 2015-03-05) while we wait for all browsers
- // to switch to "h2".
- s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14")
-
- if s.TLSNextProto == nil {
- s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
- }
- protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
- if testHookOnConn != nil {
- testHookOnConn()
- }
- conf.ServeConn(c, &ServeConnOpts{
- Handler: h,
- BaseConfig: hs,
- })
- }
- s.TLSNextProto[NextProtoTLS] = protoHandler
- s.TLSNextProto["h2-14"] = protoHandler // temporary; see above.
- return nil
-}
-
-// ServeConnOpts are options for the Server.ServeConn method.
-type ServeConnOpts struct {
- // BaseConfig optionally sets the base configuration
- // for values. If nil, defaults are used.
- BaseConfig *http.Server
-
- // Handler specifies which handler to use for processing
- // requests. If nil, BaseConfig.Handler is used. If BaseConfig
- // or BaseConfig.Handler is nil, http.DefaultServeMux is used.
- Handler http.Handler
-}
-
-func (o *ServeConnOpts) baseConfig() *http.Server {
- if o != nil && o.BaseConfig != nil {
- return o.BaseConfig
- }
- return new(http.Server)
-}
-
-func (o *ServeConnOpts) handler() http.Handler {
- if o != nil {
- if o.Handler != nil {
- return o.Handler
- }
- if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
- return o.BaseConfig.Handler
- }
- }
- return http.DefaultServeMux
-}
-
-// ServeConn serves HTTP/2 requests on the provided connection and
-// blocks until the connection is no longer readable.
-//
-// ServeConn starts speaking HTTP/2 assuming that c has not had any
-// reads or writes. It writes its initial settings frame and expects
-// to be able to read the preface and settings frame from the
-// client. If c has a ConnectionState method like a *tls.Conn, the
-// ConnectionState is used to verify the TLS ciphersuite and to set
-// the Request.TLS field in Handlers.
-//
-// ServeConn does not support h2c by itself. Any h2c support must be
-// implemented in terms of providing a suitably-behaving net.Conn.
-//
-// The opts parameter is optional. If nil, default values are used.
-func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
- sc := &serverConn{
- srv: s,
- hs: opts.baseConfig(),
- conn: c,
- remoteAddrStr: c.RemoteAddr().String(),
- bw: newBufferedWriter(c),
- handler: opts.handler(),
- streams: make(map[uint32]*stream),
- readFrameCh: make(chan readFrameResult),
- wantWriteFrameCh: make(chan frameWriteMsg, 8),
- wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
- bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
- doneServing: make(chan struct{}),
- advMaxStreams: s.maxConcurrentStreams(),
- writeSched: writeScheduler{
- maxFrameSize: initialMaxFrameSize,
- },
- initialWindowSize: initialWindowSize,
- headerTableSize: initialHeaderTableSize,
- serveG: newGoroutineLock(),
- pushEnabled: true,
- }
- sc.flow.add(initialWindowSize)
- sc.inflow.add(initialWindowSize)
- sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
-
- fr := NewFramer(sc.bw, c)
- fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
- fr.MaxHeaderListSize = sc.maxHeaderListSize()
- fr.SetMaxReadFrameSize(s.maxReadFrameSize())
- sc.framer = fr
-
- if tc, ok := c.(connectionStater); ok {
- sc.tlsState = new(tls.ConnectionState)
- *sc.tlsState = tc.ConnectionState()
- // 9.2 Use of TLS Features
- // An implementation of HTTP/2 over TLS MUST use TLS
- // 1.2 or higher with the restrictions on feature set
- // and cipher suite described in this section. Due to
- // implementation limitations, it might not be
- // possible to fail TLS negotiation. An endpoint MUST
- // immediately terminate an HTTP/2 connection that
- // does not meet the TLS requirements described in
- // this section with a connection error (Section
- // 5.4.1) of type INADEQUATE_SECURITY.
- if sc.tlsState.Version < tls.VersionTLS12 {
- sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
- return
- }
-
- if sc.tlsState.ServerName == "" {
- // Client must use SNI, but we don't enforce that anymore,
- // since it was causing problems when connecting to bare IP
- // addresses during development.
- //
- // TODO: optionally enforce? Or enforce at the time we receive
- // a new request, and verify that the ServerName matches the :authority?
- // But that precludes proxy situations, perhaps.
- //
- // So for now, do nothing here again.
- }
-
- if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
- // "Endpoints MAY choose to generate a connection error
- // (Section 5.4.1) of type INADEQUATE_SECURITY if one of
- // the prohibited cipher suites are negotiated."
- //
- // We choose that. In my opinion, the spec is weak
- // here. It also says both parties must support at least
- // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
- // excuses here. If we really must, we could allow an
- // "AllowInsecureWeakCiphers" option on the server later.
- // Let's see how it plays out first.
- sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
- return
- }
- }
-
- if hook := testHookGetServerConn; hook != nil {
- hook(sc)
- }
- sc.serve()
-}
-
-// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
-func isBadCipher(cipher uint16) bool {
- switch cipher {
- case tls.TLS_RSA_WITH_RC4_128_SHA,
- tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
- tls.TLS_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_RSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
- // Reject cipher suites from Appendix A.
- // "This list includes those cipher suites that do not
- // offer an ephemeral key exchange and those that are
- // based on the TLS null, stream or block cipher type"
- return true
- default:
- return false
- }
-}
-
-func (sc *serverConn) rejectConn(err ErrCode, debug string) {
- sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
- // ignoring errors. hanging up anyway.
- sc.framer.WriteGoAway(0, err, []byte(debug))
- sc.bw.Flush()
- sc.conn.Close()
-}
-
-type serverConn struct {
- // Immutable:
- srv *Server
- hs *http.Server
- conn net.Conn
- bw *bufferedWriter // writing to conn
- handler http.Handler
- framer *Framer
- doneServing chan struct{} // closed when serverConn.serve ends
- readFrameCh chan readFrameResult // written by serverConn.readFrames
- wantWriteFrameCh chan frameWriteMsg // from handlers -> serve
- wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
- bodyReadCh chan bodyReadMsg // from handlers -> serve
- testHookCh chan func(int) // code to run on the serve loop
- flow flow // conn-wide (not stream-specific) outbound flow control
- inflow flow // conn-wide inbound flow control
- tlsState *tls.ConnectionState // shared by all handlers, like net/http
- remoteAddrStr string
-
- // Everything following is owned by the serve loop; use serveG.check():
- serveG goroutineLock // used to verify funcs are on serve()
- pushEnabled bool
- sawFirstSettings bool // got the initial SETTINGS frame after the preface
- needToSendSettingsAck bool
- unackedSettings int // how many SETTINGS have we sent without ACKs?
- clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
- advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
- curOpenStreams uint32 // client's number of open streams
- maxStreamID uint32 // max ever seen
- streams map[uint32]*stream
- initialWindowSize int32
- headerTableSize uint32
- peerMaxHeaderListSize uint32 // zero means unknown (default)
- canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
- writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh
- needsFrameFlush bool // last frame write wasn't a flush
- writeSched writeScheduler
- inGoAway bool // we've started to or sent GOAWAY
- needToSendGoAway bool // we need to schedule a GOAWAY frame write
- goAwayCode ErrCode
- shutdownTimerCh <-chan time.Time // nil until used
- shutdownTimer *time.Timer // nil until used
- freeRequestBodyBuf []byte // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf
-
- // Owned by the writeFrameAsync goroutine:
- headerWriteBuf bytes.Buffer
- hpackEncoder *hpack.Encoder
-}
-
-func (sc *serverConn) maxHeaderListSize() uint32 {
- n := sc.hs.MaxHeaderBytes
- if n <= 0 {
- n = http.DefaultMaxHeaderBytes
- }
- // http2's count is in a slightly different unit and includes 32 bytes per pair.
- // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
- const perFieldOverhead = 32 // per http2 spec
- const typicalHeaders = 10 // conservative
- return uint32(n + typicalHeaders*perFieldOverhead)
-}
-
-// stream represents a stream. This is the minimal metadata needed by
-// the serve goroutine. Most of the actual stream state is owned by
-// the http.Handler's goroutine in the responseWriter. Because the
-// responseWriter's responseWriterState is recycled at the end of a
-// handler, this struct intentionally has no pointer to the
-// *responseWriter{,State} itself, as the Handler ending nils out the
-// responseWriter's state field.
-type stream struct {
- // immutable:
- sc *serverConn
- id uint32
- body *pipe // non-nil if expecting DATA frames
- cw closeWaiter // closed when the stream transitions to the closed state
-
- // owned by serverConn's serve loop:
- bodyBytes int64 // body bytes seen so far
- declBodyBytes int64 // or -1 if undeclared
- flow flow // limits writing from Handler to client
- inflow flow // what the client is allowed to POST/etc to us
- parent *stream // or nil
- numTrailerValues int64
- weight uint8
- state streamState
- sentReset bool // only true once detached from streams map
- gotReset bool // only true once detached from streams map
- gotTrailerHeader bool // HEADER frame for trailers was seen
- reqBuf []byte
-
- trailer http.Header // accumulated trailers
- reqTrailer http.Header // handler's Request.Trailer
-}
-
-func (sc *serverConn) Framer() *Framer { return sc.framer }
-func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
-func (sc *serverConn) Flush() error { return sc.bw.Flush() }
-func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
- return sc.hpackEncoder, &sc.headerWriteBuf
-}
-
-func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
- sc.serveG.check()
- // http://http2.github.io/http2-spec/#rfc.section.5.1
- if st, ok := sc.streams[streamID]; ok {
- return st.state, st
- }
- // "The first use of a new stream identifier implicitly closes all
- // streams in the "idle" state that might have been initiated by
- // that peer with a lower-valued stream identifier. For example, if
- // a client sends a HEADERS frame on stream 7 without ever sending a
- // frame on stream 5, then stream 5 transitions to the "closed"
- // state when the first frame for stream 7 is sent or received."
- if streamID <= sc.maxStreamID {
- return stateClosed, nil
- }
- return stateIdle, nil
-}
-
-// setConnState calls the net/http ConnState hook for this connection, if configured.
-// Note that the net/http package does StateNew and StateClosed for us.
-// There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
-func (sc *serverConn) setConnState(state http.ConnState) {
- if sc.hs.ConnState != nil {
- sc.hs.ConnState(sc.conn, state)
- }
-}
-
-func (sc *serverConn) vlogf(format string, args ...interface{}) {
- if VerboseLogs {
- sc.logf(format, args...)
- }
-}
-
-func (sc *serverConn) logf(format string, args ...interface{}) {
- if lg := sc.hs.ErrorLog; lg != nil {
- lg.Printf(format, args...)
- } else {
- log.Printf(format, args...)
- }
-}
-
-// errno returns v's underlying uintptr, else 0.
-//
-// TODO: remove this helper function once http2 can use build
-// tags. See comment in isClosedConnError.
-func errno(v error) uintptr {
- if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
- return uintptr(rv.Uint())
- }
- return 0
-}
-
-// isClosedConnError reports whether err is an error from use of a closed
-// network connection.
-func isClosedConnError(err error) bool {
- if err == nil {
- return false
- }
-
- // TODO: remove this string search and be more like the Windows
- // case below. That might involve modifying the standard library
- // to return better error types.
- str := err.Error()
- if strings.Contains(str, "use of closed network connection") {
- return true
- }
-
- // TODO(bradfitz): x/tools/cmd/bundle doesn't really support
- // build tags, so I can't make an http2_windows.go file with
- // Windows-specific stuff. Fix that and move this, once we
- // have a way to bundle this into std's net/http somehow.
- if runtime.GOOS == "windows" {
- if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
- if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
- const WSAECONNABORTED = 10053
- const WSAECONNRESET = 10054
- if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
- return true
- }
- }
- }
- }
- return false
-}
-
-func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
- if err == nil {
- return
- }
- if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) {
- // Boring, expected errors.
- sc.vlogf(format, args...)
- } else {
- sc.logf(format, args...)
- }
-}
-
-func (sc *serverConn) canonicalHeader(v string) string {
- sc.serveG.check()
- cv, ok := commonCanonHeader[v]
- if ok {
- return cv
- }
- cv, ok = sc.canonHeader[v]
- if ok {
- return cv
- }
- if sc.canonHeader == nil {
- sc.canonHeader = make(map[string]string)
- }
- cv = http.CanonicalHeaderKey(v)
- sc.canonHeader[v] = cv
- return cv
-}
-
-type readFrameResult struct {
- f Frame // valid until readMore is called
- err error
-
- // readMore should be called once the consumer no longer needs or
- // retains f. After readMore, f is invalid and more frames can be
- // read.
- readMore func()
-}
-
-// readFrames is the loop that reads incoming frames.
-// It takes care to only read one frame at a time, blocking until the
-// consumer is done with the frame.
-// It's run on its own goroutine.
-func (sc *serverConn) readFrames() {
- gate := make(gate)
- gateDone := gate.Done
- for {
- f, err := sc.framer.ReadFrame()
- select {
- case sc.readFrameCh <- readFrameResult{f, err, gateDone}:
- case <-sc.doneServing:
- return
- }
- select {
- case <-gate:
- case <-sc.doneServing:
- return
- }
- if terminalReadFrameError(err) {
- return
- }
- }
-}
-
-// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
-type frameWriteResult struct {
- wm frameWriteMsg // what was written (or attempted)
- err error // result of the writeFrame call
-}
-
-// writeFrameAsync runs in its own goroutine and writes a single frame
-// and then reports when it's done.
-// At most one goroutine can be running writeFrameAsync at a time per
-// serverConn.
-func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) {
- err := wm.write.writeFrame(sc)
- sc.wroteFrameCh <- frameWriteResult{wm, err}
-}
-
-func (sc *serverConn) closeAllStreamsOnConnClose() {
- sc.serveG.check()
- for _, st := range sc.streams {
- sc.closeStream(st, errClientDisconnected)
- }
-}
-
-func (sc *serverConn) stopShutdownTimer() {
- sc.serveG.check()
- if t := sc.shutdownTimer; t != nil {
- t.Stop()
- }
-}
-
-func (sc *serverConn) notePanic() {
- // Note: this is for serverConn.serve panicking, not http.Handler code.
- if testHookOnPanicMu != nil {
- testHookOnPanicMu.Lock()
- defer testHookOnPanicMu.Unlock()
- }
- if testHookOnPanic != nil {
- if e := recover(); e != nil {
- if testHookOnPanic(sc, e) {
- panic(e)
- }
- }
- }
-}
-
-func (sc *serverConn) serve() {
- sc.serveG.check()
- defer sc.notePanic()
- defer sc.conn.Close()
- defer sc.closeAllStreamsOnConnClose()
- defer sc.stopShutdownTimer()
- defer close(sc.doneServing) // unblocks handlers trying to send
-
- if VerboseLogs {
- sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
- }
-
- sc.writeFrame(frameWriteMsg{
- write: writeSettings{
- {SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
- {SettingMaxConcurrentStreams, sc.advMaxStreams},
- {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
-
- // TODO: more actual settings, notably
- // SettingInitialWindowSize, but then we also
- // want to bump up the conn window size the
- // same amount here right after the settings
- },
- })
- sc.unackedSettings++
-
- if err := sc.readPreface(); err != nil {
- sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
- return
- }
- // Now that we've got the preface, get us out of the
- // "StateNew" state. We can't go directly to idle, though.
- // Active means we read some data and anticipate a request. We'll
- // do another Active when we get a HEADERS frame.
- sc.setConnState(http.StateActive)
- sc.setConnState(http.StateIdle)
-
- go sc.readFrames() // closed by defer sc.conn.Close above
-
- settingsTimer := time.NewTimer(firstSettingsTimeout)
- loopNum := 0
- for {
- loopNum++
- select {
- case wm := <-sc.wantWriteFrameCh:
- sc.writeFrame(wm)
- case res := <-sc.wroteFrameCh:
- sc.wroteFrame(res)
- case res := <-sc.readFrameCh:
- if !sc.processFrameFromReader(res) {
- return
- }
- res.readMore()
- if settingsTimer.C != nil {
- settingsTimer.Stop()
- settingsTimer.C = nil
- }
- case m := <-sc.bodyReadCh:
- sc.noteBodyRead(m.st, m.n)
- case <-settingsTimer.C:
- sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
- return
- case <-sc.shutdownTimerCh:
- sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
- return
- case fn := <-sc.testHookCh:
- fn(loopNum)
- }
- }
-}
-
-// readPreface reads the ClientPreface greeting from the peer
-// or returns an error on timeout or an invalid greeting.
-func (sc *serverConn) readPreface() error {
- errc := make(chan error, 1)
- go func() {
- // Read the client preface
- buf := make([]byte, len(ClientPreface))
- if _, err := io.ReadFull(sc.conn, buf); err != nil {
- errc <- err
- } else if !bytes.Equal(buf, clientPreface) {
- errc <- fmt.Errorf("bogus greeting %q", buf)
- } else {
- errc <- nil
- }
- }()
- timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
- defer timer.Stop()
- select {
- case <-timer.C:
- return errors.New("timeout waiting for client preface")
- case err := <-errc:
- if err == nil {
- if VerboseLogs {
- sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
- }
- }
- return err
- }
-}
-
-var errChanPool = sync.Pool{
- New: func() interface{} { return make(chan error, 1) },
-}
-
-var writeDataPool = sync.Pool{
- New: func() interface{} { return new(writeData) },
-}
-
-// writeDataFromHandler writes DATA response frames from a handler on
-// the given stream.
-func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
- ch := errChanPool.Get().(chan error)
- writeArg := writeDataPool.Get().(*writeData)
- *writeArg = writeData{stream.id, data, endStream}
- err := sc.writeFrameFromHandler(frameWriteMsg{
- write: writeArg,
- stream: stream,
- done: ch,
- })
- if err != nil {
- return err
- }
- var frameWriteDone bool // the frame write is done (successfully or not)
- select {
- case err = <-ch:
- frameWriteDone = true
- case <-sc.doneServing:
- return errClientDisconnected
- case <-stream.cw:
- // If both ch and stream.cw were ready (as might
- // happen on the final Write after an http.Handler
- // ends), prefer the write result. Otherwise this
- // might just be us successfully closing the stream.
- // The writeFrameAsync and serve goroutines guarantee
- // that the ch send will happen before the stream.cw
- // close.
- select {
- case err = <-ch:
- frameWriteDone = true
- default:
- return errStreamClosed
- }
- }
- errChanPool.Put(ch)
- if frameWriteDone {
- writeDataPool.Put(writeArg)
- }
- return err
-}
-
-// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts
-// if the connection has gone away.
-//
-// This must not be run from the serve goroutine itself, else it might
-// deadlock writing to sc.wantWriteFrameCh (which is only mildly
-// buffered and is read by serve itself). If you're on the serve
-// goroutine, call writeFrame instead.
-func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error {
- sc.serveG.checkNotOn() // NOT
- select {
- case sc.wantWriteFrameCh <- wm:
- return nil
- case <-sc.doneServing:
- // Serve loop is gone.
- // Client has closed their connection to the server.
- return errClientDisconnected
- }
-}
-
-// writeFrame schedules a frame to write and sends it if there's nothing
-// already being written.
-//
-// There is no pushback here (the serve goroutine never blocks). It's
-// the http.Handlers that block, waiting for their previous frames to
-// make it onto the wire.
-//
-// If you're not on the serve goroutine, use writeFrameFromHandler instead.
-func (sc *serverConn) writeFrame(wm frameWriteMsg) {
- sc.serveG.check()
- sc.writeSched.add(wm)
- sc.scheduleFrameWrite()
-}
-
-// startFrameWrite starts a goroutine to write wm (in a separate
-// goroutine since that might block on the network), and updates the
-// serve goroutine's state about the world, updated from info in wm.
-func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
- sc.serveG.check()
- if sc.writingFrame {
- panic("internal error: can only be writing one frame at a time")
- }
-
- st := wm.stream
- if st != nil {
- switch st.state {
- case stateHalfClosedLocal:
- panic("internal error: attempt to send frame on half-closed-local stream")
- case stateClosed:
- if st.sentReset || st.gotReset {
- // Skip this frame.
- sc.scheduleFrameWrite()
- return
- }
- panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm))
- }
- }
-
- sc.writingFrame = true
- sc.needsFrameFlush = true
- go sc.writeFrameAsync(wm)
-}
-
-// errHandlerPanicked is the error given to any callers blocked in a read from
-// Request.Body when the main goroutine panics. Since most handlers read in
-// the main ServeHTTP goroutine, this will show up rarely.
-var errHandlerPanicked = errors.New("http2: handler panicked")
-
-// wroteFrame is called on the serve goroutine with the result of
-// whatever happened on writeFrameAsync.
-func (sc *serverConn) wroteFrame(res frameWriteResult) {
- sc.serveG.check()
- if !sc.writingFrame {
- panic("internal error: expected to be already writing a frame")
- }
- sc.writingFrame = false
-
- wm := res.wm
- st := wm.stream
-
- closeStream := endsStream(wm.write)
-
- if _, ok := wm.write.(handlerPanicRST); ok {
- sc.closeStream(st, errHandlerPanicked)
- }
-
- // Reply (if requested) to the blocked ServeHTTP goroutine.
- if ch := wm.done; ch != nil {
- select {
- case ch <- res.err:
- default:
- panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write))
- }
- }
- wm.write = nil // prevent use (assume it's tainted after wm.done send)
-
- if closeStream {
- if st == nil {
- panic("internal error: expecting non-nil stream")
- }
- switch st.state {
- case stateOpen:
- // Here we would go to stateHalfClosedLocal in
- // theory, but since our handler is done and
- // the net/http package provides no mechanism
- // for finishing writing to a ResponseWriter
- // while still reading data (see possible TODO
- // at top of this file), we go into closed
- // state here anyway, after telling the peer
- // we're hanging up on them.
- st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream
- errCancel := StreamError{st.id, ErrCodeCancel}
- sc.resetStream(errCancel)
- case stateHalfClosedRemote:
- sc.closeStream(st, errHandlerComplete)
- }
- }
-
- sc.scheduleFrameWrite()
-}
-
-// scheduleFrameWrite tickles the frame writing scheduler.
-//
-// If a frame is already being written, nothing happens. This will be called again
-// when the frame is done being written.
-//
-// If a frame isn't being written and we need to send one, the best frame
-// to send is selected, preferring first things that aren't
-// stream-specific (e.g. ACKing settings), and then finding the
-// highest priority stream.
-//
-// If a frame isn't being written and there's nothing else to send, we
-// flush the write buffer.
-func (sc *serverConn) scheduleFrameWrite() {
- sc.serveG.check()
- if sc.writingFrame {
- return
- }
- if sc.needToSendGoAway {
- sc.needToSendGoAway = false
- sc.startFrameWrite(frameWriteMsg{
- write: &writeGoAway{
- maxStreamID: sc.maxStreamID,
- code: sc.goAwayCode,
- },
- })
- return
- }
- if sc.needToSendSettingsAck {
- sc.needToSendSettingsAck = false
- sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}})
- return
- }
- if !sc.inGoAway {
- if wm, ok := sc.writeSched.take(); ok {
- sc.startFrameWrite(wm)
- return
- }
- }
- if sc.needsFrameFlush {
- sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}})
- sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
- return
- }
-}
-
-func (sc *serverConn) goAway(code ErrCode) {
- sc.serveG.check()
- if sc.inGoAway {
- return
- }
- if code != ErrCodeNo {
- sc.shutDownIn(250 * time.Millisecond)
- } else {
- // TODO: configurable
- sc.shutDownIn(1 * time.Second)
- }
- sc.inGoAway = true
- sc.needToSendGoAway = true
- sc.goAwayCode = code
- sc.scheduleFrameWrite()
-}
-
-func (sc *serverConn) shutDownIn(d time.Duration) {
- sc.serveG.check()
- sc.shutdownTimer = time.NewTimer(d)
- sc.shutdownTimerCh = sc.shutdownTimer.C
-}
-
-func (sc *serverConn) resetStream(se StreamError) {
- sc.serveG.check()
- sc.writeFrame(frameWriteMsg{write: se})
- if st, ok := sc.streams[se.StreamID]; ok {
- st.sentReset = true
- sc.closeStream(st, se)
- }
-}
-
-// processFrameFromReader processes the serve loop's read from readFrameCh from the
-// frame-reading goroutine.
-// processFrameFromReader returns whether the connection should be kept open.
-func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
- sc.serveG.check()
- err := res.err
- if err != nil {
- if err == ErrFrameTooLarge {
- sc.goAway(ErrCodeFrameSize)
- return true // goAway will close the loop
- }
- clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
- if clientGone {
- // TODO: could we also get into this state if
- // the peer does a half close
- // (e.g. CloseWrite) because they're done
- // sending frames but they're still wanting
- // our open replies? Investigate.
- // TODO: add CloseWrite to crypto/tls.Conn first
- // so we have a way to test this? I suppose
- // just for testing we could have a non-TLS mode.
- return false
- }
- } else {
- f := res.f
- if VerboseLogs {
- sc.vlogf("http2: server read frame %v", summarizeFrame(f))
- }
- err = sc.processFrame(f)
- if err == nil {
- return true
- }
- }
-
- switch ev := err.(type) {
- case StreamError:
- sc.resetStream(ev)
- return true
- case goAwayFlowError:
- sc.goAway(ErrCodeFlowControl)
- return true
- case ConnectionError:
- sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
- sc.goAway(ErrCode(ev))
- return true // goAway will handle shutdown
- default:
- if res.err != nil {
- sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
- } else {
- sc.logf("http2: server closing client connection: %v", err)
- }
- return false
- }
-}
-
-func (sc *serverConn) processFrame(f Frame) error {
- sc.serveG.check()
-
- // First frame received must be SETTINGS.
- if !sc.sawFirstSettings {
- if _, ok := f.(*SettingsFrame); !ok {
- return ConnectionError(ErrCodeProtocol)
- }
- sc.sawFirstSettings = true
- }
-
- switch f := f.(type) {
- case *SettingsFrame:
- return sc.processSettings(f)
- case *MetaHeadersFrame:
- return sc.processHeaders(f)
- case *WindowUpdateFrame:
- return sc.processWindowUpdate(f)
- case *PingFrame:
- return sc.processPing(f)
- case *DataFrame:
- return sc.processData(f)
- case *RSTStreamFrame:
- return sc.processResetStream(f)
- case *PriorityFrame:
- return sc.processPriority(f)
- case *PushPromiseFrame:
- // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
- // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
- return ConnectionError(ErrCodeProtocol)
- default:
- sc.vlogf("http2: server ignoring frame: %v", f.Header())
- return nil
- }
-}
-
-func (sc *serverConn) processPing(f *PingFrame) error {
- sc.serveG.check()
- if f.IsAck() {
- // 6.7 PING: " An endpoint MUST NOT respond to PING frames
- // containing this flag."
- return nil
- }
- if f.StreamID != 0 {
- // "PING frames are not associated with any individual
- // stream. If a PING frame is received with a stream
- // identifier field value other than 0x0, the recipient MUST
- // respond with a connection error (Section 5.4.1) of type
- // PROTOCOL_ERROR."
- return ConnectionError(ErrCodeProtocol)
- }
- sc.writeFrame(frameWriteMsg{write: writePingAck{f}})
- return nil
-}
-
-func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
- sc.serveG.check()
- switch {
- case f.StreamID != 0: // stream-level flow control
- st := sc.streams[f.StreamID]
- if st == nil {
- // "WINDOW_UPDATE can be sent by a peer that has sent a
- // frame bearing the END_STREAM flag. This means that a
- // receiver could receive a WINDOW_UPDATE frame on a "half
- // closed (remote)" or "closed" stream. A receiver MUST
- // NOT treat this as an error, see Section 5.1."
- return nil
- }
- if !st.flow.add(int32(f.Increment)) {
- return StreamError{f.StreamID, ErrCodeFlowControl}
- }
- default: // connection-level flow control
- if !sc.flow.add(int32(f.Increment)) {
- return goAwayFlowError{}
- }
- }
- sc.scheduleFrameWrite()
- return nil
-}
-
-func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
- sc.serveG.check()
-
- state, st := sc.state(f.StreamID)
- if state == stateIdle {
- // 6.4 "RST_STREAM frames MUST NOT be sent for a
- // stream in the "idle" state. If a RST_STREAM frame
- // identifying an idle stream is received, the
- // recipient MUST treat this as a connection error
- // (Section 5.4.1) of type PROTOCOL_ERROR.
- return ConnectionError(ErrCodeProtocol)
- }
- if st != nil {
- st.gotReset = true
- sc.closeStream(st, StreamError{f.StreamID, f.ErrCode})
- }
- return nil
-}
-
-func (sc *serverConn) closeStream(st *stream, err error) {
- sc.serveG.check()
- if st.state == stateIdle || st.state == stateClosed {
- panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
- }
- st.state = stateClosed
- sc.curOpenStreams--
- if sc.curOpenStreams == 0 {
- sc.setConnState(http.StateIdle)
- }
- delete(sc.streams, st.id)
- if p := st.body; p != nil {
- p.CloseWithError(err)
- }
- st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
- sc.writeSched.forgetStream(st.id)
- if st.reqBuf != nil {
- // Stash this request body buffer (64k) away for reuse
- // by a future POST/PUT/etc.
- //
- // TODO(bradfitz): share on the server? sync.Pool?
- // Server requires locks and might hurt contention.
- // sync.Pool might work, or might be worse, depending
- // on goroutine CPU migrations. (get and put on
- // separate CPUs). Maybe a mix of strategies. But
- // this is an easy win for now.
- sc.freeRequestBodyBuf = st.reqBuf
- }
-}
-
-func (sc *serverConn) processSettings(f *SettingsFrame) error {
- sc.serveG.check()
- if f.IsAck() {
- sc.unackedSettings--
- if sc.unackedSettings < 0 {
- // Why is the peer ACKing settings we never sent?
- // The spec doesn't mention this case, but
- // hang up on them anyway.
- return ConnectionError(ErrCodeProtocol)
- }
- return nil
- }
- if err := f.ForeachSetting(sc.processSetting); err != nil {
- return err
- }
- sc.needToSendSettingsAck = true
- sc.scheduleFrameWrite()
- return nil
-}
-
-func (sc *serverConn) processSetting(s Setting) error {
- sc.serveG.check()
- if err := s.Valid(); err != nil {
- return err
- }
- if VerboseLogs {
- sc.vlogf("http2: server processing setting %v", s)
- }
- switch s.ID {
- case SettingHeaderTableSize:
- sc.headerTableSize = s.Val
- sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
- case SettingEnablePush:
- sc.pushEnabled = s.Val != 0
- case SettingMaxConcurrentStreams:
- sc.clientMaxStreams = s.Val
- case SettingInitialWindowSize:
- return sc.processSettingInitialWindowSize(s.Val)
- case SettingMaxFrameSize:
- sc.writeSched.maxFrameSize = s.Val
- case SettingMaxHeaderListSize:
- sc.peerMaxHeaderListSize = s.Val
- default:
- // Unknown setting: "An endpoint that receives a SETTINGS
- // frame with any unknown or unsupported identifier MUST
- // ignore that setting."
- if VerboseLogs {
- sc.vlogf("http2: server ignoring unknown setting %v", s)
- }
- }
- return nil
-}
-
-func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
- sc.serveG.check()
- // Note: val already validated to be within range by
- // processSetting's Valid call.
-
- // "A SETTINGS frame can alter the initial flow control window
- // size for all current streams. When the value of
- // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
- // adjust the size of all stream flow control windows that it
- // maintains by the difference between the new value and the
- // old value."
- old := sc.initialWindowSize
- sc.initialWindowSize = int32(val)
- growth := sc.initialWindowSize - old // may be negative
- for _, st := range sc.streams {
- if !st.flow.add(growth) {
- // 6.9.2 Initial Flow Control Window Size
- // "An endpoint MUST treat a change to
- // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
- // control window to exceed the maximum size as a
- // connection error (Section 5.4.1) of type
- // FLOW_CONTROL_ERROR."
- return ConnectionError(ErrCodeFlowControl)
- }
- }
- return nil
-}
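Not part of the vendored file — a minimal standalone sketch of the adjustment above: when SETTINGS_INITIAL_WINDOW_SIZE changes, every existing stream's send window moves by the same signed delta. The quota numbers are arbitrary examples.

package main

import "fmt"

func main() {
	// Suppose two streams currently have 10000 and 200 bytes of send quota
	// left, and SETTINGS_INITIAL_WINDOW_SIZE is raised from 65535 to 131070.
	// Every stream window is adjusted by the same signed delta.
	old, updated := int32(65535), int32(131070)
	growth := updated - old // negative if the peer shrinks the window
	quotas := []int32{10000, 200}
	for i := range quotas {
		quotas[i] += growth
	}
	fmt.Println(quotas) // [75535 65735]
}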
-
-func (sc *serverConn) processData(f *DataFrame) error {
- sc.serveG.check()
- // "If a DATA frame is received whose stream is not in "open"
- // or "half closed (local)" state, the recipient MUST respond
- // with a stream error (Section 5.4.2) of type STREAM_CLOSED."
- id := f.Header().StreamID
- st, ok := sc.streams[id]
- if !ok || st.state != stateOpen || st.gotTrailerHeader {
- // This includes sending a RST_STREAM if the stream is
- // in stateHalfClosedLocal (which currently means that
- // the http.Handler returned, so it's done reading &
- // done writing). Try to stop the client from sending
- // more DATA.
- return StreamError{id, ErrCodeStreamClosed}
- }
- if st.body == nil {
- panic("internal error: should have a body in this state")
- }
- data := f.Data()
-
- // Sender sending more than they'd declared?
- if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
- st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
- return StreamError{id, ErrCodeStreamClosed}
- }
- if len(data) > 0 {
- // Check whether the client has flow control quota.
- if int(st.inflow.available()) < len(data) {
- return StreamError{id, ErrCodeFlowControl}
- }
- st.inflow.take(int32(len(data)))
- wrote, err := st.body.Write(data)
- if err != nil {
- return StreamError{id, ErrCodeStreamClosed}
- }
- if wrote != len(data) {
- panic("internal error: bad Writer")
- }
- st.bodyBytes += int64(len(data))
- }
- if f.StreamEnded() {
- st.endStream()
- }
- return nil
-}
-
-// endStream closes a Request.Body's pipe. It is called when a DATA
-// frame says a request body is over (or after trailers).
-func (st *stream) endStream() {
- sc := st.sc
- sc.serveG.check()
-
- if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
- st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
- st.declBodyBytes, st.bodyBytes))
- } else {
- st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
- st.body.CloseWithError(io.EOF)
- }
- st.state = stateHalfClosedRemote
-}
-
-// copyTrailersToHandlerRequest is run in the Handler's goroutine in
-// its Request.Body.Read just before it gets io.EOF.
-func (st *stream) copyTrailersToHandlerRequest() {
- for k, vv := range st.trailer {
- if _, ok := st.reqTrailer[k]; ok {
- // Only copy it over if it was pre-declared.
- st.reqTrailer[k] = vv
- }
- }
-}
-
-func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
- sc.serveG.check()
- id := f.Header().StreamID
- if sc.inGoAway {
- // Ignore.
- return nil
- }
- // http://http2.github.io/http2-spec/#rfc.section.5.1.1
- // Streams initiated by a client MUST use odd-numbered stream
- // identifiers. [...] An endpoint that receives an unexpected
- // stream identifier MUST respond with a connection error
- // (Section 5.4.1) of type PROTOCOL_ERROR.
- if id%2 != 1 {
- return ConnectionError(ErrCodeProtocol)
- }
- // A HEADERS frame can be used to create a new stream or
- // send a trailer for an open one. If we already have a stream
- // open, let it process its own HEADERS frame (trailers at this
- // point, if it's valid).
- st := sc.streams[f.Header().StreamID]
- if st != nil {
- return st.processTrailerHeaders(f)
- }
-
- // [...] The identifier of a newly established stream MUST be
- // numerically greater than all streams that the initiating
- // endpoint has opened or reserved. [...] An endpoint that
- // receives an unexpected stream identifier MUST respond with
- // a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
- if id <= sc.maxStreamID {
- return ConnectionError(ErrCodeProtocol)
- }
- sc.maxStreamID = id
-
- st = &stream{
- sc: sc,
- id: id,
- state: stateOpen,
- }
- if f.StreamEnded() {
- st.state = stateHalfClosedRemote
- }
- st.cw.Init()
-
- st.flow.conn = &sc.flow // link to conn-level counter
- st.flow.add(sc.initialWindowSize)
- st.inflow.conn = &sc.inflow // link to conn-level counter
- st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
-
- sc.streams[id] = st
- if f.HasPriority() {
- adjustStreamPriority(sc.streams, st.id, f.Priority)
- }
- sc.curOpenStreams++
- if sc.curOpenStreams == 1 {
- sc.setConnState(http.StateActive)
- }
- if sc.curOpenStreams > sc.advMaxStreams {
- // "Endpoints MUST NOT exceed the limit set by their
- // peer. An endpoint that receives a HEADERS frame
- // that causes their advertised concurrent stream
- // limit to be exceeded MUST treat this as a stream
- // error (Section 5.4.2) of type PROTOCOL_ERROR or
- // REFUSED_STREAM."
- if sc.unackedSettings == 0 {
- // They should know better.
- return StreamError{st.id, ErrCodeProtocol}
- }
- // Assume it's a network race, where they just haven't
- // received our last SETTINGS update. But actually
- // this can't happen yet, because we don't yet provide
- // a way for users to adjust server parameters at
- // runtime.
- return StreamError{st.id, ErrCodeRefusedStream}
- }
-
- rw, req, err := sc.newWriterAndRequest(st, f)
- if err != nil {
- return err
- }
- st.reqTrailer = req.Trailer
- if st.reqTrailer != nil {
- st.trailer = make(http.Header)
- }
- st.body = req.Body.(*requestBody).pipe // may be nil
- st.declBodyBytes = req.ContentLength
-
- handler := sc.handler.ServeHTTP
- if f.Truncated {
- // Their header list was too long. Send a 431 error.
- handler = handleHeaderListTooLong
- }
-
- go sc.runHandler(rw, req, handler)
- return nil
-}
-
-func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
- sc := st.sc
- sc.serveG.check()
- if st.gotTrailerHeader {
- return ConnectionError(ErrCodeProtocol)
- }
- st.gotTrailerHeader = true
- if !f.StreamEnded() {
- return StreamError{st.id, ErrCodeProtocol}
- }
-
- if len(f.PseudoFields()) > 0 {
- return StreamError{st.id, ErrCodeProtocol}
- }
- if st.trailer != nil {
- for _, hf := range f.RegularFields() {
- key := sc.canonicalHeader(hf.Name)
- st.trailer[key] = append(st.trailer[key], hf.Value)
- }
- }
- st.endStream()
- return nil
-}
-
-func (sc *serverConn) processPriority(f *PriorityFrame) error {
- adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam)
- return nil
-}
-
-func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) {
- st, ok := streams[streamID]
- if !ok {
- // TODO: not quite correct (this streamID might
- // already exist in the dep tree, but be closed), but
- // close enough for now.
- return
- }
- st.weight = priority.Weight
- parent := streams[priority.StreamDep] // might be nil
- if parent == st {
- // if client tries to set this stream to be the parent of itself
- // ignore and keep going
- return
- }
-
- // section 5.3.3: If a stream is made dependent on one of its
- // own dependencies, the formerly dependent stream is first
- // moved to be dependent on the reprioritized stream's previous
- // parent. The moved dependency retains its weight.
- for piter := parent; piter != nil; piter = piter.parent {
- if piter == st {
- parent.parent = st.parent
- break
- }
- }
- st.parent = parent
- if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) {
- for _, openStream := range streams {
- if openStream != st && openStream.parent == st.parent {
- openStream.parent = st
- }
- }
- }
-}
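A standalone sketch (with a hypothetical node type, not the package's stream struct) of the section 5.3.3 rule implemented by the loop above: when a stream is made dependent on one of its own dependencies, that dependency is first re-parented onto the stream's former parent.

package main

import "fmt"

type node struct {
	name   string
	parent *node
}

func main() {
	root := &node{name: "root"}
	a := &node{name: "A", parent: root}
	b := &node{name: "B", parent: a} // B currently depends on A

	// Re-prioritize A to depend on B, one of its own dependencies:
	// B is first moved to A's former parent, then A is attached under B.
	for p := b; p != nil; p = p.parent {
		if p == a {
			b.parent = a.parent
			break
		}
	}
	a.parent = b
	fmt.Println(a.parent.name, b.parent.name) // B root
}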
-
-func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
- sc.serveG.check()
-
- method := f.PseudoValue("method")
- path := f.PseudoValue("path")
- scheme := f.PseudoValue("scheme")
- authority := f.PseudoValue("authority")
-
- isConnect := method == "CONNECT"
- if isConnect {
- if path != "" || scheme != "" || authority == "" {
- return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
- }
- } else if method == "" || path == "" ||
- (scheme != "https" && scheme != "http") {
- // See 8.1.2.6 Malformed Requests and Responses:
- //
- // Malformed requests or responses that are detected
- // MUST be treated as a stream error (Section 5.4.2)
- // of type PROTOCOL_ERROR."
- //
- // 8.1.2.3 Request Pseudo-Header Fields
- // "All HTTP/2 requests MUST include exactly one valid
- // value for the :method, :scheme, and :path
- // pseudo-header fields"
- return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
- }
-
- bodyOpen := !f.StreamEnded()
- if method == "HEAD" && bodyOpen {
- // HEAD requests can't have bodies
- return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
- }
- var tlsState *tls.ConnectionState // nil if not scheme https
-
- if scheme == "https" {
- tlsState = sc.tlsState
- }
-
- header := make(http.Header)
- for _, hf := range f.RegularFields() {
- header.Add(sc.canonicalHeader(hf.Name), hf.Value)
- }
-
- if authority == "" {
- authority = header.Get("Host")
- }
- needsContinue := header.Get("Expect") == "100-continue"
- if needsContinue {
- header.Del("Expect")
- }
- // Merge Cookie headers into one "; "-delimited value.
- if cookies := header["Cookie"]; len(cookies) > 1 {
- header.Set("Cookie", strings.Join(cookies, "; "))
- }
-
- // Setup Trailers
- var trailer http.Header
- for _, v := range header["Trailer"] {
- for _, key := range strings.Split(v, ",") {
- key = http.CanonicalHeaderKey(strings.TrimSpace(key))
- switch key {
- case "Transfer-Encoding", "Trailer", "Content-Length":
- // Bogus. (copy of http1 rules)
- // Ignore.
- default:
- if trailer == nil {
- trailer = make(http.Header)
- }
- trailer[key] = nil
- }
- }
- }
- delete(header, "Trailer")
-
- body := &requestBody{
- conn: sc,
- stream: st,
- needsContinue: needsContinue,
- }
- var url_ *url.URL
- var requestURI string
- if isConnect {
- url_ = &url.URL{Host: authority}
- requestURI = authority // mimic HTTP/1 server behavior
- } else {
- var err error
- url_, err = url.ParseRequestURI(path)
- if err != nil {
- return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
- }
- requestURI = path
- }
- req := &http.Request{
- Method: method,
- URL: url_,
- RemoteAddr: sc.remoteAddrStr,
- Header: header,
- RequestURI: requestURI,
- Proto: "HTTP/2.0",
- ProtoMajor: 2,
- ProtoMinor: 0,
- TLS: tlsState,
- Host: authority,
- Body: body,
- Trailer: trailer,
- }
- if bodyOpen {
- st.reqBuf = sc.getRequestBodyBuf()
- body.pipe = &pipe{
- b: &fixedBuffer{buf: st.reqBuf},
- }
-
- if vv, ok := header["Content-Length"]; ok {
- req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
- } else {
- req.ContentLength = -1
- }
- }
-
- rws := responseWriterStatePool.Get().(*responseWriterState)
- bwSave := rws.bw
- *rws = responseWriterState{} // zero all the fields
- rws.conn = sc
- rws.bw = bwSave
- rws.bw.Reset(chunkWriter{rws})
- rws.stream = st
- rws.req = req
- rws.body = body
-
- rw := &responseWriter{rws: rws}
- return rw, req, nil
-}
-
-func (sc *serverConn) getRequestBodyBuf() []byte {
- sc.serveG.check()
- if buf := sc.freeRequestBodyBuf; buf != nil {
- sc.freeRequestBodyBuf = nil
- return buf
- }
- return make([]byte, initialWindowSize)
-}
-
-// Run on its own goroutine.
-func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
- didPanic := true
- defer func() {
- if didPanic {
- e := recover()
- // Same as net/http:
- const size = 64 << 10
- buf := make([]byte, size)
- buf = buf[:runtime.Stack(buf, false)]
- sc.writeFrameFromHandler(frameWriteMsg{
- write: handlerPanicRST{rw.rws.stream.id},
- stream: rw.rws.stream,
- })
- sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
- return
- }
- rw.handlerDone()
- }()
- handler(rw, req)
- didPanic = false
-}
-
-func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {
- // 10.5.1 Limits on Header Block Size:
- // .. "A server that receives a larger header block than it is
- // willing to handle can send an HTTP 431 (Request Header Fields Too
- // Large) status code"
- const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
- w.WriteHeader(statusRequestHeaderFieldsTooLarge)
- io.WriteString(w, "HTTP Error 431
Request Header Field(s) Too Large
")
-}
-
-// called from handler goroutines.
-// h may be nil.
-func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
- sc.serveG.checkNotOn() // NOT on
- var errc chan error
- if headerData.h != nil {
- // If there's a header map (which we don't own), we have to block on
- // waiting for this frame to be written, so an http.Flush mid-handler
- // writes out the correct value of keys, before a handler later potentially
- // mutates it.
- errc = errChanPool.Get().(chan error)
- }
- if err := sc.writeFrameFromHandler(frameWriteMsg{
- write: headerData,
- stream: st,
- done: errc,
- }); err != nil {
- return err
- }
- if errc != nil {
- select {
- case err := <-errc:
- errChanPool.Put(errc)
- return err
- case <-sc.doneServing:
- return errClientDisconnected
- case <-st.cw:
- return errStreamClosed
- }
- }
- return nil
-}
-
-// called from handler goroutines.
-func (sc *serverConn) write100ContinueHeaders(st *stream) {
- sc.writeFrameFromHandler(frameWriteMsg{
- write: write100ContinueHeadersFrame{st.id},
- stream: st,
- })
-}
-
-// A bodyReadMsg tells the server loop that the http.Handler read n
-// bytes of the DATA from the client on the given stream.
-type bodyReadMsg struct {
- st *stream
- n int
-}
-
-// called from handler goroutines.
-// Notes that the handler for the given stream ID read n bytes of its body
-// and schedules flow control tokens to be sent.
-func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) {
- sc.serveG.checkNotOn() // NOT on
- select {
- case sc.bodyReadCh <- bodyReadMsg{st, n}:
- case <-sc.doneServing:
- }
-}
-
-func (sc *serverConn) noteBodyRead(st *stream, n int) {
- sc.serveG.check()
- sc.sendWindowUpdate(nil, n) // conn-level
- if st.state != stateHalfClosedRemote && st.state != stateClosed {
- // Don't send this WINDOW_UPDATE if the stream is closed
- // remotely.
- sc.sendWindowUpdate(st, n)
- }
-}
-
-// st may be nil for conn-level
-func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
- sc.serveG.check()
- // "The legal range for the increment to the flow control
- // window is 1 to 2^31-1 (2,147,483,647) octets."
- // A Go Read call on 64-bit machines could in theory read
- // a larger Read than this. Very unlikely, but we handle it here
- // rather than elsewhere for now.
- const maxUint31 = 1<<31 - 1
- for n >= maxUint31 {
- sc.sendWindowUpdate32(st, maxUint31)
- n -= maxUint31
- }
- sc.sendWindowUpdate32(st, int32(n))
-}
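A hedged illustration of the chunking rule above; splitIncrements is a hypothetical helper that only makes the arithmetic concrete: an increment larger than 2^31-1 is spread over several WINDOW_UPDATE frames.

package main

import "fmt"

const maxUint31 = 1<<31 - 1

// splitIncrements returns the per-frame increments that would be sent for a
// total of n newly consumed bytes, mirroring the chunking loop above.
func splitIncrements(n int64) []int32 {
	var out []int32
	for n >= maxUint31 {
		out = append(out, int32(maxUint31))
		n -= maxUint31
	}
	if n > 0 {
		out = append(out, int32(n))
	}
	return out
}

func main() {
	fmt.Println(splitIncrements(5 << 30)) // [2147483647 2147483647 1073741826]
}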
-
-// st may be nil for conn-level
-func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
- sc.serveG.check()
- if n == 0 {
- return
- }
- if n < 0 {
- panic("negative update")
- }
- var streamID uint32
- if st != nil {
- streamID = st.id
- }
- sc.writeFrame(frameWriteMsg{
- write: writeWindowUpdate{streamID: streamID, n: uint32(n)},
- stream: st,
- })
- var ok bool
- if st == nil {
- ok = sc.inflow.add(n)
- } else {
- ok = st.inflow.add(n)
- }
- if !ok {
- panic("internal error; sent too many window updates without decrements?")
- }
-}
-
-type requestBody struct {
- stream *stream
- conn *serverConn
- closed bool
- pipe *pipe // non-nil if we have a HTTP entity message body
- needsContinue bool // need to send a 100-continue
-}
-
-func (b *requestBody) Close() error {
- if b.pipe != nil {
- b.pipe.CloseWithError(errClosedBody)
- }
- b.closed = true
- return nil
-}
-
-func (b *requestBody) Read(p []byte) (n int, err error) {
- if b.needsContinue {
- b.needsContinue = false
- b.conn.write100ContinueHeaders(b.stream)
- }
- if b.pipe == nil {
- return 0, io.EOF
- }
- n, err = b.pipe.Read(p)
- if n > 0 {
- b.conn.noteBodyReadFromHandler(b.stream, n)
- }
- return
-}
-
-// responseWriter is the http.ResponseWriter implementation. It's
-// intentionally small (1 pointer wide) to minimize garbage. The
-// responseWriterState pointer inside is zeroed at the end of a
-// request (in handlerDone) and calls on the responseWriter thereafter
-// simply crash (caller's mistake), but the much larger responseWriterState
-// and buffers are reused between multiple requests.
-type responseWriter struct {
- rws *responseWriterState
-}
-
-// Optional http.ResponseWriter interfaces implemented.
-var (
- _ http.CloseNotifier = (*responseWriter)(nil)
- _ http.Flusher = (*responseWriter)(nil)
- _ stringWriter = (*responseWriter)(nil)
-)
-
-type responseWriterState struct {
- // immutable within a request:
- stream *stream
- req *http.Request
- body *requestBody // to close at end of request, if DATA frames didn't
- conn *serverConn
-
- // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
- bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
-
- // mutated by http.Handler goroutine:
- handlerHeader http.Header // nil until called
- snapHeader http.Header // snapshot of handlerHeader at WriteHeader time
- trailers []string // set in writeChunk
- status int // status code passed to WriteHeader
- wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
- sentHeader bool // have we sent the header frame?
- handlerDone bool // handler has finished
-
- sentContentLen int64 // non-zero if handler set a Content-Length header
- wroteBytes int64
-
- closeNotifierMu sync.Mutex // guards closeNotifierCh
- closeNotifierCh chan bool // nil until first used
-}
-
-type chunkWriter struct{ rws *responseWriterState }
-
-func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
-
-func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 }
-
-// declareTrailer is called for each Trailer header when the
-// response header is written. It notes that a header will need to be
-// written in the trailers at the end of the response.
-func (rws *responseWriterState) declareTrailer(k string) {
- k = http.CanonicalHeaderKey(k)
- switch k {
- case "Transfer-Encoding", "Content-Length", "Trailer":
- // Forbidden by RFC 2616 14.40.
- return
- }
- if !strSliceContains(rws.trailers, k) {
- rws.trailers = append(rws.trailers, k)
- }
-}
-
-// writeChunk writes chunks from the bufio.Writer. But because
-// bufio.Writer may bypass its chunking, sometimes p may be
-// arbitrarily large.
-//
-// writeChunk is also responsible (on the first chunk) for sending the
-// HEADER response.
-func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
- if !rws.wroteHeader {
- rws.writeHeader(200)
- }
-
- isHeadResp := rws.req.Method == "HEAD"
- if !rws.sentHeader {
- rws.sentHeader = true
- var ctype, clen string
- if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
- rws.snapHeader.Del("Content-Length")
- clen64, err := strconv.ParseInt(clen, 10, 64)
- if err == nil && clen64 >= 0 {
- rws.sentContentLen = clen64
- } else {
- clen = ""
- }
- }
- if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
- clen = strconv.Itoa(len(p))
- }
- _, hasContentType := rws.snapHeader["Content-Type"]
- if !hasContentType && bodyAllowedForStatus(rws.status) {
- ctype = http.DetectContentType(p)
- }
- var date string
- if _, ok := rws.snapHeader["Date"]; !ok {
- // TODO(bradfitz): be faster here, like net/http? measure.
- date = time.Now().UTC().Format(http.TimeFormat)
- }
-
- for _, v := range rws.snapHeader["Trailer"] {
- foreachHeaderElement(v, rws.declareTrailer)
- }
-
- endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
- err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
- streamID: rws.stream.id,
- httpResCode: rws.status,
- h: rws.snapHeader,
- endStream: endStream,
- contentType: ctype,
- contentLength: clen,
- date: date,
- })
- if err != nil {
- return 0, err
- }
- if endStream {
- return 0, nil
- }
- }
- if isHeadResp {
- return len(p), nil
- }
- if len(p) == 0 && !rws.handlerDone {
- return 0, nil
- }
-
- if rws.handlerDone {
- rws.promoteUndeclaredTrailers()
- }
-
- endStream := rws.handlerDone && !rws.hasTrailers()
- if len(p) > 0 || endStream {
- // only send a 0 byte DATA frame if we're ending the stream.
- if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
- return 0, err
- }
- }
-
- if rws.handlerDone && rws.hasTrailers() {
- err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
- streamID: rws.stream.id,
- h: rws.handlerHeader,
- trailers: rws.trailers,
- endStream: true,
- })
- return len(p), err
- }
- return len(p), nil
-}
-
-// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
-// that, if present, signals that the map entry is actually for
-// the response trailers, and not the response headers. The prefix
-// is stripped after the ServeHTTP call finishes and the values are
-// sent in the trailers.
-//
-// This mechanism is intended only for trailers that are not known
-// prior to the headers being written. If the set of trailers is fixed
-// or known before the header is written, the normal Go trailers mechanism
-// is preferred:
-// https://golang.org/pkg/net/http/#ResponseWriter
-// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
-const TrailerPrefix = "Trailer:"
-
-// promoteUndeclaredTrailers permits http.Handlers to set trailers
-// after the header has already been flushed. Because the Go
-// ResponseWriter interface has no way to set Trailers (only the
-// Header), and because we didn't want to expand the ResponseWriter
-// interface, and because nobody used trailers, and because RFC 2616
-// says you SHOULD (but not must) predeclare any trailers in the
-// header, the official ResponseWriter rules said trailers in Go must
-// be predeclared, and then we reuse the same ResponseWriter.Header()
-// map to mean both Headers and Trailers. When it's time to write the
-// Trailers, we pick out the fields of Headers that were declared as
-// trailers. That worked for a while, until we found the first major
-// user of Trailers in the wild: gRPC (using them only over http2),
-// and gRPC libraries permit setting trailers mid-stream without
-// predeclaring them. So: change of plans. We still permit the old
-// way, but we also permit this hack: if a Header() key begins with
-// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
-// invalid token byte anyway, there is no ambiguity. (And it's already
-// filtered out.) It's mildly hacky, but not terrible.
-//
-// This method runs after the Handler is done and promotes any Header
-// fields to be trailers.
-func (rws *responseWriterState) promoteUndeclaredTrailers() {
- for k, vv := range rws.handlerHeader {
- if !strings.HasPrefix(k, TrailerPrefix) {
- continue
- }
- trailerKey := strings.TrimPrefix(k, TrailerPrefix)
- rws.declareTrailer(trailerKey)
- rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
- }
-
- if len(rws.trailers) > 1 {
- sorter := sorterPool.Get().(*sorter)
- sorter.SortStrings(rws.trailers)
- sorterPool.Put(sorter)
- }
-}
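A hedged usage sketch of the TrailerPrefix mechanism described above, written against the exported net/http API only; the header name and listen address are examples, and the promotion takes effect when the response is actually served over HTTP/2.

package main

import (
	"io"
	"log"
	"net/http"
)

// handler sets a trailer that was not known before the response headers were
// flushed, using the "Trailer:" key-prefix escape hatch described above.
func handler(w http.ResponseWriter, r *http.Request) {
	io.WriteString(w, "body written before the trailer value is known\n")
	w.(http.Flusher).Flush()
	w.Header().Set("Trailer:Grpc-Status", "0") // promoted to a trailer after the handler returns
}

func main() {
	log.Fatal(http.ListenAndServe(":8080", http.HandlerFunc(handler)))
}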
-
-func (w *responseWriter) Flush() {
- rws := w.rws
- if rws == nil {
- panic("Header called after Handler finished")
- }
- if rws.bw.Buffered() > 0 {
- if err := rws.bw.Flush(); err != nil {
- // Ignore the error. The frame writer already knows.
- return
- }
- } else {
- // The bufio.Writer won't call chunkWriter.Write
- // (writeChunk) with zero bytes, so we have to do it
- // ourselves to force the HTTP response header and/or
- // final DATA frame (with END_STREAM) to be sent.
- rws.writeChunk(nil)
- }
-}
-
-func (w *responseWriter) CloseNotify() <-chan bool {
- rws := w.rws
- if rws == nil {
- panic("CloseNotify called after Handler finished")
- }
- rws.closeNotifierMu.Lock()
- ch := rws.closeNotifierCh
- if ch == nil {
- ch = make(chan bool, 1)
- rws.closeNotifierCh = ch
- go func() {
- rws.stream.cw.Wait() // wait for close
- ch <- true
- }()
- }
- rws.closeNotifierMu.Unlock()
- return ch
-}
-
-func (w *responseWriter) Header() http.Header {
- rws := w.rws
- if rws == nil {
- panic("Header called after Handler finished")
- }
- if rws.handlerHeader == nil {
- rws.handlerHeader = make(http.Header)
- }
- return rws.handlerHeader
-}
-
-func (w *responseWriter) WriteHeader(code int) {
- rws := w.rws
- if rws == nil {
- panic("WriteHeader called after Handler finished")
- }
- rws.writeHeader(code)
-}
-
-func (rws *responseWriterState) writeHeader(code int) {
- if !rws.wroteHeader {
- rws.wroteHeader = true
- rws.status = code
- if len(rws.handlerHeader) > 0 {
- rws.snapHeader = cloneHeader(rws.handlerHeader)
- }
- }
-}
-
-func cloneHeader(h http.Header) http.Header {
- h2 := make(http.Header, len(h))
- for k, vv := range h {
- vv2 := make([]string, len(vv))
- copy(vv2, vv)
- h2[k] = vv2
- }
- return h2
-}
-
-// The Life Of A Write is like this:
-//
-// * Handler calls w.Write or w.WriteString ->
-// * -> rws.bw (*bufio.Writer) ->
-// * (Handler might call Flush)
-// * -> chunkWriter{rws}
-// * -> responseWriterState.writeChunk(p []byte)
-// * -> responseWriterState.writeChunk (most of the magic; see comment there)
-func (w *responseWriter) Write(p []byte) (n int, err error) {
- return w.write(len(p), p, "")
-}
-
-func (w *responseWriter) WriteString(s string) (n int, err error) {
- return w.write(len(s), nil, s)
-}
-
-// at most one of dataB or dataS is non-empty.
-func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
- rws := w.rws
- if rws == nil {
- panic("Write called after Handler finished")
- }
- if !rws.wroteHeader {
- w.WriteHeader(200)
- }
- if !bodyAllowedForStatus(rws.status) {
- return 0, http.ErrBodyNotAllowed
- }
- rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
- if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
- // TODO: send a RST_STREAM
- return 0, errors.New("http2: handler wrote more than declared Content-Length")
- }
-
- if dataB != nil {
- return rws.bw.Write(dataB)
- } else {
- return rws.bw.WriteString(dataS)
- }
-}
-
-func (w *responseWriter) handlerDone() {
- rws := w.rws
- rws.handlerDone = true
- w.Flush()
- w.rws = nil
- responseWriterStatePool.Put(rws)
-}
-
-// foreachHeaderElement splits v according to the "#rule" construction
-// in RFC 2616 section 2.1 and calls fn for each non-empty element.
-func foreachHeaderElement(v string, fn func(string)) {
- v = textproto.TrimString(v)
- if v == "" {
- return
- }
- if !strings.Contains(v, ",") {
- fn(v)
- return
- }
- for _, f := range strings.Split(v, ",") {
- if f = textproto.TrimString(f); f != "" {
- fn(f)
- }
- }
-}
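A small standalone illustration of the "#rule" splitting performed above, with strings.TrimSpace standing in for textproto.TrimString.

package main

import (
	"fmt"
	"strings"
)

func main() {
	v := " gzip, chunked , ,deflate "
	for _, f := range strings.Split(v, ",") {
		if f = strings.TrimSpace(f); f != "" {
			fmt.Println(f) // prints gzip, chunked, deflate; empty elements are skipped
		}
	}
}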
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/transport.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/transport.go
deleted file mode 100644
index 7d558a4bdb..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/transport.go
+++ /dev/null
@@ -1,1666 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Transport code.
-
-package http2
-
-import (
- "bufio"
- "bytes"
- "compress/gzip"
- "crypto/tls"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "sort"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/http2/hpack"
-)
-
-const (
- // transportDefaultConnFlow is how many connection-level flow control
- // tokens we give the server at start-up, past the default 64k.
- transportDefaultConnFlow = 1 << 30
-
- // transportDefaultStreamFlow is how many stream-level flow
- // control tokens we announce to the peer, and how many bytes
- // we buffer per stream.
- transportDefaultStreamFlow = 4 << 20
-
- // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
- // a stream-level WINDOW_UPDATE for at a time.
- transportDefaultStreamMinRefresh = 4 << 10
-
- defaultUserAgent = "Go-http-client/2.0"
-)
-
-// Transport is an HTTP/2 Transport.
-//
-// A Transport internally caches connections to servers. It is safe
-// for concurrent use by multiple goroutines.
-type Transport struct {
- // DialTLS specifies an optional dial function for creating
- // TLS connections for requests.
- //
- // If DialTLS is nil, tls.Dial is used.
- //
- // If the returned net.Conn has a ConnectionState method like tls.Conn,
- // it will be used to set http.Response.TLS.
- DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)
-
- // TLSClientConfig specifies the TLS configuration to use with
- // tls.Client. If nil, the default configuration is used.
- TLSClientConfig *tls.Config
-
- // ConnPool optionally specifies an alternate connection pool to use.
- // If nil, the default is used.
- ConnPool ClientConnPool
-
- // DisableCompression, if true, prevents the Transport from
- // requesting compression with an "Accept-Encoding: gzip"
- // request header when the Request contains no existing
- // Accept-Encoding value. If the Transport requests gzip on
- // its own and gets a gzipped response, it's transparently
- // decoded in the Response.Body. However, if the user
- // explicitly requested gzip it is not automatically
- // uncompressed.
- DisableCompression bool
-
- // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
- // send in the initial settings frame. It is how many bytes
- // of response headers are allowed. Unlike the http2 spec, zero here
- // means to use a default limit (currently 10MB). If you actually
- // want to advertise an unlimited value to the peer, Transport
- // interprets the highest possible value here (0xffffffff or 1<<32-1)
- // to mean no limit.
- MaxHeaderListSize uint32
-
- // t1, if non-nil, is the standard library Transport using
- // this transport. Its settings are used (but not its
- // RoundTrip method, etc).
- t1 *http.Transport
-
- connPoolOnce sync.Once
- connPoolOrDef ClientConnPool // non-nil version of ConnPool
-}
-
-func (t *Transport) maxHeaderListSize() uint32 {
- if t.MaxHeaderListSize == 0 {
- return 10 << 20
- }
- if t.MaxHeaderListSize == 0xffffffff {
- return 0
- }
- return t.MaxHeaderListSize
-}
-
-func (t *Transport) disableCompression() bool {
- return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
-}
-
-var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6")
-
-// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
-// It requires Go 1.6 or later and returns an error if the net/http package is too old
-// or if t1 has already been HTTP/2-enabled.
-func ConfigureTransport(t1 *http.Transport) error {
- _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go
- return err
-}
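A hedged usage sketch for ConfigureTransport: wiring HTTP/2 into an existing net/http Transport (Go 1.6+). The URL is a placeholder, and error handling is abbreviated.

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	t1 := &http.Transport{}
	if err := http2.ConfigureTransport(t1); err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Transport: t1}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Proto) // "HTTP/2.0" when the server negotiates h2
}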
-
-func (t *Transport) connPool() ClientConnPool {
- t.connPoolOnce.Do(t.initConnPool)
- return t.connPoolOrDef
-}
-
-func (t *Transport) initConnPool() {
- if t.ConnPool != nil {
- t.connPoolOrDef = t.ConnPool
- } else {
- t.connPoolOrDef = &clientConnPool{t: t}
- }
-}
-
-// ClientConn is the state of a single HTTP/2 client connection to an
-// HTTP/2 server.
-type ClientConn struct {
- t *Transport
- tconn net.Conn // usually *tls.Conn, except specialized impls
- tlsState *tls.ConnectionState // nil only for specialized impls
-
- // readLoop goroutine fields:
- readerDone chan struct{} // closed on error
- readerErr error // set before readerDone is closed
-
- mu sync.Mutex // guards following
- cond *sync.Cond // hold mu; broadcast on flow/closed changes
- flow flow // our conn-level flow control quota (cs.flow is per stream)
- inflow flow // peer's conn-level flow control
- closed bool
- goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
- streams map[uint32]*clientStream // client-initiated
- nextStreamID uint32
- bw *bufio.Writer
- br *bufio.Reader
- fr *Framer
- // Settings from peer:
- maxFrameSize uint32
- maxConcurrentStreams uint32
- initialWindowSize uint32
- hbuf bytes.Buffer // HPACK encoder writes into this
- henc *hpack.Encoder
- freeBuf [][]byte
-
- wmu sync.Mutex // held while writing; acquire AFTER mu if holding both
- werr error // first write error that has occurred
-}
-
-// clientStream is the state for a single HTTP/2 stream. One of these
-// is created for each Transport.RoundTrip call.
-type clientStream struct {
- cc *ClientConn
- req *http.Request
- ID uint32
- resc chan resAndError
- bufPipe pipe // buffered pipe with the flow-controlled response payload
- requestedGzip bool
-
- flow flow // guarded by cc.mu
- inflow flow // guarded by cc.mu
- bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
- readErr error // sticky read error; owned by transportResponseBody.Read
- stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
-
- peerReset chan struct{} // closed on peer reset
- resetErr error // populated before peerReset is closed
-
- done chan struct{} // closed when stream removed from cc.streams map; close calls guarded by cc.mu
-
- // owned by clientConnReadLoop:
- pastHeaders bool // got first MetaHeadersFrame (actual headers)
- pastTrailers bool // got optional second MetaHeadersFrame (trailers)
-
- trailer http.Header // accumulated trailers
- resTrailer *http.Header // client's Response.Trailer
-}
-
-// awaitRequestCancel runs in its own goroutine and waits for the user
-// to either cancel a RoundTrip request (using the provided
-// Request.Cancel channel), or for the request to be done (any way it
-// might be removed from the cc.streams map: peer reset, successful
-// completion, TCP connection breakage, etc)
-func (cs *clientStream) awaitRequestCancel(cancel <-chan struct{}) {
- if cancel == nil {
- return
- }
- select {
- case <-cancel:
- cs.bufPipe.CloseWithError(errRequestCanceled)
- cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- case <-cs.done:
- }
-}
-
-// checkReset reports any error sent in a RST_STREAM frame by the
-// server.
-func (cs *clientStream) checkReset() error {
- select {
- case <-cs.peerReset:
- return cs.resetErr
- default:
- return nil
- }
-}
-
-func (cs *clientStream) abortRequestBodyWrite(err error) {
- if err == nil {
- panic("nil error")
- }
- cc := cs.cc
- cc.mu.Lock()
- cs.stopReqBody = err
- cc.cond.Broadcast()
- cc.mu.Unlock()
-}
-
-type stickyErrWriter struct {
- w io.Writer
- err *error
-}
-
-func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
- if *sew.err != nil {
- return 0, *sew.err
- }
- n, err = sew.w.Write(p)
- *sew.err = err
- return
-}
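A tiny self-contained sketch of the sticky-error behavior above: after the first failed write, every later write returns the recorded error without touching the underlying writer. failWriter is a hypothetical stand-in for a broken connection.

package main

import (
	"errors"
	"fmt"
	"io"
)

type stickyErrWriter struct {
	w   io.Writer
	err *error
}

func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
	if *sew.err != nil {
		return 0, *sew.err
	}
	n, err = sew.w.Write(p)
	*sew.err = err
	return
}

type failWriter struct{}

func (failWriter) Write(p []byte) (int, error) { return 0, errors.New("boom") }

func main() {
	var werr error
	w := stickyErrWriter{w: failWriter{}, err: &werr}
	w.Write([]byte("a"))           // fails; the error is recorded in werr
	_, err := w.Write([]byte("b")) // short-circuits with the recorded error
	fmt.Println(err, werr)         // boom boom
}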
-
-var ErrNoCachedConn = errors.New("http2: no cached connection was available")
-
-// RoundTripOpt are options for the Transport.RoundTripOpt method.
-type RoundTripOpt struct {
- // OnlyCachedConn controls whether RoundTripOpt may
- // create a new TCP connection. If set true and
- // no cached connection is available, RoundTripOpt
- // will return ErrNoCachedConn.
- OnlyCachedConn bool
-}
-
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
- return t.RoundTripOpt(req, RoundTripOpt{})
-}
-
-// authorityAddr converts a given authority (a host/IP, or host:port / ip:port)
-// into a host:port. The port 443 is added if needed.
-func authorityAddr(authority string) (addr string) {
- if _, _, err := net.SplitHostPort(authority); err == nil {
- return authority
- }
- return net.JoinHostPort(authority, "443")
-}
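The port-defaulting behavior of authorityAddr, shown on a couple of concrete inputs (the function body is repeated here so the sketch runs on its own):

package main

import (
	"fmt"
	"net"
)

func authorityAddr(authority string) string {
	if _, _, err := net.SplitHostPort(authority); err == nil {
		return authority
	}
	return net.JoinHostPort(authority, "443")
}

func main() {
	fmt.Println(authorityAddr("example.com"))      // example.com:443
	fmt.Println(authorityAddr("example.com:8443")) // example.com:8443
}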
-
-// RoundTripOpt is like RoundTrip, but takes options.
-func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
- if req.URL.Scheme != "https" {
- return nil, errors.New("http2: unsupported scheme")
- }
-
- addr := authorityAddr(req.URL.Host)
- for {
- cc, err := t.connPool().GetClientConn(req, addr)
- if err != nil {
- t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
- return nil, err
- }
- res, err := cc.RoundTrip(req)
- if shouldRetryRequest(req, err) {
- continue
- }
- if err != nil {
- t.vlogf("RoundTrip failure: %v", err)
- return nil, err
- }
- return res, nil
- }
-}
-
-// CloseIdleConnections closes any connections which were previously
-// connected from previous requests but are now sitting idle.
-// It does not interrupt any connections currently in use.
-func (t *Transport) CloseIdleConnections() {
- if cp, ok := t.connPool().(*clientConnPool); ok {
- cp.closeIdleConnections()
- }
-}
-
-var (
- errClientConnClosed = errors.New("http2: client conn is closed")
- errClientConnUnusable = errors.New("http2: client conn not usable")
-)
-
-func shouldRetryRequest(req *http.Request, err error) bool {
- // TODO: retry GET requests (no bodies) more aggressively, if shutdown
- // before response.
- return err == errClientConnUnusable
-}
-
-func (t *Transport) dialClientConn(addr string) (*ClientConn, error) {
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return nil, err
- }
- tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host))
- if err != nil {
- return nil, err
- }
- return t.NewClientConn(tconn)
-}
-
-func (t *Transport) newTLSConfig(host string) *tls.Config {
- cfg := new(tls.Config)
- if t.TLSClientConfig != nil {
- *cfg = *t.TLSClientConfig
- }
- if !strSliceContains(cfg.NextProtos, NextProtoTLS) {
- cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...)
- }
- if cfg.ServerName == "" {
- cfg.ServerName = host
- }
- return cfg
-}
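A standalone sketch of the two defaults applied in newTLSConfig above: the "h2" ALPN protocol is prepended when missing and ServerName falls back to the dialed host. nextProtoTLS here is a local stand-in for the package's NextProtoTLS constant.

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	const nextProtoTLS = "h2" // stand-in for the package constant
	cfg := &tls.Config{NextProtos: []string{"http/1.1"}}
	host := "example.com"

	found := false
	for _, p := range cfg.NextProtos {
		if p == nextProtoTLS {
			found = true
		}
	}
	if !found {
		cfg.NextProtos = append([]string{nextProtoTLS}, cfg.NextProtos...)
	}
	if cfg.ServerName == "" {
		cfg.ServerName = host
	}
	fmt.Println(cfg.NextProtos, cfg.ServerName) // [h2 http/1.1] example.com
}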
-
-func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) {
- if t.DialTLS != nil {
- return t.DialTLS
- }
- return t.dialTLSDefault
-}
-
-func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) {
- cn, err := tls.Dial(network, addr, cfg)
- if err != nil {
- return nil, err
- }
- if err := cn.Handshake(); err != nil {
- return nil, err
- }
- if !cfg.InsecureSkipVerify {
- if err := cn.VerifyHostname(cfg.ServerName); err != nil {
- return nil, err
- }
- }
- state := cn.ConnectionState()
- if p := state.NegotiatedProtocol; p != NextProtoTLS {
- return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS)
- }
- if !state.NegotiatedProtocolIsMutual {
- return nil, errors.New("http2: could not negotiate protocol mutually")
- }
- return cn, nil
-}
-
-// disableKeepAlives reports whether connections should be closed as
-// soon as possible after handling the first request.
-func (t *Transport) disableKeepAlives() bool {
- return t.t1 != nil && t.t1.DisableKeepAlives
-}
-
-func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
- if VerboseLogs {
- t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr())
- }
- if _, err := c.Write(clientPreface); err != nil {
- t.vlogf("client preface write error: %v", err)
- return nil, err
- }
-
- cc := &ClientConn{
- t: t,
- tconn: c,
- readerDone: make(chan struct{}),
- nextStreamID: 1,
- maxFrameSize: 16 << 10, // spec default
- initialWindowSize: 65535, // spec default
- maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
- streams: make(map[uint32]*clientStream),
- }
- cc.cond = sync.NewCond(&cc.mu)
- cc.flow.add(int32(initialWindowSize))
-
- // TODO: adjust this writer size to account for frame size +
- // MTU + crypto/tls record padding.
- cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr})
- cc.br = bufio.NewReader(c)
- cc.fr = NewFramer(cc.bw, cc.br)
- cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
- cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
-
- // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
- // henc in response to SETTINGS frames?
- cc.henc = hpack.NewEncoder(&cc.hbuf)
-
- if cs, ok := c.(connectionStater); ok {
- state := cs.ConnectionState()
- cc.tlsState = &state
- }
-
- initialSettings := []Setting{
- {ID: SettingEnablePush, Val: 0},
- {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
- }
- if max := t.maxHeaderListSize(); max != 0 {
- initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
- }
- cc.fr.WriteSettings(initialSettings...)
- cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
- cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
- cc.bw.Flush()
- if cc.werr != nil {
- return nil, cc.werr
- }
-
- // Read the obligatory SETTINGS frame
- f, err := cc.fr.ReadFrame()
- if err != nil {
- return nil, err
- }
- sf, ok := f.(*SettingsFrame)
- if !ok {
- return nil, fmt.Errorf("expected settings frame, got: %T", f)
- }
- cc.fr.WriteSettingsAck()
- cc.bw.Flush()
-
- sf.ForeachSetting(func(s Setting) error {
- switch s.ID {
- case SettingMaxFrameSize:
- cc.maxFrameSize = s.Val
- case SettingMaxConcurrentStreams:
- cc.maxConcurrentStreams = s.Val
- case SettingInitialWindowSize:
- cc.initialWindowSize = s.Val
- default:
- // TODO(bradfitz): handle more; at least SETTINGS_HEADER_TABLE_SIZE?
- t.vlogf("Unhandled Setting: %v", s)
- }
- return nil
- })
-
- go cc.readLoop()
- return cc, nil
-}
-
-func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- cc.goAway = f
-}
-
-func (cc *ClientConn) CanTakeNewRequest() bool {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- return cc.canTakeNewRequestLocked()
-}
-
-func (cc *ClientConn) canTakeNewRequestLocked() bool {
- return cc.goAway == nil && !cc.closed &&
- int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
- cc.nextStreamID < 2147483647
-}
-
-func (cc *ClientConn) closeIfIdle() {
- cc.mu.Lock()
- if len(cc.streams) > 0 {
- cc.mu.Unlock()
- return
- }
- cc.closed = true
- // TODO: do clients send GOAWAY too? maybe? Just Close:
- cc.mu.Unlock()
-
- cc.tconn.Close()
-}
-
-const maxAllocFrameSize = 512 << 10
-
-// frameBuffer returns a scratch buffer suitable for writing DATA frames.
-// They're capped at the min of the peer's max frame size and 512KB
-// (kinda arbitrarily), but definitely capped so we don't allocate 4GB
-// buffers.
-func (cc *ClientConn) frameScratchBuffer() []byte {
- cc.mu.Lock()
- size := cc.maxFrameSize
- if size > maxAllocFrameSize {
- size = maxAllocFrameSize
- }
- for i, buf := range cc.freeBuf {
- if len(buf) >= int(size) {
- cc.freeBuf[i] = nil
- cc.mu.Unlock()
- return buf[:size]
- }
- }
- cc.mu.Unlock()
- return make([]byte, size)
-}
-
-func (cc *ClientConn) putFrameScratchBuffer(buf []byte) {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
- if len(cc.freeBuf) < maxBufs {
- cc.freeBuf = append(cc.freeBuf, buf)
- return
- }
- for i, old := range cc.freeBuf {
- if old == nil {
- cc.freeBuf[i] = buf
- return
- }
- }
- // forget about it.
-}
-
-// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
-// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
-var errRequestCanceled = errors.New("net/http: request canceled")
-
-func commaSeparatedTrailers(req *http.Request) (string, error) {
- keys := make([]string, 0, len(req.Trailer))
- for k := range req.Trailer {
- k = http.CanonicalHeaderKey(k)
- switch k {
- case "Transfer-Encoding", "Trailer", "Content-Length":
- return "", &badStringError{"invalid Trailer key", k}
- }
- keys = append(keys, k)
- }
- if len(keys) > 0 {
- sort.Strings(keys)
- // TODO: could do better allocation-wise here, but trailers are rare,
- // so being lazy for now.
- return strings.Join(keys, ","), nil
- }
- return "", nil
-}
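A standalone sketch of what commaSeparatedTrailers produces for a set of declared trailer keys: canonicalized, sorted, and comma-joined. The header names are examples.

package main

import (
	"fmt"
	"net/http"
	"sort"
	"strings"
)

func main() {
	trailer := http.Header{"x-checksum": nil, "X-Signature": nil}
	keys := make([]string, 0, len(trailer))
	for k := range trailer {
		keys = append(keys, http.CanonicalHeaderKey(k))
	}
	sort.Strings(keys)
	fmt.Println(strings.Join(keys, ",")) // X-Checksum,X-Signature
}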
-
-func (cc *ClientConn) responseHeaderTimeout() time.Duration {
- if cc.t.t1 != nil {
- return cc.t.t1.ResponseHeaderTimeout
- }
- // No way to do this (yet?) with just an http2.Transport. Probably
- // no need; Request.Cancel is the new way. We only need to support
- // this for compatibility with the old http.Transport fields when
- // we're doing transparent http2.
- return 0
-}
-
-// checkConnHeaders checks whether req has any invalid connection-level headers.
-// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
-// Certain headers are special-cased as okay but not transmitted later.
-func checkConnHeaders(req *http.Request) error {
- if v := req.Header.Get("Upgrade"); v != "" {
- return errors.New("http2: invalid Upgrade request header")
- }
- if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 {
- return errors.New("http2: invalid Transfer-Encoding request header")
- }
- if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 {
- return errors.New("http2: invalid Connection request header")
- }
- return nil
-}
-
-func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
- if err := checkConnHeaders(req); err != nil {
- return nil, err
- }
-
- trailers, err := commaSeparatedTrailers(req)
- if err != nil {
- return nil, err
- }
- hasTrailers := trailers != ""
-
- var body io.Reader = req.Body
- contentLen := req.ContentLength
- if req.Body != nil && contentLen == 0 {
- // Test to see if it's actually zero or just unset.
- var buf [1]byte
- n, rerr := io.ReadFull(body, buf[:])
- if rerr != nil && rerr != io.EOF {
- contentLen = -1
- body = errorReader{rerr}
- } else if n == 1 {
- // Oh, guess there is data in this Body Reader after all.
- // The ContentLength field just wasn't set.
- // Stitch the Body back together again, re-attaching our
- // consumed byte.
- contentLen = -1
- body = io.MultiReader(bytes.NewReader(buf[:]), body)
- } else {
- // Body is actually empty.
- body = nil
- }
- }
-
- cc.mu.Lock()
- if cc.closed || !cc.canTakeNewRequestLocked() {
- cc.mu.Unlock()
- return nil, errClientConnUnusable
- }
-
- cs := cc.newStream()
- cs.req = req
- hasBody := body != nil
-
- // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
- if !cc.t.disableCompression() &&
- req.Header.Get("Accept-Encoding") == "" &&
- req.Header.Get("Range") == "" &&
- req.Method != "HEAD" {
- // Request gzip only, not deflate. Deflate is ambiguous and
- // not as universally supported anyway.
- // See: http://www.gzip.org/zlib/zlib_faq.html#faq38
- //
- // Note that we don't request this for HEAD requests,
- // due to a bug in nginx:
- // http://trac.nginx.org/nginx/ticket/358
- // https://golang.org/issue/5522
- //
- // We don't request gzip if the request is for a range, since
- // auto-decoding a portion of a gzipped document will just fail
- // anyway. See https://golang.org/issue/8923
- cs.requestedGzip = true
- }
-
- // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
- // sent by writeRequestBody below, along with any Trailers,
- // again in form HEADERS{1}, CONTINUATION{0,})
- hdrs := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen)
- cc.wmu.Lock()
- endStream := !hasBody && !hasTrailers
- werr := cc.writeHeaders(cs.ID, endStream, hdrs)
- cc.wmu.Unlock()
- cc.mu.Unlock()
-
- if werr != nil {
- if hasBody {
- req.Body.Close() // per RoundTripper contract
- }
- cc.forgetStreamID(cs.ID)
- // Don't bother sending a RST_STREAM (our write already failed;
- // no need to keep writing)
- return nil, werr
- }
-
- var respHeaderTimer <-chan time.Time
- var bodyCopyErrc chan error // result of body copy
- if hasBody {
- bodyCopyErrc = make(chan error, 1)
- go func() {
- bodyCopyErrc <- cs.writeRequestBody(body, req.Body)
- }()
- } else {
- if d := cc.responseHeaderTimeout(); d != 0 {
- timer := time.NewTimer(d)
- defer timer.Stop()
- respHeaderTimer = timer.C
- }
- }
-
- readLoopResCh := cs.resc
- requestCanceledCh := requestCancel(req)
- bodyWritten := false
-
- for {
- select {
- case re := <-readLoopResCh:
- res := re.res
- if re.err != nil || res.StatusCode > 299 {
- // On error or status code 3xx, 4xx, 5xx, etc abort any
- // ongoing write, assuming that the server doesn't care
- // about our request body. If the server replied with 1xx or
- // 2xx, however, then assume the server DOES potentially
- // want our body (e.g. full-duplex streaming:
- // golang.org/issue/13444). If it turns out the server
- // doesn't, they'll RST_STREAM us soon enough. This is a
- // heuristic to avoid adding knobs to Transport. Hopefully
- // we can keep it.
- cs.abortRequestBodyWrite(errStopReqBodyWrite)
- }
- if re.err != nil {
- cc.forgetStreamID(cs.ID)
- return nil, re.err
- }
- res.Request = req
- res.TLS = cc.tlsState
- return res, nil
- case <-respHeaderTimer:
- cc.forgetStreamID(cs.ID)
- if !hasBody || bodyWritten {
- cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- } else {
- cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
- }
- return nil, errTimeout
- case <-requestCanceledCh:
- cc.forgetStreamID(cs.ID)
- if !hasBody || bodyWritten {
- cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- } else {
- cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
- }
- return nil, errRequestCanceled
- case <-cs.peerReset:
- // processResetStream already removed the
- // stream from the streams map; no need for
- // forgetStreamID.
- return nil, cs.resetErr
- case err := <-bodyCopyErrc:
- if err != nil {
- return nil, err
- }
- bodyWritten = true
- if d := cc.responseHeaderTimeout(); d != 0 {
- timer := time.NewTimer(d)
- defer timer.Stop()
- respHeaderTimer = timer.C
- }
- }
- }
-}
-
-// requires cc.wmu be held
-func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error {
- first := true // first frame written (HEADERS is first, then CONTINUATION)
- frameSize := int(cc.maxFrameSize)
- for len(hdrs) > 0 && cc.werr == nil {
- chunk := hdrs
- if len(chunk) > frameSize {
- chunk = chunk[:frameSize]
- }
- hdrs = hdrs[len(chunk):]
- endHeaders := len(hdrs) == 0
- if first {
- cc.fr.WriteHeaders(HeadersFrameParam{
- StreamID: streamID,
- BlockFragment: chunk,
- EndStream: endStream,
- EndHeaders: endHeaders,
- })
- first = false
- } else {
- cc.fr.WriteContinuation(streamID, endHeaders, chunk)
- }
- }
- // TODO(bradfitz): this Flush could potentially block (as
- // could the WriteHeaders call(s) above), which means they
- // wouldn't respond to Request.Cancel being readable. That's
- // rare, but this should probably be in a goroutine.
- cc.bw.Flush()
- return cc.werr
-}
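The loop above splits one HPACK-encoded header block into a HEADERS frame followed by zero or more CONTINUATION frames, setting END_HEADERS only on the last fragment. A minimal, self-contained sketch of just that chunking step (the 16-byte frame size is an arbitrary value for illustration):

```go
package main

import "fmt"

// splitHeaderBlock splits an encoded header block into fragments of at
// most frameSize bytes, mirroring the HEADERS/CONTINUATION loop above.
func splitHeaderBlock(hdrs []byte, frameSize int) [][]byte {
	var frags [][]byte
	for len(hdrs) > 0 {
		chunk := hdrs
		if len(chunk) > frameSize {
			chunk = chunk[:frameSize]
		}
		hdrs = hdrs[len(chunk):]
		frags = append(frags, chunk)
	}
	return frags
}

func main() {
	block := make([]byte, 40) // stand-in for an HPACK-encoded header block
	frags := splitHeaderBlock(block, 16)
	for i, frag := range frags {
		kind := "CONTINUATION"
		if i == 0 {
			kind = "HEADERS"
		}
		fmt.Printf("%s: %d bytes, END_HEADERS=%v\n", kind, len(frag), i == len(frags)-1)
	}
}
```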
-
-// internal error values; they don't escape to callers
-var (
- // abort request body write; don't send cancel
- errStopReqBodyWrite = errors.New("http2: aborting request body write")
-
- // abort request body write, but send a stream reset with error code CANCEL.
- errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
-)
-
-func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
- cc := cs.cc
- sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
- buf := cc.frameScratchBuffer()
- defer cc.putFrameScratchBuffer(buf)
-
- defer func() {
- // TODO: write h12Compare test showing whether
- // Request.Body is closed by the Transport,
- // and in multiple cases: server replies <=299 and >299
- // while still writing request body
- cerr := bodyCloser.Close()
- if err == nil {
- err = cerr
- }
- }()
-
- req := cs.req
- hasTrailers := req.Trailer != nil
-
- var sawEOF bool
- for !sawEOF {
- n, err := body.Read(buf)
- if err == io.EOF {
- sawEOF = true
- err = nil
- } else if err != nil {
- return err
- }
-
- remain := buf[:n]
- for len(remain) > 0 && err == nil {
- var allowed int32
- allowed, err = cs.awaitFlowControl(len(remain))
- switch {
- case err == errStopReqBodyWrite:
- return err
- case err == errStopReqBodyWriteAndCancel:
- cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- return err
- case err != nil:
- return err
- }
- cc.wmu.Lock()
- data := remain[:allowed]
- remain = remain[allowed:]
- sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
- err = cc.fr.WriteData(cs.ID, sentEnd, data)
- if err == nil {
- // TODO(bradfitz): this flush is for latency, not bandwidth.
- // Most requests won't need this. Make this opt-in or opt-out?
- // Use some heuristic on the body type? Nagle-like timers?
- // Based on 'n'? Only last chunk of this for loop, unless flow control
- // tokens are low? For now, always:
- err = cc.bw.Flush()
- }
- cc.wmu.Unlock()
- }
- if err != nil {
- return err
- }
- }
-
- cc.wmu.Lock()
- if !sentEnd {
- var trls []byte
- if hasTrailers {
- cc.mu.Lock()
- trls = cc.encodeTrailers(req)
- cc.mu.Unlock()
- }
-
- // Avoid forgetting to send an END_STREAM if the encoded
- // trailers are 0 bytes. Both results produce an END_STREAM.
- if len(trls) > 0 {
- err = cc.writeHeaders(cs.ID, true, trls)
- } else {
- err = cc.fr.WriteData(cs.ID, true, nil)
- }
- }
- if ferr := cc.bw.Flush(); ferr != nil && err == nil {
- err = ferr
- }
- cc.wmu.Unlock()
-
- return err
-}
-
-// awaitFlowControl waits for [1, min(maxBytes, cc.maxFrameSize)] flow
-// control tokens from the server.
-// It returns either the non-zero number of tokens taken or an error
-// if the stream is dead.
-func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
- cc := cs.cc
- cc.mu.Lock()
- defer cc.mu.Unlock()
- for {
- if cc.closed {
- return 0, errClientConnClosed
- }
- if cs.stopReqBody != nil {
- return 0, cs.stopReqBody
- }
- if err := cs.checkReset(); err != nil {
- return 0, err
- }
- if a := cs.flow.available(); a > 0 {
- take := a
- if int(take) > maxBytes {
- take = int32(maxBytes) // can't truncate int; take is int32
- }
- if take > int32(cc.maxFrameSize) {
- take = int32(cc.maxFrameSize)
- }
- cs.flow.take(take)
- return take, nil
- }
- cc.cond.Wait()
- }
-}
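The take computed above is simply min(stream window, caller's buffer, peer's SETTINGS_MAX_FRAME_SIZE); the loop only blocks on cc.cond when the stream window is empty. A standalone sketch of that size clamp, with the locking and waiting omitted:

```go
package main

import "fmt"

// clampTake returns how many DATA bytes to take given the stream's
// available flow-control window, the caller's buffer size, and the
// peer's SETTINGS_MAX_FRAME_SIZE, mirroring awaitFlowControl above.
func clampTake(available int32, maxBytes int, maxFrameSize uint32) int32 {
	take := available
	if int(take) > maxBytes {
		take = int32(maxBytes)
	}
	if take > int32(maxFrameSize) {
		take = int32(maxFrameSize)
	}
	return take
}

func main() {
	fmt.Println(clampTake(65535, 4096, 16384))  // 4096: limited by the caller's buffer
	fmt.Println(clampTake(1000, 4096, 16384))   // 1000: limited by the stream window
	fmt.Println(clampTake(65535, 1<<20, 16384)) // 16384: limited by the max frame size
}
```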
-
-type badStringError struct {
- what string
- str string
-}
-
-func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
-
-// requires cc.mu be held.
-func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) []byte {
- cc.hbuf.Reset()
-
- host := req.Host
- if host == "" {
- host = req.URL.Host
- }
-
- // 8.1.2.3 Request Pseudo-Header Fields
- // The :path pseudo-header field includes the path and query parts of the
- // target URI (the path-absolute production and optionally a '?' character
- // followed by the query production (see Sections 3.3 and 3.4 of
- // [RFC3986]).
- cc.writeHeader(":authority", host)
- cc.writeHeader(":method", req.Method)
- if req.Method != "CONNECT" {
- cc.writeHeader(":path", req.URL.RequestURI())
- cc.writeHeader(":scheme", "https")
- }
- if trailers != "" {
- cc.writeHeader("trailer", trailers)
- }
-
- var didUA bool
- for k, vv := range req.Header {
- lowKey := strings.ToLower(k)
- switch lowKey {
- case "host", "content-length":
- // Host is :authority, already sent.
- // Content-Length is automatic, set below.
- continue
- case "connection", "proxy-connection", "transfer-encoding", "upgrade":
- // Per 8.1.2.2 Connection-Specific Header
- // Fields, don't send connection-specific
- // fields. We deal with these earlier in
- // RoundTrip, deciding whether they're
- // error-worthy, but we don't want to mutate
- // the user's *Request, so at this point just
- // skip over them.
- continue
- case "user-agent":
- // Match Go's http1 behavior: at most one
- // User-Agent. If set to nil or empty string,
- // then omit it. Otherwise if not mentioned,
- // include the default (below).
- didUA = true
- if len(vv) < 1 {
- continue
- }
- vv = vv[:1]
- if vv[0] == "" {
- continue
- }
- }
- for _, v := range vv {
- cc.writeHeader(lowKey, v)
- }
- }
- if shouldSendReqContentLength(req.Method, contentLength) {
- cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10))
- }
- if addGzipHeader {
- cc.writeHeader("accept-encoding", "gzip")
- }
- if !didUA {
- cc.writeHeader("user-agent", defaultUserAgent)
- }
- return cc.hbuf.Bytes()
-}
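encodeHeaders writes pseudo-header fields first, then lowercased regular fields, all through a single hpack.Encoder bound to cc.hbuf. A small, self-contained sketch of that pattern using the public hpack API (the header names and values below are illustrative only):

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)

	// Pseudo-header fields must precede regular fields.
	for _, f := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":scheme", Value: "https"},
		{Name: ":authority", Value: "example.com"},
		{Name: ":path", Value: "/"},
		{Name: "user-agent", Value: "example-client"},
	} {
		if err := enc.WriteField(f); err != nil {
			panic(err)
		}
	}
	fmt.Printf("encoded header block: %d bytes\n", buf.Len())
}
```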
-
-// shouldSendReqContentLength reports whether the http2.Transport should send
-// a "content-length" request header. This logic is basically a copy of the net/http
-// transferWriter.shouldSendContentLength.
-// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
-// -1 means unknown.
-func shouldSendReqContentLength(method string, contentLength int64) bool {
- if contentLength > 0 {
- return true
- }
- if contentLength < 0 {
- return false
- }
- // For zero bodies, whether we send a content-length depends on the method.
- // It also kinda doesn't matter for http2 either way, with END_STREAM.
- switch method {
- case "POST", "PUT", "PATCH":
- return true
- default:
- return false
- }
-}
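For illustration, a tiny standalone copy of that decision applied to a few representative inputs (the method/length pairs are made up):

```go
package main

import "fmt"

// sendsContentLength mirrors shouldSendReqContentLength above: positive
// lengths are always sent, unknown (-1) never is, and a zero length is
// sent only for methods that conventionally carry a body.
func sendsContentLength(method string, contentLength int64) bool {
	if contentLength > 0 {
		return true
	}
	if contentLength < 0 {
		return false
	}
	switch method {
	case "POST", "PUT", "PATCH":
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(sendsContentLength("GET", -1))  // false: unknown length
	fmt.Println(sendsContentLength("GET", 0))   // false: zero body, non-body method
	fmt.Println(sendsContentLength("POST", 0))  // true: explicit empty body
	fmt.Println(sendsContentLength("PUT", 512)) // true: known positive length
}
```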
-
-// requires cc.mu be held.
-func (cc *ClientConn) encodeTrailers(req *http.Request) []byte {
- cc.hbuf.Reset()
- for k, vv := range req.Trailer {
- // Transfer-Encoding, etc. have already been filtered at the
- // start of RoundTrip
- lowKey := strings.ToLower(k)
- for _, v := range vv {
- cc.writeHeader(lowKey, v)
- }
- }
- return cc.hbuf.Bytes()
-}
-
-func (cc *ClientConn) writeHeader(name, value string) {
- if VerboseLogs {
- log.Printf("http2: Transport encoding header %q = %q", name, value)
- }
- cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
-}
-
-type resAndError struct {
- res *http.Response
- err error
-}
-
-// requires cc.mu be held.
-func (cc *ClientConn) newStream() *clientStream {
- cs := &clientStream{
- cc: cc,
- ID: cc.nextStreamID,
- resc: make(chan resAndError, 1),
- peerReset: make(chan struct{}),
- done: make(chan struct{}),
- }
- cs.flow.add(int32(cc.initialWindowSize))
- cs.flow.setConnFlow(&cc.flow)
- cs.inflow.add(transportDefaultStreamFlow)
- cs.inflow.setConnFlow(&cc.inflow)
- cc.nextStreamID += 2
- cc.streams[cs.ID] = cs
- return cs
-}
-
-func (cc *ClientConn) forgetStreamID(id uint32) {
- cc.streamByID(id, true)
-}
-
-func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- cs := cc.streams[id]
- if andRemove && cs != nil && !cc.closed {
- delete(cc.streams, id)
- close(cs.done)
- }
- return cs
-}
-
-// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
-type clientConnReadLoop struct {
- cc *ClientConn
- activeRes map[uint32]*clientStream // keyed by streamID
- closeWhenIdle bool
-}
-
-// readLoop runs in its own goroutine and reads and dispatches frames.
-func (cc *ClientConn) readLoop() {
- rl := &clientConnReadLoop{
- cc: cc,
- activeRes: make(map[uint32]*clientStream),
- }
-
- defer rl.cleanup()
- cc.readerErr = rl.run()
- if ce, ok := cc.readerErr.(ConnectionError); ok {
- cc.wmu.Lock()
- cc.fr.WriteGoAway(0, ErrCode(ce), nil)
- cc.wmu.Unlock()
- }
-}
-
-func (rl *clientConnReadLoop) cleanup() {
- cc := rl.cc
- defer cc.tconn.Close()
- defer cc.t.connPool().MarkDead(cc)
- defer close(cc.readerDone)
-
- // Close any response bodies if the server closes prematurely.
- // TODO: also do this if we've written the headers but not
- // gotten a response yet.
- err := cc.readerErr
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- cc.mu.Lock()
- for _, cs := range rl.activeRes {
- cs.bufPipe.CloseWithError(err)
- }
- for _, cs := range cc.streams {
- select {
- case cs.resc <- resAndError{err: err}:
- default:
- }
- close(cs.done)
- }
- cc.closed = true
- cc.cond.Broadcast()
- cc.mu.Unlock()
-}
-
-func (rl *clientConnReadLoop) run() error {
- cc := rl.cc
- rl.closeWhenIdle = cc.t.disableKeepAlives()
- gotReply := false // ever saw a reply
- for {
- f, err := cc.fr.ReadFrame()
- if err != nil {
- cc.vlogf("Transport readFrame error: (%T) %v", err, err)
- }
- if se, ok := err.(StreamError); ok {
- if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil {
- rl.endStreamError(cs, cc.fr.errDetail)
- }
- continue
- } else if err != nil {
- return err
- }
- if VerboseLogs {
- cc.vlogf("http2: Transport received %s", summarizeFrame(f))
- }
- maybeIdle := false // whether frame might transition us to idle
-
- switch f := f.(type) {
- case *MetaHeadersFrame:
- err = rl.processHeaders(f)
- maybeIdle = true
- gotReply = true
- case *DataFrame:
- err = rl.processData(f)
- maybeIdle = true
- case *GoAwayFrame:
- err = rl.processGoAway(f)
- maybeIdle = true
- case *RSTStreamFrame:
- err = rl.processResetStream(f)
- maybeIdle = true
- case *SettingsFrame:
- err = rl.processSettings(f)
- case *PushPromiseFrame:
- err = rl.processPushPromise(f)
- case *WindowUpdateFrame:
- err = rl.processWindowUpdate(f)
- case *PingFrame:
- err = rl.processPing(f)
- default:
- cc.logf("Transport: unhandled response frame type %T", f)
- }
- if err != nil {
- return err
- }
- if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 {
- cc.closeIfIdle()
- }
- }
-}
-
-func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
- cc := rl.cc
- cs := cc.streamByID(f.StreamID, f.StreamEnded())
- if cs == nil {
- // We'd get here if we canceled a request while the
- // server had its response still in flight. So if this
- // was just something we canceled, ignore it.
- return nil
- }
- if !cs.pastHeaders {
- cs.pastHeaders = true
- } else {
- return rl.processTrailers(cs, f)
- }
-
- res, err := rl.handleResponse(cs, f)
- if err != nil {
- if _, ok := err.(ConnectionError); ok {
- return err
- }
- // Any other error type is a stream error.
- cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err)
- cs.resc <- resAndError{err: err}
- return nil // return nil from process* funcs to keep conn alive
- }
- if res == nil {
- // (nil, nil) special case. See handleResponse docs.
- return nil
- }
- if res.Body != noBody {
- rl.activeRes[cs.ID] = cs
- }
- cs.resTrailer = &res.Trailer
- cs.resc <- resAndError{res: res}
- return nil
-}
-
-// may return error types nil, or ConnectionError. Any other error value
-// is a StreamError of type ErrCodeProtocol. The returned error in that case
-// is the detail.
-//
-// As a special case, handleResponse may return (nil, nil) to skip the
-// frame (currently only used for 100 expect continue). This special
-// case is going away after Issue 13851 is fixed.
-func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {
- if f.Truncated {
- return nil, errResponseHeaderListSize
- }
-
- status := f.PseudoValue("status")
- if status == "" {
- return nil, errors.New("missing status pseudo header")
- }
- statusCode, err := strconv.Atoi(status)
- if err != nil {
- return nil, errors.New("malformed non-numeric status pseudo header")
- }
-
- if statusCode == 100 {
- // Just skip 100-continue response headers for now.
- // TODO: golang.org/issue/13851 for doing it properly.
- cs.pastHeaders = false // do it all again
- return nil, nil
- }
-
- header := make(http.Header)
- res := &http.Response{
- Proto: "HTTP/2.0",
- ProtoMajor: 2,
- Header: header,
- StatusCode: statusCode,
- Status: status + " " + http.StatusText(statusCode),
- }
- for _, hf := range f.RegularFields() {
- key := http.CanonicalHeaderKey(hf.Name)
- if key == "Trailer" {
- t := res.Trailer
- if t == nil {
- t = make(http.Header)
- res.Trailer = t
- }
- foreachHeaderElement(hf.Value, func(v string) {
- t[http.CanonicalHeaderKey(v)] = nil
- })
- } else {
- header[key] = append(header[key], hf.Value)
- }
- }
-
- streamEnded := f.StreamEnded()
- if !streamEnded || cs.req.Method == "HEAD" {
- res.ContentLength = -1
- if clens := res.Header["Content-Length"]; len(clens) == 1 {
- if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
- res.ContentLength = clen64
- } else {
- // TODO: care? unlike http/1, it won't mess up our framing, so it's
- // safer, smuggling-wise, to ignore.
- }
- } else if len(clens) > 1 {
- // TODO: care? unlike http/1, it won't mess up our framing, so it's
- // safer, smuggling-wise, to ignore.
- }
- }
-
- if streamEnded {
- res.Body = noBody
- return res, nil
- }
-
- buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage
- cs.bufPipe = pipe{b: buf}
- cs.bytesRemain = res.ContentLength
- res.Body = transportResponseBody{cs}
- go cs.awaitRequestCancel(requestCancel(cs.req))
-
- if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
- res.Header.Del("Content-Encoding")
- res.Header.Del("Content-Length")
- res.ContentLength = -1
- res.Body = &gzipReader{body: res.Body}
- }
- return res, nil
-}
-
-func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error {
- if cs.pastTrailers {
- // Too many HEADERS frames for this stream.
- return ConnectionError(ErrCodeProtocol)
- }
- cs.pastTrailers = true
- if !f.StreamEnded() {
- // We expect that any HEADERS frame for trailers also
- // has END_STREAM set.
- return ConnectionError(ErrCodeProtocol)
- }
- if len(f.PseudoFields()) > 0 {
- // No pseudo header fields are defined for trailers.
- // TODO: ConnectionError might be overly harsh? Check.
- return ConnectionError(ErrCodeProtocol)
- }
-
- trailer := make(http.Header)
- for _, hf := range f.RegularFields() {
- key := http.CanonicalHeaderKey(hf.Name)
- trailer[key] = append(trailer[key], hf.Value)
- }
- cs.trailer = trailer
-
- rl.endStream(cs)
- return nil
-}
-
-// transportResponseBody is the concrete type of Transport.RoundTrip's
-// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body.
-// On Close it sends RST_STREAM if EOF wasn't already seen.
-type transportResponseBody struct {
- cs *clientStream
-}
-
-func (b transportResponseBody) Read(p []byte) (n int, err error) {
- cs := b.cs
- cc := cs.cc
-
- if cs.readErr != nil {
- return 0, cs.readErr
- }
- n, err = b.cs.bufPipe.Read(p)
- if cs.bytesRemain != -1 {
- if int64(n) > cs.bytesRemain {
- n = int(cs.bytesRemain)
- if err == nil {
- err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
- cc.writeStreamReset(cs.ID, ErrCodeProtocol, err)
- }
- cs.readErr = err
- return int(cs.bytesRemain), err
- }
- cs.bytesRemain -= int64(n)
- if err == io.EOF && cs.bytesRemain > 0 {
- err = io.ErrUnexpectedEOF
- cs.readErr = err
- return n, err
- }
- }
- if n == 0 {
- // No flow control tokens to send back.
- return
- }
-
- cc.mu.Lock()
- defer cc.mu.Unlock()
-
- var connAdd, streamAdd int32
- // Check the conn-level first, before the stream-level.
- if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
- connAdd = transportDefaultConnFlow - v
- cc.inflow.add(connAdd)
- }
- if err == nil { // No need to refresh if the stream is over or failed.
- if v := cs.inflow.available(); v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
- streamAdd = transportDefaultStreamFlow - v
- cs.inflow.add(streamAdd)
- }
- }
- if connAdd != 0 || streamAdd != 0 {
- cc.wmu.Lock()
- defer cc.wmu.Unlock()
- if connAdd != 0 {
- cc.fr.WriteWindowUpdate(0, mustUint31(connAdd))
- }
- if streamAdd != 0 {
- cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd))
- }
- cc.bw.Flush()
- }
- return
-}
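The refill logic above only sends WINDOW_UPDATE frames once a window has drained noticeably: the connection window is topped back up when it falls below half its target, and the stream window when it falls below the target minus a small refresh threshold. A standalone sketch of that arithmetic; the target constants below are invented stand-ins, not the transport's actual transportDefault* values:

```go
package main

import "fmt"

// Illustrative targets only; the real transport uses its own constants.
const (
	connFlowTarget   = 1 << 20   // desired connection-level receive window
	streamFlowTarget = 256 << 10 // desired stream-level receive window
	streamMinRefresh = 4 << 10   // don't bother refilling for less than this
)

// windowRefresh returns the WINDOW_UPDATE increments to send for the
// connection and the stream, mirroring transportResponseBody.Read above.
func windowRefresh(connAvail, streamAvail int32) (connAdd, streamAdd int32) {
	if connAvail < connFlowTarget/2 {
		connAdd = connFlowTarget - connAvail
	}
	if streamAvail < streamFlowTarget-streamMinRefresh {
		streamAdd = streamFlowTarget - streamAvail
	}
	return
}

func main() {
	c, s := windowRefresh(300<<10, 100<<10)
	fmt.Println(c, s) // refill both windows back to their targets
}
```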
-
-var errClosedResponseBody = errors.New("http2: response body closed")
-
-func (b transportResponseBody) Close() error {
- cs := b.cs
- if cs.bufPipe.Err() != io.EOF {
- // TODO: write test for this
- cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- }
- cs.bufPipe.BreakWithError(errClosedResponseBody)
- return nil
-}
-
-func (rl *clientConnReadLoop) processData(f *DataFrame) error {
- cc := rl.cc
- cs := cc.streamByID(f.StreamID, f.StreamEnded())
- if cs == nil {
- cc.mu.Lock()
- neverSent := cc.nextStreamID
- cc.mu.Unlock()
- if f.StreamID >= neverSent {
- // We never asked for this.
- cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
- return ConnectionError(ErrCodeProtocol)
- }
- // We probably did ask for this, but canceled. Just ignore it.
- // TODO: be stricter here? only silently ignore things which
- // we canceled, but not things which were closed normally
- // by the peer? Tough without accumulating too much state.
- return nil
- }
- if data := f.Data(); len(data) > 0 {
- if cs.bufPipe.b == nil {
- // Data frame after it's already closed?
- cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
- return ConnectionError(ErrCodeProtocol)
- }
-
- // Check connection-level flow control.
- cc.mu.Lock()
- if cs.inflow.available() >= int32(len(data)) {
- cs.inflow.take(int32(len(data)))
- } else {
- cc.mu.Unlock()
- return ConnectionError(ErrCodeFlowControl)
- }
- cc.mu.Unlock()
-
- if _, err := cs.bufPipe.Write(data); err != nil {
- rl.endStreamError(cs, err)
- return err
- }
- }
-
- if f.StreamEnded() {
- rl.endStream(cs)
- }
- return nil
-}
-
-var errInvalidTrailers = errors.New("http2: invalid trailers")
-
-func (rl *clientConnReadLoop) endStream(cs *clientStream) {
- // TODO: check that any declared content-length matches, like
- // server.go's (*stream).endStream method.
- rl.endStreamError(cs, nil)
-}
-
-func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
- var code func()
- if err == nil {
- err = io.EOF
- code = cs.copyTrailers
- }
- cs.bufPipe.closeWithErrorAndCode(err, code)
- delete(rl.activeRes, cs.ID)
- if cs.req.Close || cs.req.Header.Get("Connection") == "close" {
- rl.closeWhenIdle = true
- }
-}
-
-func (cs *clientStream) copyTrailers() {
- for k, vv := range cs.trailer {
- t := cs.resTrailer
- if *t == nil {
- *t = make(http.Header)
- }
- (*t)[k] = vv
- }
-}
-
-func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
- cc := rl.cc
- cc.t.connPool().MarkDead(cc)
- if f.ErrCode != 0 {
- // TODO: deal with GOAWAY more. particularly the error code
- cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
- }
- cc.setGoAway(f)
- return nil
-}
-
-func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
- cc := rl.cc
- cc.mu.Lock()
- defer cc.mu.Unlock()
- return f.ForeachSetting(func(s Setting) error {
- switch s.ID {
- case SettingMaxFrameSize:
- cc.maxFrameSize = s.Val
- case SettingMaxConcurrentStreams:
- cc.maxConcurrentStreams = s.Val
- case SettingInitialWindowSize:
- // TODO: error if this is too large.
-
- // TODO: adjust flow control of still-open
- // frames by the difference of the old initial
- // window size and this one.
- cc.initialWindowSize = s.Val
- default:
- // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
- cc.vlogf("Unhandled Setting: %v", s)
- }
- return nil
- })
-}
-
-func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
- cc := rl.cc
- cs := cc.streamByID(f.StreamID, false)
- if f.StreamID != 0 && cs == nil {
- return nil
- }
-
- cc.mu.Lock()
- defer cc.mu.Unlock()
-
- fl := &cc.flow
- if cs != nil {
- fl = &cs.flow
- }
- if !fl.add(int32(f.Increment)) {
- return ConnectionError(ErrCodeFlowControl)
- }
- cc.cond.Broadcast()
- return nil
-}
-
-func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
- cs := rl.cc.streamByID(f.StreamID, true)
- if cs == nil {
- // TODO: return error if server tries to RST_STREAM an idle stream
- return nil
- }
- select {
- case <-cs.peerReset:
- // Already reset.
- // This is the only goroutine
- // which closes this, so there
- // isn't a race.
- default:
- err := StreamError{cs.ID, f.ErrCode}
- cs.resetErr = err
- close(cs.peerReset)
- cs.bufPipe.CloseWithError(err)
- cs.cc.cond.Broadcast() // wake up checkReset via clientStream.awaitFlowControl
- }
- delete(rl.activeRes, cs.ID)
- return nil
-}
-
-func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
- if f.IsAck() {
- // 6.7 PING: " An endpoint MUST NOT respond to PING frames
- // containing this flag."
- return nil
- }
- cc := rl.cc
- cc.wmu.Lock()
- defer cc.wmu.Unlock()
- if err := cc.fr.WritePing(true, f.Data); err != nil {
- return err
- }
- return cc.bw.Flush()
-}
-
-func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
- // We told the peer we don't want them.
- // Spec says:
- // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
- // setting of the peer endpoint is set to 0. An endpoint that
- // has set this setting and has received acknowledgement MUST
- // treat the receipt of a PUSH_PROMISE frame as a connection
- // error (Section 5.4.1) of type PROTOCOL_ERROR."
- return ConnectionError(ErrCodeProtocol)
-}
-
-func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
- // TODO: do something with err? send it as a debug frame to the peer?
- // But that's only in GOAWAY. Invent a new frame type? Is there one already?
- cc.wmu.Lock()
- cc.fr.WriteRSTStream(streamID, code)
- cc.bw.Flush()
- cc.wmu.Unlock()
-}
-
-var (
- errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
- errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers")
-)
-
-func (cc *ClientConn) logf(format string, args ...interface{}) {
- cc.t.logf(format, args...)
-}
-
-func (cc *ClientConn) vlogf(format string, args ...interface{}) {
- cc.t.vlogf(format, args...)
-}
-
-func (t *Transport) vlogf(format string, args ...interface{}) {
- if VerboseLogs {
- t.logf(format, args...)
- }
-}
-
-func (t *Transport) logf(format string, args ...interface{}) {
- log.Printf(format, args...)
-}
-
-var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
-
-func strSliceContains(ss []string, s string) bool {
- for _, v := range ss {
- if v == s {
- return true
- }
- }
- return false
-}
-
-type erringRoundTripper struct{ err error }
-
-func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
-
-// gzipReader wraps a response body so it can lazily
-// call gzip.NewReader on the first call to Read
-type gzipReader struct {
- body io.ReadCloser // underlying Response.Body
- zr *gzip.Reader // lazily-initialized gzip reader
- zerr error // sticky error
-}
-
-func (gz *gzipReader) Read(p []byte) (n int, err error) {
- if gz.zerr != nil {
- return 0, gz.zerr
- }
- if gz.zr == nil {
- gz.zr, err = gzip.NewReader(gz.body)
- if err != nil {
- gz.zerr = err
- return 0, err
- }
- }
- return gz.zr.Read(p)
-}
-
-func (gz *gzipReader) Close() error {
- return gz.body.Close()
-}
-
-type errorReader struct{ err error }
-
-func (r errorReader) Read(p []byte) (int, error) { return 0, r.err }
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/write.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/write.go
deleted file mode 100644
index 0143b24cd3..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/write.go
+++ /dev/null
@@ -1,262 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "bytes"
- "fmt"
- "log"
- "net/http"
- "time"
-
- "golang.org/x/net/http2/hpack"
-)
-
-// writeFramer is implemented by any type that is used to write frames.
-type writeFramer interface {
- writeFrame(writeContext) error
-}
-
-// writeContext is the interface needed by the various frame writer
-// types below. All the writeFrame methods below are scheduled via the
-// frame writing scheduler (see writeScheduler in writesched.go).
-//
-// This interface is implemented by *serverConn.
-//
-// TODO: decide whether to a) use this in the client code (which didn't
-// end up using this yet, because it has a simpler design, not
-// currently implementing priorities), or b) delete this and
-// make the server code a bit more concrete.
-type writeContext interface {
- Framer() *Framer
- Flush() error
- CloseConn() error
- // HeaderEncoder returns an HPACK encoder that writes to the
- // returned buffer.
- HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
-}
-
-// endsStream reports whether the given frame writer w will locally
-// close the stream.
-func endsStream(w writeFramer) bool {
- switch v := w.(type) {
- case *writeData:
- return v.endStream
- case *writeResHeaders:
- return v.endStream
- case nil:
- // This can only happen if the caller reuses w after it's
- // been intentionally nil'ed out to prevent use. Keep this
- // here to catch future refactoring breaking it.
- panic("endsStream called on nil writeFramer")
- }
- return false
-}
-
-type flushFrameWriter struct{}
-
-func (flushFrameWriter) writeFrame(ctx writeContext) error {
- return ctx.Flush()
-}
-
-type writeSettings []Setting
-
-func (s writeSettings) writeFrame(ctx writeContext) error {
- return ctx.Framer().WriteSettings([]Setting(s)...)
-}
-
-type writeGoAway struct {
- maxStreamID uint32
- code ErrCode
-}
-
-func (p *writeGoAway) writeFrame(ctx writeContext) error {
- err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
- if p.code != 0 {
- ctx.Flush() // ignore error: we're hanging up on them anyway
- time.Sleep(50 * time.Millisecond)
- ctx.CloseConn()
- }
- return err
-}
-
-type writeData struct {
- streamID uint32
- p []byte
- endStream bool
-}
-
-func (w *writeData) String() string {
- return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
-}
-
-func (w *writeData) writeFrame(ctx writeContext) error {
- return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
-}
-
-// handlerPanicRST is the message sent from handler goroutines when
-// the handler panics.
-type handlerPanicRST struct {
- StreamID uint32
-}
-
-func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
- return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
-}
-
-func (se StreamError) writeFrame(ctx writeContext) error {
- return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
-}
-
-type writePingAck struct{ pf *PingFrame }
-
-func (w writePingAck) writeFrame(ctx writeContext) error {
- return ctx.Framer().WritePing(true, w.pf.Data)
-}
-
-type writeSettingsAck struct{}
-
-func (writeSettingsAck) writeFrame(ctx writeContext) error {
- return ctx.Framer().WriteSettingsAck()
-}
-
-// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
-// for HTTP response headers or trailers from a server handler.
-type writeResHeaders struct {
- streamID uint32
- httpResCode int // 0 means no ":status" line
- h http.Header // may be nil
- trailers []string // if non-nil, which keys of h to write. nil means all.
- endStream bool
-
- date string
- contentType string
- contentLength string
-}
-
-func encKV(enc *hpack.Encoder, k, v string) {
- if VerboseLogs {
- log.Printf("http2: server encoding header %q = %q", k, v)
- }
- enc.WriteField(hpack.HeaderField{Name: k, Value: v})
-}
-
-func (w *writeResHeaders) writeFrame(ctx writeContext) error {
- enc, buf := ctx.HeaderEncoder()
- buf.Reset()
-
- if w.httpResCode != 0 {
- encKV(enc, ":status", httpCodeString(w.httpResCode))
- }
-
- encodeHeaders(enc, w.h, w.trailers)
-
- if w.contentType != "" {
- encKV(enc, "content-type", w.contentType)
- }
- if w.contentLength != "" {
- encKV(enc, "content-length", w.contentLength)
- }
- if w.date != "" {
- encKV(enc, "date", w.date)
- }
-
- headerBlock := buf.Bytes()
- if len(headerBlock) == 0 && w.trailers == nil {
- panic("unexpected empty hpack")
- }
-
- // For now we're lazy and just pick the minimum MAX_FRAME_SIZE
- // that all peers must support (16KB). Later we could care
- // more and send larger frames if the peer advertised it, but
- // there's little point. Most headers are small anyway (so we
- // generally won't have CONTINUATION frames), and extra frames
- // only waste 9 bytes anyway.
- const maxFrameSize = 16384
-
- first := true
- for len(headerBlock) > 0 {
- frag := headerBlock
- if len(frag) > maxFrameSize {
- frag = frag[:maxFrameSize]
- }
- headerBlock = headerBlock[len(frag):]
- endHeaders := len(headerBlock) == 0
- var err error
- if first {
- first = false
- err = ctx.Framer().WriteHeaders(HeadersFrameParam{
- StreamID: w.streamID,
- BlockFragment: frag,
- EndStream: w.endStream,
- EndHeaders: endHeaders,
- })
- } else {
- err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag)
- }
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-type write100ContinueHeadersFrame struct {
- streamID uint32
-}
-
-func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
- enc, buf := ctx.HeaderEncoder()
- buf.Reset()
- encKV(enc, ":status", "100")
- return ctx.Framer().WriteHeaders(HeadersFrameParam{
- StreamID: w.streamID,
- BlockFragment: buf.Bytes(),
- EndStream: false,
- EndHeaders: true,
- })
-}
-
-type writeWindowUpdate struct {
- streamID uint32 // or 0 for conn-level
- n uint32
-}
-
-func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
- return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
-}
-
-func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
- if keys == nil {
- sorter := sorterPool.Get().(*sorter)
- // Using defer here, since the returned keys from the
- // sorter.Keys method is only valid until the sorter
- // is returned:
- defer sorterPool.Put(sorter)
- keys = sorter.Keys(h)
- }
- for _, k := range keys {
- vv := h[k]
- k = lowerHeader(k)
- if !validHeaderFieldName(k) {
- // TODO: return an error? golang.org/issue/14048
- // For now just omit it.
- continue
- }
- isTE := k == "transfer-encoding"
- for _, v := range vv {
- if !validHeaderFieldValue(v) {
- // TODO: return an error? golang.org/issue/14048
- // For now just omit it.
- continue
- }
- // TODO: more of "8.1.2.2 Connection-Specific Header Fields"
- if isTE && v != "trailers" {
- continue
- }
- encKV(enc, k, v)
- }
- }
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/writesched.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/writesched.go
deleted file mode 100644
index c24316ce7b..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/writesched.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import "fmt"
-
-// frameWriteMsg is a request to write a frame.
-type frameWriteMsg struct {
- // write is the interface value that does the writing, once the
- // writeScheduler (below) has decided to select this frame
- // to write. The write functions are all defined in write.go.
- write writeFramer
-
- stream *stream // used for prioritization. nil for non-stream frames.
-
- // done, if non-nil, must be a buffered channel with space for
- // 1 message and is sent the return value from write (or an
- // earlier error) when the frame has been written.
- done chan error
-}
-
-// for debugging only:
-func (wm frameWriteMsg) String() string {
- var streamID uint32
- if wm.stream != nil {
- streamID = wm.stream.id
- }
- var des string
- if s, ok := wm.write.(fmt.Stringer); ok {
- des = s.String()
- } else {
- des = fmt.Sprintf("%T", wm.write)
- }
- return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des)
-}
-
-// writeScheduler tracks pending frames to write, priorities, and decides
-// the next one to use. It is not thread-safe.
-type writeScheduler struct {
- // zero are frames not associated with a specific stream.
- // They're sent before any stream-specific frames.
- zero writeQueue
-
- // maxFrameSize is the maximum size of a DATA frame
- // we'll write. Must be non-zero and between 16K-16M.
- maxFrameSize uint32
-
- // sq contains the stream-specific queues, keyed by stream ID.
- // when a stream is idle, it's deleted from the map.
- sq map[uint32]*writeQueue
-
- // canSend is a slice of memory that's reused between frame
- // scheduling decisions to hold the list of writeQueues (from sq)
- // which have enough flow control data to send. After canSend is
- // built, the best is selected.
- canSend []*writeQueue
-
- // pool of empty queues for reuse.
- queuePool []*writeQueue
-}
-
-func (ws *writeScheduler) putEmptyQueue(q *writeQueue) {
- if len(q.s) != 0 {
- panic("queue must be empty")
- }
- ws.queuePool = append(ws.queuePool, q)
-}
-
-func (ws *writeScheduler) getEmptyQueue() *writeQueue {
- ln := len(ws.queuePool)
- if ln == 0 {
- return new(writeQueue)
- }
- q := ws.queuePool[ln-1]
- ws.queuePool = ws.queuePool[:ln-1]
- return q
-}
-
-func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 }
-
-func (ws *writeScheduler) add(wm frameWriteMsg) {
- st := wm.stream
- if st == nil {
- ws.zero.push(wm)
- } else {
- ws.streamQueue(st.id).push(wm)
- }
-}
-
-func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue {
- if q, ok := ws.sq[streamID]; ok {
- return q
- }
- if ws.sq == nil {
- ws.sq = make(map[uint32]*writeQueue)
- }
- q := ws.getEmptyQueue()
- ws.sq[streamID] = q
- return q
-}
-
-// take returns the most important frame to write and removes it from the scheduler.
-// It is illegal to call this if the scheduler is empty or if there are no connection-level
-// flow control bytes available.
-func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) {
- if ws.maxFrameSize == 0 {
- panic("internal error: ws.maxFrameSize not initialized or invalid")
- }
-
- // If there are any frames not associated with streams, prefer those first.
- // These are usually SETTINGS, etc.
- if !ws.zero.empty() {
- return ws.zero.shift(), true
- }
- if len(ws.sq) == 0 {
- return
- }
-
- // Next, prioritize frames on streams that aren't DATA frames (no cost).
- for id, q := range ws.sq {
- if q.firstIsNoCost() {
- return ws.takeFrom(id, q)
- }
- }
-
- // Now, all that remains are DATA frames with non-zero bytes to
- // send. So pick the best one.
- if len(ws.canSend) != 0 {
- panic("should be empty")
- }
- for _, q := range ws.sq {
- if n := ws.streamWritableBytes(q); n > 0 {
- ws.canSend = append(ws.canSend, q)
- }
- }
- if len(ws.canSend) == 0 {
- return
- }
- defer ws.zeroCanSend()
-
- // TODO: find the best queue
- q := ws.canSend[0]
-
- return ws.takeFrom(q.streamID(), q)
-}
-
-// zeroCanSend is deferred from take.
-func (ws *writeScheduler) zeroCanSend() {
- for i := range ws.canSend {
- ws.canSend[i] = nil
- }
- ws.canSend = ws.canSend[:0]
-}
-
-// streamWritableBytes returns the number of DATA bytes we could write
-// from the given queue's stream, if this stream/queue were
-// selected. It is an error to call this if q's head isn't a
-// *writeData.
-func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 {
- wm := q.head()
- ret := wm.stream.flow.available() // max we can write
- if ret == 0 {
- return 0
- }
- if int32(ws.maxFrameSize) < ret {
- ret = int32(ws.maxFrameSize)
- }
- if ret == 0 {
- panic("internal error: ws.maxFrameSize not initialized or invalid")
- }
- wd := wm.write.(*writeData)
- if len(wd.p) < int(ret) {
- ret = int32(len(wd.p))
- }
- return ret
-}
-
-func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) {
- wm = q.head()
- // If the first item in this queue costs flow control tokens
- // and we don't have enough, write as much as we can.
- if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 {
- allowed := wm.stream.flow.available() // max we can write
- if allowed == 0 {
- // No quota available. Caller can try the next stream.
- return frameWriteMsg{}, false
- }
- if int32(ws.maxFrameSize) < allowed {
- allowed = int32(ws.maxFrameSize)
- }
- // TODO: further restrict the allowed size, because even if
- // the peer says it's okay to write 16MB data frames, we might
- // want to write smaller ones to properly weight competing
- // streams' priorities.
-
- if len(wd.p) > int(allowed) {
- wm.stream.flow.take(allowed)
- chunk := wd.p[:allowed]
- wd.p = wd.p[allowed:]
- // Make up a new write message of a valid size, rather
- // than shifting one off the queue.
- return frameWriteMsg{
- stream: wm.stream,
- write: &writeData{
- streamID: wd.streamID,
- p: chunk,
- // even if the original had endStream set, there
- // are bytes remaining because len(wd.p) > allowed,
- // so we know endStream is false:
- endStream: false,
- },
- // our caller is blocking on the final DATA frame, not
- // these intermediates, so no need to wait:
- done: nil,
- }, true
- }
- wm.stream.flow.take(int32(len(wd.p)))
- }
-
- q.shift()
- if q.empty() {
- ws.putEmptyQueue(q)
- delete(ws.sq, id)
- }
- return wm, true
-}
-
-func (ws *writeScheduler) forgetStream(id uint32) {
- q, ok := ws.sq[id]
- if !ok {
- return
- }
- delete(ws.sq, id)
-
- // But keep it for others later.
- for i := range q.s {
- q.s[i] = frameWriteMsg{}
- }
- q.s = q.s[:0]
- ws.putEmptyQueue(q)
-}
-
-type writeQueue struct {
- s []frameWriteMsg
-}
-
-// streamID returns the stream ID for a non-empty stream-specific queue.
-func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id }
-
-func (q *writeQueue) empty() bool { return len(q.s) == 0 }
-
-func (q *writeQueue) push(wm frameWriteMsg) {
- q.s = append(q.s, wm)
-}
-
-// head returns the next item that would be removed by shift.
-func (q *writeQueue) head() frameWriteMsg {
- if len(q.s) == 0 {
- panic("invalid use of queue")
- }
- return q.s[0]
-}
-
-func (q *writeQueue) shift() frameWriteMsg {
- if len(q.s) == 0 {
- panic("invalid use of queue")
- }
- wm := q.s[0]
- // TODO: less copy-happy queue.
- copy(q.s, q.s[1:])
- q.s[len(q.s)-1] = frameWriteMsg{}
- q.s = q.s[:len(q.s)-1]
- return wm
-}
-
-func (q *writeQueue) firstIsNoCost() bool {
- if df, ok := q.s[0].write.(*writeData); ok {
- return len(df.p) == 0
- }
- return true
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/internal/timeseries/timeseries.go
deleted file mode 100644
index 3f90b7300d..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/internal/timeseries/timeseries.go
+++ /dev/null
@@ -1,525 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package timeseries implements a time series structure for stats collection.
-package timeseries
-
-import (
- "fmt"
- "log"
- "time"
-)
-
-const (
- timeSeriesNumBuckets = 64
- minuteHourSeriesNumBuckets = 60
-)
-
-var timeSeriesResolutions = []time.Duration{
- 1 * time.Second,
- 10 * time.Second,
- 1 * time.Minute,
- 10 * time.Minute,
- 1 * time.Hour,
- 6 * time.Hour,
- 24 * time.Hour, // 1 day
- 7 * 24 * time.Hour, // 1 week
- 4 * 7 * 24 * time.Hour, // 4 weeks
- 16 * 7 * 24 * time.Hour, // 16 weeks
-}
-
-var minuteHourSeriesResolutions = []time.Duration{
- 1 * time.Second,
- 1 * time.Minute,
-}
-
-// An Observable is a kind of data that can be aggregated in a time series.
-type Observable interface {
- Multiply(ratio float64) // Multiplies the data in self by a given ratio
- Add(other Observable) // Adds the data from a different observation to self
- Clear() // Clears the observation so it can be reused.
- CopyFrom(other Observable) // Copies the contents of a given observation to self
-}
-
-// Float attaches the methods of Observable to a float64.
-type Float float64
-
-// NewFloat returns a Float.
-func NewFloat() Observable {
- f := Float(0)
- return &f
-}
-
-// String returns the float as a string.
-func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
-
-// Value returns the float's value.
-func (f *Float) Value() float64 { return float64(*f) }
-
-func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
-
-func (f *Float) Add(other Observable) {
- o := other.(*Float)
- *f += *o
-}
-
-func (f *Float) Clear() { *f = 0 }
-
-func (f *Float) CopyFrom(other Observable) {
- o := other.(*Float)
- *f = *o
-}
-
-// A Clock tells the current time.
-type Clock interface {
- Time() time.Time
-}
-
-type defaultClock int
-
-var defaultClockInstance defaultClock
-
-func (defaultClock) Time() time.Time { return time.Now() }
-
-// Information kept per level. Each level consists of a circular list of
-// observations. The start of the level may be derived from end and
-// len(buckets) * size.
-type tsLevel struct {
- oldest int // index to oldest bucketed Observable
- newest int // index to newest bucketed Observable
- end time.Time // end timestamp for this level
- size time.Duration // duration of the bucketed Observable
- buckets []Observable // collections of observations
- provider func() Observable // used for creating new Observable
-}
-
-func (l *tsLevel) Clear() {
- l.oldest = 0
- l.newest = len(l.buckets) - 1
- l.end = time.Time{}
- for i := range l.buckets {
- if l.buckets[i] != nil {
- l.buckets[i].Clear()
- l.buckets[i] = nil
- }
- }
-}
-
-func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
- l.size = size
- l.provider = f
- l.buckets = make([]Observable, numBuckets)
-}
-
-// Keeps a sequence of levels. Each level is responsible for storing data at
-// a given resolution. For example, the first level stores data at a one
-// minute resolution while the second level stores data at a one hour
-// resolution.
-
-// Each level is represented by a sequence of buckets. Each bucket spans an
-// interval equal to the resolution of the level. New observations are added
-// to the last bucket.
-type timeSeries struct {
- provider func() Observable // make more Observable
- numBuckets int // number of buckets in each level
- levels []*tsLevel // levels of bucketed Observable
- lastAdd time.Time // time of last Observable tracked
- total Observable // convenient aggregation of all Observable
- clock Clock // Clock for getting current time
- pending Observable // observations not yet bucketed
- pendingTime time.Time // what time are we keeping in pending
- dirty bool // if there are pending observations
-}
-
-// init initializes the time series according to the supplied criteria.
-func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
- ts.provider = f
- ts.numBuckets = numBuckets
- ts.clock = clock
- ts.levels = make([]*tsLevel, len(resolutions))
-
- for i := range resolutions {
- if i > 0 && resolutions[i-1] >= resolutions[i] {
- log.Print("timeseries: resolutions must be monotonically increasing")
- break
- }
- newLevel := new(tsLevel)
- newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
- ts.levels[i] = newLevel
- }
-
- ts.Clear()
-}
-
-// Clear removes all observations from the time series.
-func (ts *timeSeries) Clear() {
- ts.lastAdd = time.Time{}
- ts.total = ts.resetObservation(ts.total)
- ts.pending = ts.resetObservation(ts.pending)
- ts.pendingTime = time.Time{}
- ts.dirty = false
-
- for i := range ts.levels {
- ts.levels[i].Clear()
- }
-}
-
-// Add records an observation at the current time.
-func (ts *timeSeries) Add(observation Observable) {
- ts.AddWithTime(observation, ts.clock.Time())
-}
-
-// AddWithTime records an observation at the specified time.
-func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {
-
- smallBucketDuration := ts.levels[0].size
-
- if t.After(ts.lastAdd) {
- ts.lastAdd = t
- }
-
- if t.After(ts.pendingTime) {
- ts.advance(t)
- ts.mergePendingUpdates()
- ts.pendingTime = ts.levels[0].end
- ts.pending.CopyFrom(observation)
- ts.dirty = true
- } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
- // The observation is close enough to go into the pending bucket.
- // This compensates for clock skewing and small scheduling delays
- // by letting the update stay in the fast path.
- ts.pending.Add(observation)
- ts.dirty = true
- } else {
- ts.mergeValue(observation, t)
- }
-}
-
-// mergeValue inserts the observation at the specified time in the past into all levels.
-func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
- for _, level := range ts.levels {
- index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
- if 0 <= index && index < ts.numBuckets {
- bucketNumber := (level.oldest + index) % ts.numBuckets
- if level.buckets[bucketNumber] == nil {
- level.buckets[bucketNumber] = level.provider()
- }
- level.buckets[bucketNumber].Add(observation)
- }
- }
- ts.total.Add(observation)
-}
-
-// mergePendingUpdates applies the pending updates into all levels.
-func (ts *timeSeries) mergePendingUpdates() {
- if ts.dirty {
- ts.mergeValue(ts.pending, ts.pendingTime)
- ts.pending = ts.resetObservation(ts.pending)
- ts.dirty = false
- }
-}
-
-// advance cycles the buckets at each level until the latest bucket in
-// each level can hold the time specified.
-func (ts *timeSeries) advance(t time.Time) {
- if !t.After(ts.levels[0].end) {
- return
- }
- for i := 0; i < len(ts.levels); i++ {
- level := ts.levels[i]
- if !level.end.Before(t) {
- break
- }
-
- // If the time is sufficiently far, just clear the level and advance
- // directly.
- if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
- for _, b := range level.buckets {
- ts.resetObservation(b)
- }
- level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
- }
-
- for t.After(level.end) {
- level.end = level.end.Add(level.size)
- level.newest = level.oldest
- level.oldest = (level.oldest + 1) % ts.numBuckets
- ts.resetObservation(level.buckets[level.newest])
- }
-
- t = level.end
- }
-}
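The inner loop above is a plain ring-buffer rotation: each step moves end forward by one bucket width, recycles the oldest bucket as the new newest one, and clears it. A stripped-down sketch of that rotation for a single level, with ints standing in for Observables:

```go
package main

import (
	"fmt"
	"time"
)

// level is a stripped-down tsLevel: a ring of buckets covering the
// window [end - len(buckets)*size, end).
type level struct {
	oldest, newest int
	end            time.Time
	size           time.Duration
	buckets        []int
}

// advance rotates the ring forward until end covers t, clearing each
// bucket it rotates past, mirroring timeSeries.advance above.
func (l *level) advance(t time.Time) {
	for t.After(l.end) {
		l.end = l.end.Add(l.size)
		l.newest = l.oldest
		l.oldest = (l.oldest + 1) % len(l.buckets)
		l.buckets[l.newest] = 0 // reset the bucket being reused
	}
}

func main() {
	start := time.Unix(0, 0)
	l := &level{newest: 3, end: start, size: time.Second, buckets: []int{1, 2, 3, 4}}
	l.advance(start.Add(2 * time.Second))
	fmt.Println(l.buckets, l.oldest, l.newest) // [0 0 3 4] 2 1: two buckets recycled and cleared
}
```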
-
-// Latest returns the sum of the num latest buckets from the level.
-func (ts *timeSeries) Latest(level, num int) Observable {
- now := ts.clock.Time()
- if ts.levels[0].end.Before(now) {
- ts.advance(now)
- }
-
- ts.mergePendingUpdates()
-
- result := ts.provider()
- l := ts.levels[level]
- index := l.newest
-
- for i := 0; i < num; i++ {
- if l.buckets[index] != nil {
- result.Add(l.buckets[index])
- }
- if index == 0 {
- index = ts.numBuckets
- }
- index--
- }
-
- return result
-}
-
-// LatestBuckets returns a copy of the num latest buckets from level.
-func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
- if level < 0 || level > len(ts.levels) {
- log.Print("timeseries: bad level argument: ", level)
- return nil
- }
- if num < 0 || num >= ts.numBuckets {
- log.Print("timeseries: bad num argument: ", num)
- return nil
- }
-
- results := make([]Observable, num)
- now := ts.clock.Time()
- if ts.levels[0].end.Before(now) {
- ts.advance(now)
- }
-
- ts.mergePendingUpdates()
-
- l := ts.levels[level]
- index := l.newest
-
- for i := 0; i < num; i++ {
- result := ts.provider()
- results[i] = result
- if l.buckets[index] != nil {
- result.CopyFrom(l.buckets[index])
- }
-
- if index == 0 {
- index = ts.numBuckets
- }
- index -= 1
- }
- return results
-}
-
-// ScaleBy updates observations by scaling by factor.
-func (ts *timeSeries) ScaleBy(factor float64) {
- for _, l := range ts.levels {
- for i := 0; i < ts.numBuckets; i++ {
- l.buckets[i].Multiply(factor)
- }
- }
-
- ts.total.Multiply(factor)
- ts.pending.Multiply(factor)
-}
-
-// Range returns the sum of observations added over the specified time range.
-// If start or finish times don't fall on bucket boundaries of the same
-// level, then return values are approximate answers.
-func (ts *timeSeries) Range(start, finish time.Time) Observable {
- return ts.ComputeRange(start, finish, 1)[0]
-}
-
-// Recent returns the sum of observations from the last delta.
-func (ts *timeSeries) Recent(delta time.Duration) Observable {
- now := ts.clock.Time()
- return ts.Range(now.Add(-delta), now)
-}
-
-// Total returns the total of all observations.
-func (ts *timeSeries) Total() Observable {
- ts.mergePendingUpdates()
- return ts.total
-}
-
-// ComputeRange computes a specified number of values into a slice using
-// the observations recorded over the specified time period. The return
-// values are approximate if the start or finish times don't fall on the
-// bucket boundaries at the same level or if the number of buckets spanning
-// the range is not an integral multiple of num.
-func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
- if start.After(finish) {
- log.Printf("timeseries: start > finish, %v>%v", start, finish)
- return nil
- }
-
- if num < 0 {
- log.Printf("timeseries: num < 0, %v", num)
- return nil
- }
-
- results := make([]Observable, num)
-
- for _, l := range ts.levels {
- if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
- ts.extract(l, start, finish, num, results)
- return results
- }
- }
-
- // Failed to find a level that covers the desired range. So just
- // extract from the last level, even if it doesn't cover the entire
- // desired range.
- ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
-
- return results
-}
-
-// RecentList returns the specified number of values in slice over the most
-// recent time period of the specified range.
-func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
- if delta < 0 {
- return nil
- }
- now := ts.clock.Time()
- return ts.ComputeRange(now.Add(-delta), now, num)
-}
-
-// extract fills results with the specified number of observations taken
-// from the given level over the given range.
-func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
- ts.mergePendingUpdates()
-
- srcInterval := l.size
- dstInterval := finish.Sub(start) / time.Duration(num)
- dstStart := start
- srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
-
- srcIndex := 0
-
- // Where should scanning start?
- if dstStart.After(srcStart) {
- advance := dstStart.Sub(srcStart) / srcInterval
- srcIndex += int(advance)
- srcStart = srcStart.Add(advance * srcInterval)
- }
-
- // The i'th value is computed as shown below.
- // interval = (finish - start)/num
- // i'th value = sum of observations in range
- // [ start + i * interval,
- // start + (i + 1) * interval )
- for i := 0; i < num; i++ {
- results[i] = ts.resetObservation(results[i])
- dstEnd := dstStart.Add(dstInterval)
- for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
- srcEnd := srcStart.Add(srcInterval)
- if srcEnd.After(ts.lastAdd) {
- srcEnd = ts.lastAdd
- }
-
- if !srcEnd.Before(dstStart) {
- srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
- if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
- // dst completely contains src.
- if srcValue != nil {
- results[i].Add(srcValue)
- }
- } else {
- // dst partially overlaps src.
- overlapStart := maxTime(srcStart, dstStart)
- overlapEnd := minTime(srcEnd, dstEnd)
- base := srcEnd.Sub(srcStart)
- fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
-
- used := ts.provider()
- if srcValue != nil {
- used.CopyFrom(srcValue)
- }
- used.Multiply(fraction)
- results[i].Add(used)
- }
-
- if srcEnd.After(dstEnd) {
- break
- }
- }
- srcIndex++
- srcStart = srcStart.Add(srcInterval)
- }
- dstStart = dstStart.Add(dstInterval)
- }
-}
-
-// resetObservation clears the content so the struct may be reused.
-func (ts *timeSeries) resetObservation(observation Observable) Observable {
- if observation == nil {
- observation = ts.provider()
- } else {
- observation.Clear()
- }
- return observation
-}
-
-// TimeSeries tracks data at granularities from 1 second to 16 weeks.
-type TimeSeries struct {
- timeSeries
-}
-
-// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
-func NewTimeSeries(f func() Observable) *TimeSeries {
- return NewTimeSeriesWithClock(f, defaultClockInstance)
-}
-
-// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for
-// assigning timestamps.
-func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
- ts := new(TimeSeries)
- ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
- return ts
-}
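A short usage sketch for the exported TimeSeries type, using only the functions and methods defined in this file (NewFloat, Add, Recent, Total). Note this is an internal package, so the import below only works from inside the golang.org/x/net module:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/net/internal/timeseries"
)

func main() {
	ts := timeseries.NewTimeSeries(timeseries.NewFloat)

	// Record three observations at the current time.
	for i := 0; i < 3; i++ {
		obs := timeseries.Float(1)
		ts.Add(&obs)
	}

	recent := ts.Recent(time.Minute) // sum over the last minute
	total := ts.Total()              // sum over all observations
	fmt.Println(recent, total)
}
```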
-
-// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
-type MinuteHourSeries struct {
- timeSeries
-}
-
-// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable.
-func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
- return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
-}
-
-// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for
-// assigning timestamps.
-func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
- ts := new(MinuteHourSeries)
- ts.timeSeries.init(minuteHourSeriesResolutions, f,
- minuteHourSeriesNumBuckets, clock)
- return ts
-}
-
-func (ts *MinuteHourSeries) Minute() Observable {
- return ts.timeSeries.Latest(0, 60)
-}
-
-func (ts *MinuteHourSeries) Hour() Observable {
- return ts.timeSeries.Latest(1, 60)
-}
-
-func minTime(a, b time.Time) time.Time {
- if a.Before(b) {
- return a
- }
- return b
-}
-
-func maxTime(a, b time.Time) time.Time {
- if a.After(b) {
- return a
- }
- return b
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/events.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/events.go
deleted file mode 100644
index e66c7e3282..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/events.go
+++ /dev/null
@@ -1,524 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package trace
-
-import (
- "bytes"
- "fmt"
- "html/template"
- "io"
- "log"
- "net/http"
- "runtime"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "text/tabwriter"
- "time"
-)
-
-var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{
- "elapsed": elapsed,
- "trimSpace": strings.TrimSpace,
-}).Parse(eventsHTML))
-
-const maxEventsPerLog = 100
-
-type bucket struct {
- MaxErrAge time.Duration
- String string
-}
-
-var buckets = []bucket{
- {0, "total"},
- {10 * time.Second, "errs<10s"},
- {1 * time.Minute, "errs<1m"},
- {10 * time.Minute, "errs<10m"},
- {1 * time.Hour, "errs<1h"},
- {10 * time.Hour, "errs<10h"},
- {24000 * time.Hour, "errors"},
-}
-
-// RenderEvents renders the HTML page typically served at /debug/events.
-// It does not do any auth checking; see AuthRequest for the default auth check
-// used by the handler registered on http.DefaultServeMux.
-// req may be nil.
-func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
- now := time.Now()
- data := &struct {
- Families []string // family names
- Buckets []bucket
- Counts [][]int // eventLog count per family/bucket
-
- // Set when a bucket has been selected.
- Family string
- Bucket int
- EventLogs eventLogs
- Expanded bool
- }{
- Buckets: buckets,
- }
-
- data.Families = make([]string, 0, len(families))
- famMu.RLock()
- for name := range families {
- data.Families = append(data.Families, name)
- }
- famMu.RUnlock()
- sort.Strings(data.Families)
-
- // Count the number of eventLogs in each family for each error age.
- data.Counts = make([][]int, len(data.Families))
- for i, name := range data.Families {
- // TODO(sameer): move this loop under the family lock.
- f := getEventFamily(name)
- data.Counts[i] = make([]int, len(data.Buckets))
- for j, b := range data.Buckets {
- data.Counts[i][j] = f.Count(now, b.MaxErrAge)
- }
- }
-
- if req != nil {
- var ok bool
- data.Family, data.Bucket, ok = parseEventsArgs(req)
- if !ok {
- // No-op
- } else {
- data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
- }
- if data.EventLogs != nil {
- defer data.EventLogs.Free()
- sort.Sort(data.EventLogs)
- }
- if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
- data.Expanded = exp
- }
- }
-
- famMu.RLock()
- defer famMu.RUnlock()
- if err := eventsTmpl.Execute(w, data); err != nil {
- log.Printf("net/trace: Failed executing template: %v", err)
- }
-}
-
-func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
- fam, bStr := req.FormValue("fam"), req.FormValue("b")
- if fam == "" || bStr == "" {
- return "", 0, false
- }
- b, err := strconv.Atoi(bStr)
- if err != nil || b < 0 || b >= len(buckets) {
- return "", 0, false
- }
- return fam, b, true
-}
-
-// An EventLog provides a log of events associated with a specific object.
-type EventLog interface {
- // Printf formats its arguments with fmt.Sprintf and adds the
- // result to the event log.
- Printf(format string, a ...interface{})
-
- // Errorf is like Printf, but it marks this event as an error.
- Errorf(format string, a ...interface{})
-
- // Finish declares that this event log is complete.
- // The event log should not be used after calling this method.
- Finish()
-}
-
-// NewEventLog returns a new EventLog with the specified family name
-// and title.
-func NewEventLog(family, title string) EventLog {
- el := newEventLog()
- el.ref()
- el.Family, el.Title = family, title
- el.Start = time.Now()
- el.events = make([]logEntry, 0, maxEventsPerLog)
- el.stack = make([]uintptr, 32)
- n := runtime.Callers(2, el.stack)
- el.stack = el.stack[:n]
-
- getEventFamily(family).add(el)
- return el
-}
-
-func (el *eventLog) Finish() {
- getEventFamily(el.Family).remove(el)
- el.unref() // matches ref in New
-}
-
-var (
- famMu sync.RWMutex
- families = make(map[string]*eventFamily) // family name => family
-)
-
-func getEventFamily(fam string) *eventFamily {
- famMu.Lock()
- defer famMu.Unlock()
- f := families[fam]
- if f == nil {
- f = &eventFamily{}
- families[fam] = f
- }
- return f
-}
-
-type eventFamily struct {
- mu sync.RWMutex
- eventLogs eventLogs
-}
-
-func (f *eventFamily) add(el *eventLog) {
- f.mu.Lock()
- f.eventLogs = append(f.eventLogs, el)
- f.mu.Unlock()
-}
-
-func (f *eventFamily) remove(el *eventLog) {
- f.mu.Lock()
- defer f.mu.Unlock()
- for i, el0 := range f.eventLogs {
- if el == el0 {
- copy(f.eventLogs[i:], f.eventLogs[i+1:])
- f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
- return
- }
- }
-}
-
-func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
- f.mu.RLock()
- defer f.mu.RUnlock()
- for _, el := range f.eventLogs {
- if el.hasRecentError(now, maxErrAge) {
- n++
- }
- }
- return
-}
-
-func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
- f.mu.RLock()
- defer f.mu.RUnlock()
- els = make(eventLogs, 0, len(f.eventLogs))
- for _, el := range f.eventLogs {
- if el.hasRecentError(now, maxErrAge) {
- el.ref()
- els = append(els, el)
- }
- }
- return
-}
-
-type eventLogs []*eventLog
-
-// Free calls unref on each element of the list.
-func (els eventLogs) Free() {
- for _, el := range els {
- el.unref()
- }
-}
-
-// eventLogs may be sorted in reverse chronological order.
-func (els eventLogs) Len() int { return len(els) }
-func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
-func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] }
-
-// A logEntry is a timestamped log entry in an event log.
-type logEntry struct {
- When time.Time
- Elapsed time.Duration // since previous event in log
- NewDay bool // whether this event is on a different day to the previous event
- What string
- IsErr bool
-}
-
-// WhenString returns a string representation of the elapsed time of the event.
-// It will include the date if midnight was crossed.
-func (e logEntry) WhenString() string {
- if e.NewDay {
- return e.When.Format("2006/01/02 15:04:05.000000")
- }
- return e.When.Format("15:04:05.000000")
-}
-
-// An eventLog represents an active event log.
-type eventLog struct {
- // Family is the top-level grouping of event logs to which this belongs.
- Family string
-
- // Title is the title of this event log.
- Title string
-
- // Timing information.
- Start time.Time
-
- // Call stack where this event log was created.
- stack []uintptr
-
- // Append-only sequence of events.
- //
- // TODO(sameer): change this to a ring buffer to avoid the array copy
- // when we hit maxEventsPerLog.
- mu sync.RWMutex
- events []logEntry
- LastErrorTime time.Time
- discarded int
-
- refs int32 // how many buckets this is in
-}
-
-func (el *eventLog) reset() {
- // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
- el.Family = ""
- el.Title = ""
- el.Start = time.Time{}
- el.stack = nil
- el.events = nil
- el.LastErrorTime = time.Time{}
- el.discarded = 0
- el.refs = 0
-}
-
-func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
- if maxErrAge == 0 {
- return true
- }
- el.mu.RLock()
- defer el.mu.RUnlock()
- return now.Sub(el.LastErrorTime) < maxErrAge
-}
-
-// delta returns the elapsed time since the last event or the log start,
-// and whether it spans midnight.
-// L >= el.mu
-func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
- if len(el.events) == 0 {
- return t.Sub(el.Start), false
- }
- prev := el.events[len(el.events)-1].When
- return t.Sub(prev), prev.Day() != t.Day()
-}
-
-func (el *eventLog) Printf(format string, a ...interface{}) {
- el.printf(false, format, a...)
-}
-
-func (el *eventLog) Errorf(format string, a ...interface{}) {
- el.printf(true, format, a...)
-}
-
-func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
- e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
- el.mu.Lock()
- e.Elapsed, e.NewDay = el.delta(e.When)
- if len(el.events) < maxEventsPerLog {
- el.events = append(el.events, e)
- } else {
- // Discard the oldest event.
- if el.discarded == 0 {
- // el.discarded starts at two to account for the event it
- // is replacing, plus the next one that we are about to
- // drop.
- el.discarded = 2
- } else {
- el.discarded++
- }
- // TODO(sameer): if this causes allocations on a critical path,
- // change eventLog.What to be a fmt.Stringer, as in trace.go.
- el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
- // The timestamp of the discarded meta-event should be
- // the time of the last event it is representing.
- el.events[0].When = el.events[1].When
- copy(el.events[1:], el.events[2:])
- el.events[maxEventsPerLog-1] = e
- }
- if e.IsErr {
- el.LastErrorTime = e.When
- }
- el.mu.Unlock()
-}
-
-func (el *eventLog) ref() {
- atomic.AddInt32(&el.refs, 1)
-}
-
-func (el *eventLog) unref() {
- if atomic.AddInt32(&el.refs, -1) == 0 {
- freeEventLog(el)
- }
-}
-
-func (el *eventLog) When() string {
- return el.Start.Format("2006/01/02 15:04:05.000000")
-}
-
-func (el *eventLog) ElapsedTime() string {
- elapsed := time.Since(el.Start)
- return fmt.Sprintf("%.6f", elapsed.Seconds())
-}
-
-func (el *eventLog) Stack() string {
- buf := new(bytes.Buffer)
- tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
- printStackRecord(tw, el.stack)
- tw.Flush()
- return buf.String()
-}
-
-// printStackRecord prints the function + source line information
-// for a single stack trace.
-// Adapted from runtime/pprof/pprof.go.
-func printStackRecord(w io.Writer, stk []uintptr) {
- for _, pc := range stk {
- f := runtime.FuncForPC(pc)
- if f == nil {
- continue
- }
- file, line := f.FileLine(pc)
- name := f.Name()
- // Hide runtime.goexit and any runtime functions at the beginning.
- if strings.HasPrefix(name, "runtime.") {
- continue
- }
- fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
- }
-}
-
-func (el *eventLog) Events() []logEntry {
- el.mu.RLock()
- defer el.mu.RUnlock()
- return el.events
-}
-
-// freeEventLogs is a freelist of *eventLog
-var freeEventLogs = make(chan *eventLog, 1000)
-
-// newEventLog returns an event log ready to use.
-func newEventLog() *eventLog {
- select {
- case el := <-freeEventLogs:
- return el
- default:
- return new(eventLog)
- }
-}
-
-// freeEventLog adds el to freeEventLogs if there's room.
-// This is non-blocking.
-func freeEventLog(el *eventLog) {
- el.reset()
- select {
- case freeEventLogs <- el:
- default:
- }
-}
-
-const eventsHTML = `
-
-
- events
-
-
-
-
-/debug/events
-
-
-
-{{if $.EventLogs}}
-
-Family: {{$.Family}}
-
-{{if $.Expanded}}{{end}}
-[Summary]{{if $.Expanded}}{{end}}
-
-{{if not $.Expanded}}{{end}}
-[Expanded]{{if not $.Expanded}}{{end}}
-
-
- When | Elapsed |
- {{range $el := $.EventLogs}}
-
- {{$el.When}} |
- {{$el.ElapsedTime}} |
- {{$el.Title}}
- |
- {{if $.Expanded}}
-
- |
- |
- {{$el.Stack|trimSpace}} |
-
- {{range $el.Events}}
-
- {{.WhenString}} |
- {{elapsed .Elapsed}} |
- .{{if .IsErr}}E{{else}}.{{end}}. {{.What}} |
-
- {{end}}
- {{end}}
- {{end}}
-
-{{end}}
-
-
-`
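
For orientation, a minimal sketch (not part of the removed file) of how the EventLog API defined above is attached to a long-lived object; the conn type and the doPing helper are purely illustrative stand-ins.

    package main

    import (
        "time"

        "golang.org/x/net/trace"
    )

    type conn struct {
        addr   string
        events trace.EventLog
    }

    func dial(addr string) *conn {
        c := &conn{addr: addr, events: trace.NewEventLog("example.Conn", addr)}
        c.events.Printf("dialed %s", addr)
        return c
    }

    // doPing stands in for real network work in this sketch.
    func doPing(addr string) error { return nil }

    func (c *conn) ping() error {
        start := time.Now()
        if err := doPing(c.addr); err != nil {
            c.events.Errorf("ping %s failed: %v", c.addr, err) // marks the entry as an error
            return err
        }
        c.events.Printf("ping %s ok in %v", c.addr, time.Since(start))
        return nil
    }

    func (c *conn) close() {
        c.events.Finish() // the log must not be used after Finish
    }

    func main() {
        c := dial("db.internal:5432")
        _ = c.ping()
        c.close()
    }
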
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/histogram.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/histogram.go
deleted file mode 100644
index bb42aa5320..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/histogram.go
+++ /dev/null
@@ -1,356 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package trace
-
-// This file implements histogramming for RPC statistics collection.
-
-import (
- "bytes"
- "fmt"
- "html/template"
- "log"
- "math"
-
- "golang.org/x/net/internal/timeseries"
-)
-
-const (
- bucketCount = 38
-)
-
-// histogram keeps counts of values in buckets that are spaced
-// out in powers of 2: 0-1, 2-3, 4-7...
-// histogram implements timeseries.Observable
-type histogram struct {
- sum int64 // running total of measurements
- sumOfSquares float64 // running total of the squares of the measurements
- buckets []int64 // bucketed values for histogram
- value int // bucket index of the single value recorded so far (optimization)
- valueCount int64 // number of values recorded for the single value; -1 once buckets are in use
-}
-
-// addMeasurement records a single measured value in the histogram.
-func (h *histogram) addMeasurement(value int64) {
- // TODO: assert invariant
- h.sum += value
- h.sumOfSquares += float64(value) * float64(value)
-
- bucketIndex := getBucket(value)
-
- if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
- h.value = bucketIndex
- h.valueCount++
- } else {
- h.allocateBuckets()
- h.buckets[bucketIndex]++
- }
-}
-
-func (h *histogram) allocateBuckets() {
- if h.buckets == nil {
- h.buckets = make([]int64, bucketCount)
- h.buckets[h.value] = h.valueCount
- h.value = 0
- h.valueCount = -1
- }
-}
-
-func log2(i int64) int {
- n := 0
- for ; i >= 0x100; i >>= 8 {
- n += 8
- }
- for ; i > 0; i >>= 1 {
- n += 1
- }
- return n
-}
-
-func getBucket(i int64) (index int) {
- index = log2(i) - 1
- if index < 0 {
- index = 0
- }
- if index >= bucketCount {
- index = bucketCount - 1
- }
- return
-}
-
-// Total returns the number of recorded observations.
-func (h *histogram) total() (total int64) {
- if h.valueCount >= 0 {
- total = h.valueCount
- }
- for _, val := range h.buckets {
- total += int64(val)
- }
- return
-}
-
-// Average returns the average value of recorded observations.
-func (h *histogram) average() float64 {
- t := h.total()
- if t == 0 {
- return 0
- }
- return float64(h.sum) / float64(t)
-}
-
-// Variance returns the variance of recorded observations.
-func (h *histogram) variance() float64 {
- t := float64(h.total())
- if t == 0 {
- return 0
- }
- s := float64(h.sum) / t
- return h.sumOfSquares/t - s*s
-}
-
-// StandardDeviation returns the standard deviation of recorded observations.
-func (h *histogram) standardDeviation() float64 {
- return math.Sqrt(h.variance())
-}
-
-// PercentileBoundary estimates the value that the given fraction of recorded
-// observations are less than.
-func (h *histogram) percentileBoundary(percentile float64) int64 {
- total := h.total()
-
- // Corner cases (make sure result is strictly less than Total())
- if total == 0 {
- return 0
- } else if total == 1 {
- return int64(h.average())
- }
-
- percentOfTotal := round(float64(total) * percentile)
- var runningTotal int64
-
- for i := range h.buckets {
- value := h.buckets[i]
- runningTotal += value
- if runningTotal == percentOfTotal {
- // We hit an exact bucket boundary. If the next bucket has data, it is a
- // good estimate of the value. If the bucket is empty, we interpolate the
- // midpoint between the next bucket's boundary and the next non-zero
- // bucket. If the remaining buckets are all empty, then we use the
- // boundary for the next bucket as the estimate.
- j := uint8(i + 1)
- min := bucketBoundary(j)
- if runningTotal < total {
- for h.buckets[j] == 0 {
- j++
- }
- }
- max := bucketBoundary(j)
- return min + round(float64(max-min)/2)
- } else if runningTotal > percentOfTotal {
- // The value is in this bucket. Interpolate the value.
- delta := runningTotal - percentOfTotal
- percentBucket := float64(value-delta) / float64(value)
- bucketMin := bucketBoundary(uint8(i))
- nextBucketMin := bucketBoundary(uint8(i + 1))
- bucketSize := nextBucketMin - bucketMin
- return bucketMin + round(percentBucket*float64(bucketSize))
- }
- }
- return bucketBoundary(bucketCount - 1)
-}
-
-// Median returns the estimated median of the observed values.
-func (h *histogram) median() int64 {
- return h.percentileBoundary(0.5)
-}
-
-// Add adds other to h.
-func (h *histogram) Add(other timeseries.Observable) {
- o := other.(*histogram)
- if o.valueCount == 0 {
- // Other histogram is empty
- } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
- // Both have a single bucketed value, aggregate them
- h.valueCount += o.valueCount
- } else {
- // Two different values necessitate buckets in this histogram
- h.allocateBuckets()
- if o.valueCount >= 0 {
- h.buckets[o.value] += o.valueCount
- } else {
- for i := range h.buckets {
- h.buckets[i] += o.buckets[i]
- }
- }
- }
- h.sumOfSquares += o.sumOfSquares
- h.sum += o.sum
-}
-
-// Clear resets the histogram to an empty state, removing all observed values.
-func (h *histogram) Clear() {
- h.buckets = nil
- h.value = 0
- h.valueCount = 0
- h.sum = 0
- h.sumOfSquares = 0
-}
-
-// CopyFrom copies from other, which must be a *histogram, into h.
-func (h *histogram) CopyFrom(other timeseries.Observable) {
- o := other.(*histogram)
- if o.valueCount == -1 {
- h.allocateBuckets()
- copy(h.buckets, o.buckets)
- }
- h.sum = o.sum
- h.sumOfSquares = o.sumOfSquares
- h.value = o.value
- h.valueCount = o.valueCount
-}
-
-// Multiply scales the histogram by the specified ratio.
-func (h *histogram) Multiply(ratio float64) {
- if h.valueCount == -1 {
- for i := range h.buckets {
- h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
- }
- } else {
- h.valueCount = int64(float64(h.valueCount) * ratio)
- }
- h.sum = int64(float64(h.sum) * ratio)
- h.sumOfSquares = h.sumOfSquares * ratio
-}
-
-// New creates a new histogram.
-func (h *histogram) New() timeseries.Observable {
- r := new(histogram)
- r.Clear()
- return r
-}
-
-func (h *histogram) String() string {
- return fmt.Sprintf("%d, %f, %d, %d, %v",
- h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
-}
-
-// round returns the closest int64 to the argument
-func round(in float64) int64 {
- return int64(math.Floor(in + 0.5))
-}
-
-// bucketBoundary returns the first value in the bucket.
-func bucketBoundary(bucket uint8) int64 {
- if bucket == 0 {
- return 0
- }
- return 1 << bucket
-}
-
-// bucketData holds data about a specific bucket for use in distTmpl.
-type bucketData struct {
- Lower, Upper int64
- N int64
- Pct, CumulativePct float64
- GraphWidth int
-}
-
-// data holds data about a Distribution for use in distTmpl.
-type data struct {
- Buckets []*bucketData
- Count, Median int64
- Mean, StandardDeviation float64
-}
-
-// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
-const maxHTMLBarWidth = 350.0
-
-// newData returns data representing h for use in distTmpl.
-func (h *histogram) newData() *data {
- // Force the allocation of buckets to simplify the rendering implementation
- h.allocateBuckets()
- // We scale the bars on the right so that the largest bar is
- // maxHTMLBarWidth pixels in width.
- maxBucket := int64(0)
- for _, n := range h.buckets {
- if n > maxBucket {
- maxBucket = n
- }
- }
- total := h.total()
- barsizeMult := maxHTMLBarWidth / float64(maxBucket)
- var pctMult float64
- if total == 0 {
- pctMult = 1.0
- } else {
- pctMult = 100.0 / float64(total)
- }
-
- buckets := make([]*bucketData, len(h.buckets))
- runningTotal := int64(0)
- for i, n := range h.buckets {
- if n == 0 {
- continue
- }
- runningTotal += n
- var upperBound int64
- if i < bucketCount-1 {
- upperBound = bucketBoundary(uint8(i + 1))
- } else {
- upperBound = math.MaxInt64
- }
- buckets[i] = &bucketData{
- Lower: bucketBoundary(uint8(i)),
- Upper: upperBound,
- N: n,
- Pct: float64(n) * pctMult,
- CumulativePct: float64(runningTotal) * pctMult,
- GraphWidth: int(float64(n) * barsizeMult),
- }
- }
- return &data{
- Buckets: buckets,
- Count: total,
- Median: h.median(),
- Mean: h.average(),
- StandardDeviation: h.standardDeviation(),
- }
-}
-
-func (h *histogram) html() template.HTML {
- buf := new(bytes.Buffer)
- if err := distTmpl.Execute(buf, h.newData()); err != nil {
- buf.Reset()
- log.Printf("net/trace: couldn't execute template: %v", err)
- }
- return template.HTML(buf.String())
-}
-
-// Input: data
-var distTmpl = template.Must(template.New("distTmpl").Parse(`
-
-
- Count: {{.Count}} |
- Mean: {{printf "%.0f" .Mean}} |
- StdDev: {{printf "%.0f" .StandardDeviation}} |
- Median: {{.Median}} |
-
-
-
-
-{{range $b := .Buckets}}
-{{if $b}}
-
- [ |
- {{.Lower}}, |
- {{.Upper}}) |
- {{.N}} |
- {{printf "%#.3f" .Pct}}% |
- {{printf "%#.3f" .CumulativePct}}% |
- |
-
-{{end}}
-{{end}}
-
-`))
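
To make the power-of-two bucketing above concrete, here is a small standalone sketch. It copies the unexported log2, getBucket, and bucketBoundary helpers verbatim (they cannot be imported) and prints the bucket each sample value lands in: 0-1 in bucket 0, 2-3 in bucket 1, 4-7 in bucket 2, and so on.

    package main

    import "fmt"

    const bucketCount = 38

    func log2(i int64) int {
        n := 0
        for ; i >= 0x100; i >>= 8 {
            n += 8
        }
        for ; i > 0; i >>= 1 {
            n++
        }
        return n
    }

    func getBucket(i int64) int {
        index := log2(i) - 1
        if index < 0 {
            index = 0
        }
        if index >= bucketCount {
            index = bucketCount - 1
        }
        return index
    }

    func bucketBoundary(bucket uint8) int64 {
        if bucket == 0 {
            return 0
        }
        return 1 << bucket
    }

    func main() {
        for _, v := range []int64{0, 1, 2, 3, 4, 7, 8, 1000} {
            b := getBucket(v)
            fmt.Printf("value %4d -> bucket %2d, range [%d, %d)\n",
                v, b, bucketBoundary(uint8(b)), bucketBoundary(uint8(b+1)))
        }
    }
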
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/trace.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/trace.go
deleted file mode 100644
index 9ee1936273..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/trace.go
+++ /dev/null
@@ -1,1062 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package trace implements tracing of requests and long-lived objects.
-It exports HTTP interfaces on /debug/requests and /debug/events.
-
-A trace.Trace provides tracing for short-lived objects, usually requests.
-A request handler might be implemented like this:
-
- func fooHandler(w http.ResponseWriter, req *http.Request) {
- tr := trace.New("mypkg.Foo", req.URL.Path)
- defer tr.Finish()
- ...
- tr.LazyPrintf("some event %q happened", str)
- ...
- if err := somethingImportant(); err != nil {
- tr.LazyPrintf("somethingImportant failed: %v", err)
- tr.SetError()
- }
- }
-
-The /debug/requests HTTP endpoint organizes the traces by family,
-errors, and duration. It also provides a histogram of request durations
-for each family.
-
-A trace.EventLog provides tracing for long-lived objects, such as RPC
-connections.
-
- // A Fetcher fetches URL paths for a single domain.
- type Fetcher struct {
- domain string
- events trace.EventLog
- }
-
- func NewFetcher(domain string) *Fetcher {
- return &Fetcher{
- domain,
- trace.NewEventLog("mypkg.Fetcher", domain),
- }
- }
-
- func (f *Fetcher) Fetch(path string) (string, error) {
- resp, err := http.Get("http://" + f.domain + "/" + path)
- if err != nil {
- f.events.Errorf("Get(%q) = %v", path, err)
- return "", err
- }
- f.events.Printf("Get(%q) = %s", path, resp.Status)
- ...
- }
-
- func (f *Fetcher) Close() error {
- f.events.Finish()
- return nil
- }
-
-The /debug/events HTTP endpoint organizes the event logs by family and
-by time since the last error. The expanded view displays recent log
-entries and the log's call stack.
-*/
-package trace
-
-import (
- "bytes"
- "fmt"
- "html/template"
- "io"
- "log"
- "net"
- "net/http"
- "runtime"
- "sort"
- "strconv"
- "sync"
- "sync/atomic"
- "time"
-
- "golang.org/x/net/context"
- "golang.org/x/net/internal/timeseries"
-)
-
-// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing.
-// FOR DEBUGGING ONLY. This will slow down the program.
-var DebugUseAfterFinish = false
-
-// AuthRequest determines whether a specific request is permitted to load the
-// /debug/requests or /debug/events pages.
-//
-// It returns two bools; the first indicates whether the page may be viewed at all,
-// and the second indicates whether sensitive events will be shown.
-//
-// AuthRequest may be replaced by a program to customise its authorisation requirements.
-//
-// The default AuthRequest function returns (true, true) iff the request comes from localhost/127.0.0.1/[::1].
-var AuthRequest = func(req *http.Request) (any, sensitive bool) {
- // RemoteAddr is commonly in the form "IP" or "IP:port".
- // If it is in the form "IP:port", split off the port.
- host, _, err := net.SplitHostPort(req.RemoteAddr)
- if err != nil {
- host = req.RemoteAddr
- }
- switch host {
- case "localhost", "127.0.0.1", "::1":
- return true, true
- default:
- return false, false
- }
-}
-
-func init() {
- http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) {
- any, sensitive := AuthRequest(req)
- if !any {
- http.Error(w, "not allowed", http.StatusUnauthorized)
- return
- }
- w.Header().Set("Content-Type", "text/html; charset=utf-8")
- Render(w, req, sensitive)
- })
- http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) {
- any, sensitive := AuthRequest(req)
- if !any {
- http.Error(w, "not allowed", http.StatusUnauthorized)
- return
- }
- w.Header().Set("Content-Type", "text/html; charset=utf-8")
- RenderEvents(w, req, sensitive)
- })
-}
-
-// Render renders the HTML page typically served at /debug/requests.
-// It does not do any auth checking; see AuthRequest for the default auth check
-// used by the handler registered on http.DefaultServeMux.
-// req may be nil.
-func Render(w io.Writer, req *http.Request, sensitive bool) {
- data := &struct {
- Families []string
- ActiveTraceCount map[string]int
- CompletedTraces map[string]*family
-
- // Set when a bucket has been selected.
- Traces traceList
- Family string
- Bucket int
- Expanded bool
- Traced bool
- Active bool
- ShowSensitive bool // whether to show sensitive events
-
- Histogram template.HTML
- HistogramWindow string // e.g. "last minute", "last hour", "all time"
-
- // If non-zero, the set of traces is a partial set,
- // and this is the total number.
- Total int
- }{
- CompletedTraces: completedTraces,
- }
-
- data.ShowSensitive = sensitive
- if req != nil {
- // Allow show_sensitive=0 to force hiding of sensitive data for testing.
- // This only goes one way; you can't use show_sensitive=1 to see things.
- if req.FormValue("show_sensitive") == "0" {
- data.ShowSensitive = false
- }
-
- if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
- data.Expanded = exp
- }
- if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil {
- data.Traced = exp
- }
- }
-
- completedMu.RLock()
- data.Families = make([]string, 0, len(completedTraces))
- for fam := range completedTraces {
- data.Families = append(data.Families, fam)
- }
- completedMu.RUnlock()
- sort.Strings(data.Families)
-
- // We are careful here to minimize the time spent locking activeMu,
- // since that lock is required every time an RPC starts and finishes.
- data.ActiveTraceCount = make(map[string]int, len(data.Families))
- activeMu.RLock()
- for fam, s := range activeTraces {
- data.ActiveTraceCount[fam] = s.Len()
- }
- activeMu.RUnlock()
-
- var ok bool
- data.Family, data.Bucket, ok = parseArgs(req)
- switch {
- case !ok:
- // No-op
- case data.Bucket == -1:
- data.Active = true
- n := data.ActiveTraceCount[data.Family]
- data.Traces = getActiveTraces(data.Family)
- if len(data.Traces) < n {
- data.Total = n
- }
- case data.Bucket < bucketsPerFamily:
- if b := lookupBucket(data.Family, data.Bucket); b != nil {
- data.Traces = b.Copy(data.Traced)
- }
- default:
- if f := getFamily(data.Family, false); f != nil {
- var obs timeseries.Observable
- f.LatencyMu.RLock()
- switch o := data.Bucket - bucketsPerFamily; o {
- case 0:
- obs = f.Latency.Minute()
- data.HistogramWindow = "last minute"
- case 1:
- obs = f.Latency.Hour()
- data.HistogramWindow = "last hour"
- case 2:
- obs = f.Latency.Total()
- data.HistogramWindow = "all time"
- }
- f.LatencyMu.RUnlock()
- if obs != nil {
- data.Histogram = obs.(*histogram).html()
- }
- }
- }
-
- if data.Traces != nil {
- defer data.Traces.Free()
- sort.Sort(data.Traces)
- }
-
- completedMu.RLock()
- defer completedMu.RUnlock()
- if err := pageTmpl.ExecuteTemplate(w, "Page", data); err != nil {
- log.Printf("net/trace: Failed executing template: %v", err)
- }
-}
-
-func parseArgs(req *http.Request) (fam string, b int, ok bool) {
- if req == nil {
- return "", 0, false
- }
- fam, bStr := req.FormValue("fam"), req.FormValue("b")
- if fam == "" || bStr == "" {
- return "", 0, false
- }
- b, err := strconv.Atoi(bStr)
- if err != nil || b < -1 {
- return "", 0, false
- }
-
- return fam, b, true
-}
-
-func lookupBucket(fam string, b int) *traceBucket {
- f := getFamily(fam, false)
- if f == nil || b < 0 || b >= len(f.Buckets) {
- return nil
- }
- return f.Buckets[b]
-}
-
-type contextKeyT string
-
-var contextKey = contextKeyT("golang.org/x/net/trace.Trace")
-
-// NewContext returns a copy of the parent context
-// and associates it with a Trace.
-func NewContext(ctx context.Context, tr Trace) context.Context {
- return context.WithValue(ctx, contextKey, tr)
-}
-
-// FromContext returns the Trace bound to the context, if any.
-func FromContext(ctx context.Context) (tr Trace, ok bool) {
- tr, ok = ctx.Value(contextKey).(Trace)
- return
-}
-
-// Trace represents an active request.
-type Trace interface {
- // LazyLog adds x to the event log. It will be evaluated each time the
- // /debug/requests page is rendered. Any memory referenced by x will be
- // pinned until the trace is finished and later discarded.
- LazyLog(x fmt.Stringer, sensitive bool)
-
- // LazyPrintf evaluates its arguments with fmt.Sprintf each time the
- // /debug/requests page is rendered. Any memory referenced by a will be
- // pinned until the trace is finished and later discarded.
- LazyPrintf(format string, a ...interface{})
-
- // SetError declares that this trace resulted in an error.
- SetError()
-
- // SetRecycler sets a recycler for the trace.
- // f will be called for each event passed to LazyLog at a time when
- // it is no longer required, whether while the trace is still active
- // and the event is discarded, or when a completed trace is discarded.
- SetRecycler(f func(interface{}))
-
- // SetTraceInfo sets the trace info for the trace.
- // This is currently unused.
- SetTraceInfo(traceID, spanID uint64)
-
- // SetMaxEvents sets the maximum number of events that will be stored
- // in the trace. This has no effect if any events have already been
- // added to the trace.
- SetMaxEvents(m int)
-
- // Finish declares that this trace is complete.
- // The trace should not be used after calling this method.
- Finish()
-}
-
-type lazySprintf struct {
- format string
- a []interface{}
-}
-
-func (l *lazySprintf) String() string {
- return fmt.Sprintf(l.format, l.a...)
-}
-
-// New returns a new Trace with the specified family and title.
-func New(family, title string) Trace {
- tr := newTrace()
- tr.ref()
- tr.Family, tr.Title = family, title
- tr.Start = time.Now()
- tr.events = make([]event, 0, maxEventsPerTrace)
-
- activeMu.RLock()
- s := activeTraces[tr.Family]
- activeMu.RUnlock()
- if s == nil {
- activeMu.Lock()
- s = activeTraces[tr.Family] // check again
- if s == nil {
- s = new(traceSet)
- activeTraces[tr.Family] = s
- }
- activeMu.Unlock()
- }
- s.Add(tr)
-
- // Trigger allocation of the completed trace structure for this family.
- // This will cause the family to be present in the request page during
- // the first trace of this family. We don't care about the return value,
- // nor is there any need for this to run inline, so we execute it in its
- // own goroutine, but only if the family isn't allocated yet.
- completedMu.RLock()
- if _, ok := completedTraces[tr.Family]; !ok {
- go allocFamily(tr.Family)
- }
- completedMu.RUnlock()
-
- return tr
-}
-
-func (tr *trace) Finish() {
- tr.Elapsed = time.Now().Sub(tr.Start)
- if DebugUseAfterFinish {
- buf := make([]byte, 4<<10) // 4 KB should be enough
- n := runtime.Stack(buf, false)
- tr.finishStack = buf[:n]
- }
-
- activeMu.RLock()
- m := activeTraces[tr.Family]
- activeMu.RUnlock()
- m.Remove(tr)
-
- f := getFamily(tr.Family, true)
- for _, b := range f.Buckets {
- if b.Cond.match(tr) {
- b.Add(tr)
- }
- }
- // Add a sample of elapsed time as microseconds to the family's timeseries
- h := new(histogram)
- h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3)
- f.LatencyMu.Lock()
- f.Latency.Add(h)
- f.LatencyMu.Unlock()
-
- tr.unref() // matches ref in New
-}
-
-const (
- bucketsPerFamily = 9
- tracesPerBucket = 10
- maxActiveTraces = 20 // Maximum number of active traces to show.
- maxEventsPerTrace = 10
- numHistogramBuckets = 38
-)
-
-var (
- // The active traces.
- activeMu sync.RWMutex
- activeTraces = make(map[string]*traceSet) // family -> traces
-
- // Families of completed traces.
- completedMu sync.RWMutex
- completedTraces = make(map[string]*family) // family -> traces
-)
-
-type traceSet struct {
- mu sync.RWMutex
- m map[*trace]bool
-
- // We could avoid the entire map scan in FirstN by having a slice of all the traces
- // ordered by start time, and an index into that from the trace struct, with a periodic
- // repack of the slice after enough traces finish; we could also use a skip list or similar.
- // However, that would shift some of the expense from /debug/requests time to RPC time,
- // which is probably the wrong trade-off.
-}
-
-func (ts *traceSet) Len() int {
- ts.mu.RLock()
- defer ts.mu.RUnlock()
- return len(ts.m)
-}
-
-func (ts *traceSet) Add(tr *trace) {
- ts.mu.Lock()
- if ts.m == nil {
- ts.m = make(map[*trace]bool)
- }
- ts.m[tr] = true
- ts.mu.Unlock()
-}
-
-func (ts *traceSet) Remove(tr *trace) {
- ts.mu.Lock()
- delete(ts.m, tr)
- ts.mu.Unlock()
-}
-
-// FirstN returns the first n traces ordered by time.
-func (ts *traceSet) FirstN(n int) traceList {
- ts.mu.RLock()
- defer ts.mu.RUnlock()
-
- if n > len(ts.m) {
- n = len(ts.m)
- }
- trl := make(traceList, 0, n)
-
- // Fast path for when no selectivity is needed.
- if n == len(ts.m) {
- for tr := range ts.m {
- tr.ref()
- trl = append(trl, tr)
- }
- sort.Sort(trl)
- return trl
- }
-
- // Pick the oldest n traces.
- // This is inefficient. See the comment in the traceSet struct.
- for tr := range ts.m {
- // Put the first n traces into trl in the order they occur.
- // When we have n, sort trl, and thereafter maintain its order.
- if len(trl) < n {
- tr.ref()
- trl = append(trl, tr)
- if len(trl) == n {
- // This is guaranteed to happen exactly once during this loop.
- sort.Sort(trl)
- }
- continue
- }
- if tr.Start.After(trl[n-1].Start) {
- continue
- }
-
- // Find where to insert this one.
- tr.ref()
- i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) })
- trl[n-1].unref()
- copy(trl[i+1:], trl[i:])
- trl[i] = tr
- }
-
- return trl
-}
-
-func getActiveTraces(fam string) traceList {
- activeMu.RLock()
- s := activeTraces[fam]
- activeMu.RUnlock()
- if s == nil {
- return nil
- }
- return s.FirstN(maxActiveTraces)
-}
-
-func getFamily(fam string, allocNew bool) *family {
- completedMu.RLock()
- f := completedTraces[fam]
- completedMu.RUnlock()
- if f == nil && allocNew {
- f = allocFamily(fam)
- }
- return f
-}
-
-func allocFamily(fam string) *family {
- completedMu.Lock()
- defer completedMu.Unlock()
- f := completedTraces[fam]
- if f == nil {
- f = newFamily()
- completedTraces[fam] = f
- }
- return f
-}
-
-// family represents a set of trace buckets and associated latency information.
-type family struct {
- // traces may occur in multiple buckets.
- Buckets [bucketsPerFamily]*traceBucket
-
- // latency time series
- LatencyMu sync.RWMutex
- Latency *timeseries.MinuteHourSeries
-}
-
-func newFamily() *family {
- return &family{
- Buckets: [bucketsPerFamily]*traceBucket{
- {Cond: minCond(0)},
- {Cond: minCond(50 * time.Millisecond)},
- {Cond: minCond(100 * time.Millisecond)},
- {Cond: minCond(200 * time.Millisecond)},
- {Cond: minCond(500 * time.Millisecond)},
- {Cond: minCond(1 * time.Second)},
- {Cond: minCond(10 * time.Second)},
- {Cond: minCond(100 * time.Second)},
- {Cond: errorCond{}},
- },
- Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }),
- }
-}
-
-// traceBucket represents a size-capped bucket of historic traces,
-// along with a condition for a trace to belong to the bucket.
-type traceBucket struct {
- Cond cond
-
- // Ring buffer implementation of a fixed-size FIFO queue.
- mu sync.RWMutex
- buf [tracesPerBucket]*trace
- start int // < tracesPerBucket
- length int // <= tracesPerBucket
-}
-
-func (b *traceBucket) Add(tr *trace) {
- b.mu.Lock()
- defer b.mu.Unlock()
-
- i := b.start + b.length
- if i >= tracesPerBucket {
- i -= tracesPerBucket
- }
- if b.length == tracesPerBucket {
- // "Remove" an element from the bucket.
- b.buf[i].unref()
- b.start++
- if b.start == tracesPerBucket {
- b.start = 0
- }
- }
- b.buf[i] = tr
- if b.length < tracesPerBucket {
- b.length++
- }
- tr.ref()
-}
-
-// Copy returns a copy of the traces in the bucket.
-// If tracedOnly is true, only the traces with trace information will be returned.
-// The logs will be ref'd before returning; the caller should call
-// the Free method when it is done with them.
-// TODO(dsymonds): keep track of traced requests in separate buckets.
-func (b *traceBucket) Copy(tracedOnly bool) traceList {
- b.mu.RLock()
- defer b.mu.RUnlock()
-
- trl := make(traceList, 0, b.length)
- for i, x := 0, b.start; i < b.length; i++ {
- tr := b.buf[x]
- if !tracedOnly || tr.spanID != 0 {
- tr.ref()
- trl = append(trl, tr)
- }
- x++
- if x == b.length {
- x = 0
- }
- }
- return trl
-}
-
-func (b *traceBucket) Empty() bool {
- b.mu.RLock()
- defer b.mu.RUnlock()
- return b.length == 0
-}
-
-// cond represents a condition on a trace.
-type cond interface {
- match(t *trace) bool
- String() string
-}
-
-type minCond time.Duration
-
-func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) }
-func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) }
-
-type errorCond struct{}
-
-func (e errorCond) match(t *trace) bool { return t.IsError }
-func (e errorCond) String() string { return "errors" }
-
-type traceList []*trace
-
-// Free calls unref on each element of the list.
-func (trl traceList) Free() {
- for _, t := range trl {
- t.unref()
- }
-}
-
-// traceList may be sorted in reverse chronological order.
-func (trl traceList) Len() int { return len(trl) }
-func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) }
-func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] }
-
-// An event is a timestamped log entry in a trace.
-type event struct {
- When time.Time
- Elapsed time.Duration // since previous event in trace
- NewDay bool // whether this event is on a different day to the previous event
- Recyclable bool // whether this event was passed via LazyLog
- What interface{} // string or fmt.Stringer
- Sensitive bool // whether this event contains sensitive information
-}
-
-// WhenString returns a string representation of the elapsed time of the event.
-// It will include the date if midnight was crossed.
-func (e event) WhenString() string {
- if e.NewDay {
- return e.When.Format("2006/01/02 15:04:05.000000")
- }
- return e.When.Format("15:04:05.000000")
-}
-
-// discarded represents a number of discarded events.
-// It is stored as *discarded to make it easier to update in-place.
-type discarded int
-
-func (d *discarded) String() string {
- return fmt.Sprintf("(%d events discarded)", int(*d))
-}
-
-// trace represents an active or complete request,
-// either sent or received by this program.
-type trace struct {
- // Family is the top-level grouping of traces to which this belongs.
- Family string
-
- // Title is the title of this trace.
- Title string
-
- // Timing information.
- Start time.Time
- Elapsed time.Duration // zero while active
-
- // Trace information if non-zero.
- traceID uint64
- spanID uint64
-
- // Whether this trace resulted in an error.
- IsError bool
-
- // Append-only sequence of events (modulo discards).
- mu sync.RWMutex
- events []event
-
- refs int32 // how many buckets this is in
- recycler func(interface{})
- disc discarded // scratch space to avoid allocation
-
- finishStack []byte // where finish was called, if DebugUseAfterFinish is set
-}
-
-func (tr *trace) reset() {
- // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
- tr.Family = ""
- tr.Title = ""
- tr.Start = time.Time{}
- tr.Elapsed = 0
- tr.traceID = 0
- tr.spanID = 0
- tr.IsError = false
- tr.events = nil
- tr.refs = 0
- tr.recycler = nil
- tr.disc = 0
- tr.finishStack = nil
-}
-
-// delta returns the elapsed time since the last event or the trace start,
-// and whether it spans midnight.
-// L >= tr.mu
-func (tr *trace) delta(t time.Time) (time.Duration, bool) {
- if len(tr.events) == 0 {
- return t.Sub(tr.Start), false
- }
- prev := tr.events[len(tr.events)-1].When
- return t.Sub(prev), prev.Day() != t.Day()
-}
-
-func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
- if DebugUseAfterFinish && tr.finishStack != nil {
- buf := make([]byte, 4<<10) // 4 KB should be enough
- n := runtime.Stack(buf, false)
- log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n])
- }
-
- /*
- NOTE TO DEBUGGERS
-
- If you are here because your program panicked in this code,
- it is almost definitely the fault of code using this package,
- and very unlikely to be the fault of this code.
-
- The most likely scenario is that some code elsewhere is using
- a trace.Trace after its Finish method is called.
- You can temporarily set the DebugUseAfterFinish var
- to help discover where that is; do not leave that var set,
- since it makes this package much less efficient.
- */
-
- e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive}
- tr.mu.Lock()
- e.Elapsed, e.NewDay = tr.delta(e.When)
- if len(tr.events) < cap(tr.events) {
- tr.events = append(tr.events, e)
- } else {
- // Discard the middle events.
- di := int((cap(tr.events) - 1) / 2)
- if d, ok := tr.events[di].What.(*discarded); ok {
- (*d)++
- } else {
- // disc starts at two to account for the event it is replacing,
- // plus the next one that we are about to drop.
- tr.disc = 2
- if tr.recycler != nil && tr.events[di].Recyclable {
- go tr.recycler(tr.events[di].What)
- }
- tr.events[di].What = &tr.disc
- }
- // The timestamp of the discarded meta-event should be
- // the time of the last event it is representing.
- tr.events[di].When = tr.events[di+1].When
-
- if tr.recycler != nil && tr.events[di+1].Recyclable {
- go tr.recycler(tr.events[di+1].What)
- }
- copy(tr.events[di+1:], tr.events[di+2:])
- tr.events[cap(tr.events)-1] = e
- }
- tr.mu.Unlock()
-}
-
-func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) {
- tr.addEvent(x, true, sensitive)
-}
-
-func (tr *trace) LazyPrintf(format string, a ...interface{}) {
- tr.addEvent(&lazySprintf{format, a}, false, false)
-}
-
-func (tr *trace) SetError() { tr.IsError = true }
-
-func (tr *trace) SetRecycler(f func(interface{})) {
- tr.recycler = f
-}
-
-func (tr *trace) SetTraceInfo(traceID, spanID uint64) {
- tr.traceID, tr.spanID = traceID, spanID
-}
-
-func (tr *trace) SetMaxEvents(m int) {
- // Always keep at least three events: first, discarded count, last.
- if len(tr.events) == 0 && m > 3 {
- tr.events = make([]event, 0, m)
- }
-}
-
-func (tr *trace) ref() {
- atomic.AddInt32(&tr.refs, 1)
-}
-
-func (tr *trace) unref() {
- if atomic.AddInt32(&tr.refs, -1) == 0 {
- if tr.recycler != nil {
- // freeTrace clears tr, so we hold tr.recycler and tr.events here.
- go func(f func(interface{}), es []event) {
- for _, e := range es {
- if e.Recyclable {
- f(e.What)
- }
- }
- }(tr.recycler, tr.events)
- }
-
- freeTrace(tr)
- }
-}
-
-func (tr *trace) When() string {
- return tr.Start.Format("2006/01/02 15:04:05.000000")
-}
-
-func (tr *trace) ElapsedTime() string {
- t := tr.Elapsed
- if t == 0 {
- // Active trace.
- t = time.Since(tr.Start)
- }
- return fmt.Sprintf("%.6f", t.Seconds())
-}
-
-func (tr *trace) Events() []event {
- tr.mu.RLock()
- defer tr.mu.RUnlock()
- return tr.events
-}
-
-var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool?
-
-// newTrace returns a trace ready to use.
-func newTrace() *trace {
- select {
- case tr := <-traceFreeList:
- return tr
- default:
- return new(trace)
- }
-}
-
-// freeTrace adds tr to traceFreeList if there's room.
-// This is non-blocking.
-func freeTrace(tr *trace) {
- if DebugUseAfterFinish {
- return // never reuse
- }
- tr.reset()
- select {
- case traceFreeList <- tr:
- default:
- }
-}
-
-func elapsed(d time.Duration) string {
- b := []byte(fmt.Sprintf("%.6f", d.Seconds()))
-
- // For subsecond durations, blank all zeros before decimal point,
- // and all zeros between the decimal point and the first non-zero digit.
- if d < time.Second {
- dot := bytes.IndexByte(b, '.')
- for i := 0; i < dot; i++ {
- b[i] = ' '
- }
- for i := dot + 1; i < len(b); i++ {
- if b[i] == '0' {
- b[i] = ' '
- } else {
- break
- }
- }
- }
-
- return string(b)
-}
-
-var pageTmpl = template.Must(template.New("Page").Funcs(template.FuncMap{
- "elapsed": elapsed,
- "add": func(a, b int) int { return a + b },
-}).Parse(pageHTML))
-
-const pageHTML = `
-{{template "Prolog" .}}
-{{template "StatusTable" .}}
-{{template "Epilog" .}}
-
-{{define "Prolog"}}
-
-
- /debug/requests
-
-
-
-
-/debug/requests
-{{end}} {{/* end of Prolog */}}
-
-{{define "StatusTable"}}
-
-{{end}} {{/* end of StatusTable */}}
-
-{{define "Epilog"}}
-{{if $.Traces}}
-
-Family: {{$.Family}}
-
-{{if or $.Expanded $.Traced}}
- [Normal/Summary]
-{{else}}
- [Normal/Summary]
-{{end}}
-
-{{if or (not $.Expanded) $.Traced}}
- [Normal/Expanded]
-{{else}}
- [Normal/Expanded]
-{{end}}
-
-{{if not $.Active}}
- {{if or $.Expanded (not $.Traced)}}
- [Traced/Summary]
- {{else}}
- [Traced/Summary]
- {{end}}
- {{if or (not $.Expanded) (not $.Traced)}}
- [Traced/Expanded]
- {{else}}
- [Traced/Expanded]
- {{end}}
-{{end}}
-
-{{if $.Total}}
-Showing {{len $.Traces}} of {{$.Total}} traces.
-{{end}}
-
-
-
- {{if $.Active}}Active{{else}}Completed{{end}} Requests
-
- When | Elapsed (s) |
- {{range $tr := $.Traces}}
-
- {{$tr.When}} |
- {{$tr.ElapsedTime}} |
- {{$tr.Title}} |
- {{/* TODO: include traceID/spanID */}}
-
- {{if $.Expanded}}
- {{range $tr.Events}}
-
- {{.WhenString}} |
- {{elapsed .Elapsed}} |
- {{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}} |
-
- {{end}}
- {{end}}
- {{end}}
-
-{{end}} {{/* if $.Traces */}}
-
-{{if $.Histogram}}
-Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}
-{{$.Histogram}}
-{{end}} {{/* if $.Histogram */}}
-
-
-
-{{end}} {{/* end of Epilog */}}
-`
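
As the AuthRequest documentation above notes, the localhost-only default can be replaced by the program. A hedged sketch of doing so and serving the /debug/requests and /debug/events pages registered by this package's init; the 10.0.0.0/8 allowlist is illustrative, not a recommendation.

    package main

    import (
        "log"
        "net"
        "net/http"

        "golang.org/x/net/trace"
    )

    func main() {
        // Allow loopback clients to see everything and 10.0.0.0/8 clients to see
        // the pages with sensitive events redacted; reject everyone else.
        trace.AuthRequest = func(req *http.Request) (allowed, sensitive bool) {
            host, _, err := net.SplitHostPort(req.RemoteAddr)
            if err != nil {
                host = req.RemoteAddr
            }
            ip := net.ParseIP(host)
            switch {
            case ip == nil:
                return false, false
            case ip.IsLoopback():
                return true, true
            case ip.To4() != nil && ip.To4()[0] == 10:
                return true, false
            default:
                return false, false
            }
        }

        // The package's init registered /debug/requests and /debug/events on
        // http.DefaultServeMux, so serving the default mux exposes both pages.
        log.Fatal(http.ListenAndServe(":8080", nil))
    }
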
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/.travis.yml b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/.travis.yml
deleted file mode 100644
index a035125c35..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/.travis.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-language: go
-
-go:
- - 1.3
- - 1.4
-
-install:
- - export GOPATH="$HOME/gopath"
- - mkdir -p "$GOPATH/src/golang.org/x"
- - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
- - go get -v -t -d golang.org/x/oauth2/...
-
-script:
- - go test -v golang.org/x/oauth2/...
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/AUTHORS b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/AUTHORS
deleted file mode 100644
index 15167cd746..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/CONTRIBUTING.md
deleted file mode 100644
index 46aa2b12dd..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/CONTRIBUTING.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Contributing to Go
-
-Go is an open source project.
-
-It is the work of hundreds of contributors. We appreciate your help!
-
-
-## Filing issues
-
-When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
-
-1. What version of Go are you using (`go version`)?
-2. What operating system and processor architecture are you using?
-3. What did you do?
-4. What did you expect to see?
-5. What did you see instead?
-
-General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
-The gophers there will answer or ask you to file an issue if you've tripped over a bug.
-
-## Contributing code
-
-Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
-before sending patches.
-
-**We do not accept GitHub pull requests**
-(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
-
-Unless otherwise noted, the Go source files are distributed under
-the BSD-style license found in the LICENSE file.
-
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/CONTRIBUTORS
deleted file mode 100644
index 1c4577e968..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/LICENSE b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/LICENSE
deleted file mode 100644
index d02f24fd52..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The oauth2 Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/README.md b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/README.md
deleted file mode 100644
index 0d5141733f..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/README.md
+++ /dev/null
@@ -1,64 +0,0 @@
-# OAuth2 for Go
-
-[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
-
-The oauth2 package contains a client implementation for the OAuth 2.0 spec.
-
-## Installation
-
-~~~~
-go get golang.org/x/oauth2
-~~~~
-
-See godoc for further documentation and examples.
-
-* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
-* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
-
-
-## App Engine
-
-In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
-of the [`context.Context`](https://golang.org/x/net/context#Context) type from
-the `golang.org/x/net/context` package.
-
-This means it's no longer possible to use the "Classic App Engine"
-`appengine.Context` type with the `oauth2` package. (You're using
-Classic App Engine if you import the package `"appengine"`.)
-
-To work around this, you may use the new `"google.golang.org/appengine"`
-package. This package has almost the same API as the `"appengine"` package,
-but it can be fetched with `go get` and used on "Managed VMs" as well as
-Classic App Engine.
-
-See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
-for information on updating your app.
-
-If you don't want to update your entire app to use the new App Engine packages,
-you may use both sets of packages in parallel, using only the new packages
-with the `oauth2` package.
-
- import (
- "golang.org/x/net/context"
- "golang.org/x/oauth2"
- "golang.org/x/oauth2/google"
- newappengine "google.golang.org/appengine"
- newurlfetch "google.golang.org/appengine/urlfetch"
-
- "appengine"
- )
-
- func handler(w http.ResponseWriter, r *http.Request) {
- var c appengine.Context = appengine.NewContext(r)
- c.Infof("Logging a message with the old package")
-
- var ctx context.Context = newappengine.NewContext(r)
- client := &http.Client{
- Transport: &oauth2.Transport{
- Source: google.AppEngineTokenSource(ctx, "scope"),
- Base: &newurlfetch.Transport{Context: ctx},
- },
- }
- client.Get("...")
- }
-
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/client_appengine.go
deleted file mode 100644
index 8962c49d1d..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/client_appengine.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-// App Engine hooks.
-
-package oauth2
-
-import (
- "net/http"
-
- "golang.org/x/net/context"
- "golang.org/x/oauth2/internal"
- "google.golang.org/appengine/urlfetch"
-)
-
-func init() {
- internal.RegisterContextClientFunc(contextClientAppEngine)
-}
-
-func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
- return urlfetch.Client(ctx), nil
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine.go
deleted file mode 100644
index dc993efb5e..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package google
-
-import (
- "sort"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/context"
- "golang.org/x/oauth2"
-)
-
-// Set at init time by appenginevm_hook.go. If true, we are on App Engine Managed VMs.
-var appengineVM bool
-
-// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
-var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
-
-// AppEngineTokenSource returns a token source that fetches tokens
-// issued to the current App Engine application's service account.
-// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
-// that involves user accounts, see oauth2.Config instead.
-//
-// The provided context must have come from appengine.NewContext.
-func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
- if appengineTokenFunc == nil {
- panic("google: AppEngineTokenSource can only be used on App Engine.")
- }
- scopes := append([]string{}, scope...)
- sort.Strings(scopes)
- return &appEngineTokenSource{
- ctx: ctx,
- scopes: scopes,
- key: strings.Join(scopes, " "),
- }
-}
-
-// aeTokens caches fetched tokens so they can be reused until they expire.
-var (
- aeTokensMu sync.Mutex
- aeTokens = make(map[string]*tokenLock) // key is space-separated scopes
-)
-
-type tokenLock struct {
- mu sync.Mutex // guards t; held while fetching or updating t
- t *oauth2.Token
-}
-
-type appEngineTokenSource struct {
- ctx context.Context
- scopes []string
- key string // to aeTokens map; space-separated scopes
-}
-
-func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
- if appengineTokenFunc == nil {
- panic("google: AppEngineTokenSource can only be used on App Engine.")
- }
-
- aeTokensMu.Lock()
- tok, ok := aeTokens[ts.key]
- if !ok {
- tok = &tokenLock{}
- aeTokens[ts.key] = tok
- }
- aeTokensMu.Unlock()
-
- tok.mu.Lock()
- defer tok.mu.Unlock()
- if tok.t.Valid() {
- return tok.t, nil
- }
- access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
- if err != nil {
- return nil, err
- }
- tok.t = &oauth2.Token{
- AccessToken: access,
- Expiry: exp,
- }
- return tok.t, nil
-}
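
A sketch (not from the removed file) of how AppEngineTokenSource is typically combined with oauth2.NewClient inside an App Engine request handler. It assumes the google.golang.org/appengine package, whose NewContext returned the x/net context type this vintage of the code expects; the scope and object URL are illustrative.

    package app

    import (
        "fmt"
        "net/http"

        "golang.org/x/oauth2"
        "golang.org/x/oauth2/google"
        "google.golang.org/appengine"
    )

    func init() {
        http.HandleFunc("/fetch", handler)
    }

    func handler(w http.ResponseWriter, r *http.Request) {
        ctx := appengine.NewContext(r)
        ts := google.AppEngineTokenSource(ctx, "https://www.googleapis.com/auth/devstorage.read_only")
        client := oauth2.NewClient(ctx, ts)

        resp, err := client.Get("https://storage.googleapis.com/example-bucket/example-object")
        if err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        defer resp.Body.Close()
        fmt.Fprintf(w, "fetched with status %s\n", resp.Status)
    }
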
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine_hook.go
deleted file mode 100644
index 4f42c8b343..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine_hook.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package google
-
-import "google.golang.org/appengine"
-
-func init() {
- appengineTokenFunc = appengine.AccessToken
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appenginevm_hook.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appenginevm_hook.go
deleted file mode 100644
index 633611cc3a..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appenginevm_hook.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2015 The oauth2 Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appenginevm
-
-package google
-
-import "google.golang.org/appengine"
-
-func init() {
- appengineVM = true
- appengineTokenFunc = appengine.AccessToken
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/default.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/default.go
deleted file mode 100644
index b952362977..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/default.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package google
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "io/ioutil"
- "net/http"
- "os"
- "path/filepath"
- "runtime"
-
- "golang.org/x/net/context"
- "golang.org/x/oauth2"
- "golang.org/x/oauth2/jwt"
- "google.golang.org/cloud/compute/metadata"
-)
-
-// DefaultClient returns an HTTP Client that uses the
-// DefaultTokenSource to obtain authentication credentials.
-//
-// This client should be used when developing services
-// that run on Google App Engine or Google Compute Engine
-// and use "Application Default Credentials."
-//
-// For more details, see:
-// https://developers.google.com/accounts/docs/application-default-credentials
-//
-func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
- ts, err := DefaultTokenSource(ctx, scope...)
- if err != nil {
- return nil, err
- }
- return oauth2.NewClient(ctx, ts), nil
-}
-
-// DefaultTokenSource is a token source that uses
-// "Application Default Credentials".
-//
-// It looks for credentials in the following places,
-// preferring the first location found:
-//
-// 1. A JSON file whose path is specified by the
-// GOOGLE_APPLICATION_CREDENTIALS environment variable.
-// 2. A JSON file in a location known to the gcloud command-line tool.
-// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
-// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
-// 3. On Google App Engine it uses the appengine.AccessToken function.
-// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches
-// credentials from the metadata server.
-// (In this final case any provided scopes are ignored.)
-//
-// For more details, see:
-// https://developers.google.com/accounts/docs/application-default-credentials
-//
-func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
- // First, try the environment variable.
- const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
- if filename := os.Getenv(envVar); filename != "" {
- ts, err := tokenSourceFromFile(ctx, filename, scope)
- if err != nil {
- return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err)
- }
- return ts, nil
- }
-
- // Second, try a well-known file.
- filename := wellKnownFile()
- _, err := os.Stat(filename)
- if err == nil {
- ts, err2 := tokenSourceFromFile(ctx, filename, scope)
- if err2 == nil {
- return ts, nil
- }
- err = err2
- } else if os.IsNotExist(err) {
- err = nil // ignore this error
- }
- if err != nil {
- return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
- }
-
- // Third, if we're on Google App Engine use those credentials.
- if appengineTokenFunc != nil && !appengineVM {
- return AppEngineTokenSource(ctx, scope...), nil
- }
-
- // Fourth, if we're on Google Compute Engine use the metadata server.
- if metadata.OnGCE() {
- return ComputeTokenSource(""), nil
- }
-
- // None are found; return helpful error.
- const url = "https://developers.google.com/accounts/docs/application-default-credentials"
- return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url)
-}
-
-func wellKnownFile() string {
- const f = "application_default_credentials.json"
- if runtime.GOOS == "windows" {
- return filepath.Join(os.Getenv("APPDATA"), "gcloud", f)
- }
- return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f)
-}
-
-func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) {
- b, err := ioutil.ReadFile(filename)
- if err != nil {
- return nil, err
- }
- var d struct {
- // Common fields
- Type string
- ClientID string `json:"client_id"`
-
- // User Credential fields
- ClientSecret string `json:"client_secret"`
- RefreshToken string `json:"refresh_token"`
-
- // Service Account fields
- ClientEmail string `json:"client_email"`
- PrivateKeyID string `json:"private_key_id"`
- PrivateKey string `json:"private_key"`
- }
- if err := json.Unmarshal(b, &d); err != nil {
- return nil, err
- }
- switch d.Type {
- case "authorized_user":
- cfg := &oauth2.Config{
- ClientID: d.ClientID,
- ClientSecret: d.ClientSecret,
- Scopes: append([]string{}, scopes...), // copy
- Endpoint: Endpoint,
- }
- tok := &oauth2.Token{RefreshToken: d.RefreshToken}
- return cfg.TokenSource(ctx, tok), nil
- case "service_account":
- cfg := &jwt.Config{
- Email: d.ClientEmail,
- PrivateKey: []byte(d.PrivateKey),
- Scopes: append([]string{}, scopes...), // copy
- TokenURL: JWTTokenURL,
- }
- return cfg.TokenSource(ctx), nil
- case "":
- return nil, errors.New("missing 'type' field in credentials")
- default:
- return nil, fmt.Errorf("unknown credential type: %q", d.Type)
- }
-}
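For context, a minimal usage sketch of the DefaultClient helper deleted above (not part of this diff); the scope and request URL are illustrative placeholders, and it assumes Application Default Credentials are discoverable in one of the locations listed in the doc comment:

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	// DefaultClient walks the lookup order documented above: the
	// GOOGLE_APPLICATION_CREDENTIALS file, the gcloud well-known file,
	// App Engine, and finally the GCE metadata server.
	client, err := google.DefaultClient(oauth2.NoContext,
		"https://www.googleapis.com/auth/devstorage.read_only") // example scope
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Get("https://www.googleapis.com/storage/v1/b?project=example") // placeholder request
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
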
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/google.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/google.go
deleted file mode 100644
index 0bed738668..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/google.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package google provides support for making OAuth2 authorized and
-// authenticated HTTP requests to Google APIs.
-// It supports the Web server flow, client-side credentials, service accounts,
-// Google Compute Engine service accounts, and Google App Engine service
-// accounts.
-//
-// For more information, please read
-// https://developers.google.com/accounts/docs/OAuth2
-// and
-// https://developers.google.com/accounts/docs/application-default-credentials.
-package google
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "strings"
- "time"
-
- "golang.org/x/oauth2"
- "golang.org/x/oauth2/jwt"
- "google.golang.org/cloud/compute/metadata"
-)
-
-// Endpoint is Google's OAuth 2.0 endpoint.
-var Endpoint = oauth2.Endpoint{
- AuthURL: "https://accounts.google.com/o/oauth2/auth",
- TokenURL: "https://accounts.google.com/o/oauth2/token",
-}
-
-// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
-const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
-
-// ConfigFromJSON uses a Google Developers Console client_credentials.json
-// file to construct a config.
-// client_credentials.json can be downloaded from https://console.developers.google.com,
-// under "APIs & Auth" > "Credentials". Download the Web application credentials in the
-// JSON format and provide the contents of the file as jsonKey.
-func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) {
- type cred struct {
- ClientID string `json:"client_id"`
- ClientSecret string `json:"client_secret"`
- RedirectURIs []string `json:"redirect_uris"`
- AuthURI string `json:"auth_uri"`
- TokenURI string `json:"token_uri"`
- }
- var j struct {
- Web *cred `json:"web"`
- Installed *cred `json:"installed"`
- }
- if err := json.Unmarshal(jsonKey, &j); err != nil {
- return nil, err
- }
- var c *cred
- switch {
- case j.Web != nil:
- c = j.Web
- case j.Installed != nil:
- c = j.Installed
- default:
- return nil, fmt.Errorf("oauth2/google: no credentials found")
- }
- if len(c.RedirectURIs) < 1 {
- return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json")
- }
- return &oauth2.Config{
- ClientID: c.ClientID,
- ClientSecret: c.ClientSecret,
- RedirectURL: c.RedirectURIs[0],
- Scopes: scope,
- Endpoint: oauth2.Endpoint{
- AuthURL: c.AuthURI,
- TokenURL: c.TokenURI,
- },
- }, nil
-}
-
-// JWTConfigFromJSON uses a Google Developers service account JSON key file to read
-// the credentials that authorize and authenticate the requests.
-// Create a service account on the "Credentials" page under "APIs & Auth" for your
-// project at https://console.developers.google.com to download a JSON key file.
-func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) {
- var key struct {
- Email string `json:"client_email"`
- PrivateKey string `json:"private_key"`
- }
- if err := json.Unmarshal(jsonKey, &key); err != nil {
- return nil, err
- }
- return &jwt.Config{
- Email: key.Email,
- PrivateKey: []byte(key.PrivateKey),
- Scopes: scope,
- TokenURL: JWTTokenURL,
- }, nil
-}
-
-// ComputeTokenSource returns a token source that fetches access tokens
-// from Google Compute Engine (GCE)'s metadata server. It's only valid to use
-// this token source if your program is running on a GCE instance.
-// If no account is specified, "default" is used.
-// Further information about retrieving access tokens from the GCE metadata
-// server can be found at https://cloud.google.com/compute/docs/authentication.
-func ComputeTokenSource(account string) oauth2.TokenSource {
- return oauth2.ReuseTokenSource(nil, computeSource{account: account})
-}
-
-type computeSource struct {
- account string
-}
-
-func (cs computeSource) Token() (*oauth2.Token, error) {
- if !metadata.OnGCE() {
- return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE")
- }
- acct := cs.account
- if acct == "" {
- acct = "default"
- }
- tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token")
- if err != nil {
- return nil, err
- }
- var res struct {
- AccessToken string `json:"access_token"`
- ExpiresInSec int `json:"expires_in"`
- TokenType string `json:"token_type"`
- }
- err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res)
- if err != nil {
- return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err)
- }
- if res.ExpiresInSec == 0 || res.AccessToken == "" {
- return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata")
- }
- return &oauth2.Token{
- AccessToken: res.AccessToken,
- TokenType: res.TokenType,
- Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
- }, nil
-}
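A hedged sketch of the JWTConfigFromJSON helper deleted above; the key file name and scope are placeholders:

package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	data, err := ioutil.ReadFile("service-account.json") // assumed key file path
	if err != nil {
		log.Fatal(err)
	}
	conf, err := google.JWTConfigFromJSON(data,
		"https://www.googleapis.com/auth/devstorage.read_only") // example scope
	if err != nil {
		log.Fatal(err)
	}
	// Client wires the two-legged JWT flow (jwt.Config) into an *http.Client
	// that attaches and refreshes tokens automatically.
	client := conf.Client(oauth2.NoContext)
	_ = client
}
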
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/jwt.go
deleted file mode 100644
index b91991786f..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/jwt.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package google
-
-import (
- "crypto/rsa"
- "fmt"
- "time"
-
- "golang.org/x/oauth2"
- "golang.org/x/oauth2/internal"
- "golang.org/x/oauth2/jws"
-)
-
-// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON
-// key file to read the credentials that authorize and authenticate the
-// requests, and returns a TokenSource that does not use any OAuth2 flow but
-// instead creates a JWT and sends that as the access token.
-// The audience is typically a URL that specifies the scope of the credentials.
-//
-// Note that this is not a standard OAuth flow, but rather an
-// optimization supported by a few Google services.
-// Unless you know otherwise, you should use JWTConfigFromJSON instead.
-func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) {
- cfg, err := JWTConfigFromJSON(jsonKey)
- if err != nil {
- return nil, fmt.Errorf("google: could not parse JSON key: %v", err)
- }
- pk, err := internal.ParseKey(cfg.PrivateKey)
- if err != nil {
- return nil, fmt.Errorf("google: could not parse key: %v", err)
- }
- ts := &jwtAccessTokenSource{
- email: cfg.Email,
- audience: audience,
- pk: pk,
- }
- tok, err := ts.Token()
- if err != nil {
- return nil, err
- }
- return oauth2.ReuseTokenSource(tok, ts), nil
-}
-
-type jwtAccessTokenSource struct {
- email, audience string
- pk *rsa.PrivateKey
-}
-
-func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) {
- iat := time.Now()
- exp := iat.Add(time.Hour)
- cs := &jws.ClaimSet{
- Iss: ts.email,
- Sub: ts.email,
- Aud: ts.audience,
- Iat: iat.Unix(),
- Exp: exp.Unix(),
- }
- hdr := &jws.Header{
- Algorithm: "RS256",
- Typ: "JWT",
- }
- msg, err := jws.Encode(hdr, cs, ts.pk)
- if err != nil {
- return nil, fmt.Errorf("google: could not encode JWT: %v", err)
- }
- return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/sdk.go
deleted file mode 100644
index d29a3bb9bb..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/sdk.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package google
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "os"
- "os/user"
- "path/filepath"
- "runtime"
- "strings"
- "time"
-
- "golang.org/x/net/context"
- "golang.org/x/oauth2"
- "golang.org/x/oauth2/internal"
-)
-
-type sdkCredentials struct {
- Data []struct {
- Credential struct {
- ClientID string `json:"client_id"`
- ClientSecret string `json:"client_secret"`
- AccessToken string `json:"access_token"`
- RefreshToken string `json:"refresh_token"`
- TokenExpiry *time.Time `json:"token_expiry"`
- } `json:"credential"`
- Key struct {
- Account string `json:"account"`
- Scope string `json:"scope"`
- } `json:"key"`
- }
-}
-
-// An SDKConfig provides access to tokens from an account already
-// authorized via the Google Cloud SDK.
-type SDKConfig struct {
- conf oauth2.Config
- initialToken *oauth2.Token
-}
-
-// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK
-// account. If account is empty, the account currently active in
-// Google Cloud SDK properties is used.
-// Google Cloud SDK credentials must be created by running `gcloud auth`
-// before using this function.
-// The Google Cloud SDK is available at https://cloud.google.com/sdk/.
-func NewSDKConfig(account string) (*SDKConfig, error) {
- configPath, err := sdkConfigPath()
- if err != nil {
- return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
- }
- credentialsPath := filepath.Join(configPath, "credentials")
- f, err := os.Open(credentialsPath)
- if err != nil {
- return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
- }
- defer f.Close()
-
- var c sdkCredentials
- if err := json.NewDecoder(f).Decode(&c); err != nil {
- return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
- }
- if len(c.Data) == 0 {
- return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
- }
- if account == "" {
- propertiesPath := filepath.Join(configPath, "properties")
- f, err := os.Open(propertiesPath)
- if err != nil {
- return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
- }
- defer f.Close()
- ini, err := internal.ParseINI(f)
- if err != nil {
- return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
- }
- core, ok := ini["core"]
- if !ok {
- return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
- }
- active, ok := core["account"]
- if !ok {
- return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
- }
- account = active
- }
-
- for _, d := range c.Data {
- if account == "" || d.Key.Account == account {
- if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
- return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
- }
- var expiry time.Time
- if d.Credential.TokenExpiry != nil {
- expiry = *d.Credential.TokenExpiry
- }
- return &SDKConfig{
- conf: oauth2.Config{
- ClientID: d.Credential.ClientID,
- ClientSecret: d.Credential.ClientSecret,
- Scopes: strings.Split(d.Key.Scope, " "),
- Endpoint: Endpoint,
- RedirectURL: "oob",
- },
- initialToken: &oauth2.Token{
- AccessToken: d.Credential.AccessToken,
- RefreshToken: d.Credential.RefreshToken,
- Expiry: expiry,
- },
- }, nil
- }
- }
- return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
-}
-
-// Client returns an HTTP client using Google Cloud SDK credentials to
-// authorize requests. The token will auto-refresh as necessary. The
-// underlying http.RoundTripper will be obtained using the provided
-// context. The returned client and its Transport should not be
-// modified.
-func (c *SDKConfig) Client(ctx context.Context) *http.Client {
- return &http.Client{
- Transport: &oauth2.Transport{
- Source: c.TokenSource(ctx),
- },
- }
-}
-
-// TokenSource returns an oauth2.TokenSource that retrieves tokens from
-// Google Cloud SDK credentials using the provided context.
-// It will return the current access token stored in the credentials,
-// and refresh it when it expires, but it won't update the credentials
-// with the new access token.
-func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
- return c.conf.TokenSource(ctx, c.initialToken)
-}
-
-// Scopes are the OAuth 2.0 scopes the current account is authorized for.
-func (c *SDKConfig) Scopes() []string {
- return c.conf.Scopes
-}
-
-// sdkConfigPath tries to guess where the gcloud config is located.
-// It can be overridden during tests.
-var sdkConfigPath = func() (string, error) {
- if runtime.GOOS == "windows" {
- return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
- }
- homeDir := guessUnixHomeDir()
- if homeDir == "" {
- return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
- }
- return filepath.Join(homeDir, ".config", "gcloud"), nil
-}
-
-func guessUnixHomeDir() string {
- usr, err := user.Current()
- if err == nil {
- return usr.HomeDir
- }
- return os.Getenv("HOME")
-}
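A small sketch of the SDKConfig type deleted above; it assumes `gcloud auth login` has already stored credentials on the machine:

package main

import (
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	// An empty account name selects whichever account is active in the
	// Cloud SDK properties file.
	conf, err := google.NewSDKConfig("")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("authorized scopes:", conf.Scopes())
	client := conf.Client(oauth2.NoContext)
	_ = client // use client to call APIs on behalf of the gcloud account
}
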
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/oauth2.go
deleted file mode 100644
index fbe1028d64..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/oauth2.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package internal contains support packages for oauth2 package.
-package internal
-
-import (
- "bufio"
- "crypto/rsa"
- "crypto/x509"
- "encoding/pem"
- "errors"
- "fmt"
- "io"
- "strings"
-)
-
-// ParseKey converts the binary contents of a private key file
-// to an *rsa.PrivateKey. It detects whether the private key is in a
-// PEM container or not. If so, it extracts the private key
-// from the PEM container before conversion. It only supports PEM
-// containers with no passphrase.
-func ParseKey(key []byte) (*rsa.PrivateKey, error) {
- block, _ := pem.Decode(key)
- if block != nil {
- key = block.Bytes
- }
- parsedKey, err := x509.ParsePKCS8PrivateKey(key)
- if err != nil {
- parsedKey, err = x509.ParsePKCS1PrivateKey(key)
- if err != nil {
- return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err)
- }
- }
- parsed, ok := parsedKey.(*rsa.PrivateKey)
- if !ok {
- return nil, errors.New("private key is invalid")
- }
- return parsed, nil
-}
-
-func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
- result := map[string]map[string]string{
- "": map[string]string{}, // root section
- }
- scanner := bufio.NewScanner(ini)
- currentSection := ""
- for scanner.Scan() {
- line := strings.TrimSpace(scanner.Text())
- if strings.HasPrefix(line, ";") {
- // comment.
- continue
- }
- if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
- currentSection = strings.TrimSpace(line[1 : len(line)-1])
- result[currentSection] = map[string]string{}
- continue
- }
- parts := strings.SplitN(line, "=", 2)
- if len(parts) == 2 && parts[0] != "" {
- result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
- }
- }
- if err := scanner.Err(); err != nil {
- return nil, fmt.Errorf("error scanning ini: %v", err)
- }
- return result, nil
-}
-
-func CondVal(v string) []string {
- if v == "" {
- return nil
- }
- return []string{v}
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/token.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/token.go
deleted file mode 100644
index 739a89bfe9..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/token.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package internal contains support packages for oauth2 package.
-package internal
-
-import (
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "mime"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "golang.org/x/net/context"
-)
-
-// Token represents the credentials used to authorize
-// the requests to access protected resources on the OAuth 2.0
-// provider's backend.
-//
-// This type is a mirror of oauth2.Token and exists to break
-// an otherwise-circular dependency. Other internal packages
-// should convert this Token into an oauth2.Token before use.
-type Token struct {
- // AccessToken is the token that authorizes and authenticates
- // the requests.
- AccessToken string
-
- // TokenType is the type of token.
- // The Type method returns either this or "Bearer", the default.
- TokenType string
-
- // RefreshToken is a token that's used by the application
- // (as opposed to the user) to refresh the access token
- // if it expires.
- RefreshToken string
-
- // Expiry is the optional expiration time of the access token.
- //
- // If zero, TokenSource implementations will reuse the same
- // token forever and RefreshToken or equivalent
- // mechanisms for that TokenSource will not be used.
- Expiry time.Time
-
- // Raw optionally contains extra metadata from the server
- // when updating a token.
- Raw interface{}
-}
-
-// tokenJSON is the struct representing the HTTP response from OAuth2
-// providers returning a token in JSON form.
-type tokenJSON struct {
- AccessToken string `json:"access_token"`
- TokenType string `json:"token_type"`
- RefreshToken string `json:"refresh_token"`
- ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
- Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in
-}
-
-func (e *tokenJSON) expiry() (t time.Time) {
- if v := e.ExpiresIn; v != 0 {
- return time.Now().Add(time.Duration(v) * time.Second)
- }
- if v := e.Expires; v != 0 {
- return time.Now().Add(time.Duration(v) * time.Second)
- }
- return
-}
-
-type expirationTime int32
-
-func (e *expirationTime) UnmarshalJSON(b []byte) error {
- var n json.Number
- err := json.Unmarshal(b, &n)
- if err != nil {
- return err
- }
- i, err := n.Int64()
- if err != nil {
- return err
- }
- *e = expirationTime(i)
- return nil
-}
-
-var brokenAuthHeaderProviders = []string{
- "https://accounts.google.com/",
- "https://api.dropbox.com/",
- "https://api.instagram.com/",
- "https://api.netatmo.net/",
- "https://api.odnoklassniki.ru/",
- "https://api.pushbullet.com/",
- "https://api.soundcloud.com/",
- "https://api.twitch.tv/",
- "https://app.box.com/",
- "https://connect.stripe.com/",
- "https://login.microsoftonline.com/",
- "https://login.salesforce.com/",
- "https://oauth.sandbox.trainingpeaks.com/",
- "https://oauth.trainingpeaks.com/",
- "https://oauth.vk.com/",
- "https://openapi.baidu.com/",
- "https://slack.com/",
- "https://test-sandbox.auth.corp.google.com",
- "https://test.salesforce.com/",
- "https://user.gini.net/",
- "https://www.douban.com/",
- "https://www.googleapis.com/",
- "https://www.linkedin.com/",
- "https://www.strava.com/oauth/",
- "https://www.wunderlist.com/oauth/",
- "https://api.patreon.com/",
-}
-
-func RegisterBrokenAuthHeaderProvider(tokenURL string) {
- brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL)
-}
-
-// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
-// implements the OAuth2 spec correctly
-// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
-// In summary:
-// - Reddit only accepts client secret in the Authorization header
-// - Dropbox accepts either it in URL param or Auth header, but not both.
-// - Google only accepts URL param (not spec compliant?), not Auth header
-// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
-func providerAuthHeaderWorks(tokenURL string) bool {
- for _, s := range brokenAuthHeaderProviders {
- if strings.HasPrefix(tokenURL, s) {
- // Some sites fail to implement the OAuth2 spec fully.
- return false
- }
- }
-
- // Assume the provider implements the spec properly
- // otherwise. We can add more exceptions as they're
- // discovered. We will _not_ be adding configurable hooks
- // to this package to let users select server bugs.
- return true
-}
-
-func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) {
- hc, err := ContextClient(ctx)
- if err != nil {
- return nil, err
- }
- v.Set("client_id", ClientID)
- bustedAuth := !providerAuthHeaderWorks(TokenURL)
- if bustedAuth && ClientSecret != "" {
- v.Set("client_secret", ClientSecret)
- }
- req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode()))
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
- if !bustedAuth {
- req.SetBasicAuth(ClientID, ClientSecret)
- }
- r, err := hc.Do(req)
- if err != nil {
- return nil, err
- }
- defer r.Body.Close()
- body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
- if err != nil {
- return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
- }
- if code := r.StatusCode; code < 200 || code > 299 {
- return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
- }
-
- var token *Token
- content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
- switch content {
- case "application/x-www-form-urlencoded", "text/plain":
- vals, err := url.ParseQuery(string(body))
- if err != nil {
- return nil, err
- }
- token = &Token{
- AccessToken: vals.Get("access_token"),
- TokenType: vals.Get("token_type"),
- RefreshToken: vals.Get("refresh_token"),
- Raw: vals,
- }
- e := vals.Get("expires_in")
- if e == "" {
- // TODO(jbd): Facebook's OAuth2 implementation is broken and
- // returns expires_in field in expires. Remove the fallback to expires,
- // when Facebook fixes their implementation.
- e = vals.Get("expires")
- }
- expires, _ := strconv.Atoi(e)
- if expires != 0 {
- token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
- }
- default:
- var tj tokenJSON
- if err = json.Unmarshal(body, &tj); err != nil {
- return nil, err
- }
- token = &Token{
- AccessToken: tj.AccessToken,
- TokenType: tj.TokenType,
- RefreshToken: tj.RefreshToken,
- Expiry: tj.expiry(),
- Raw: make(map[string]interface{}),
- }
- json.Unmarshal(body, &token.Raw) // no error checks for optional fields
- }
- // Don't overwrite `RefreshToken` with an empty value
- // if this was a token refreshing request.
- if token.RefreshToken == "" {
- token.RefreshToken = v.Get("refresh_token")
- }
- return token, nil
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/transport.go
deleted file mode 100644
index f1f173e345..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/transport.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package internal contains support packages for oauth2 package.
-package internal
-
-import (
- "net/http"
-
- "golang.org/x/net/context"
-)
-
-// HTTPClient is the context key to use with golang.org/x/net/context's
-// WithValue function to associate an *http.Client value with a context.
-var HTTPClient ContextKey
-
-// ContextKey is just an empty struct. It exists so HTTPClient can be
-// an immutable public variable with a unique type. It's immutable
-// because nobody outside this package can create a ContextKey (the type is unexported).
-type ContextKey struct{}
-
-// ContextClientFunc is a func which tries to return an *http.Client
-// given a Context value. If it returns an error, the search stops
-// with that error. If it returns (nil, nil), the search continues
-// down the list of registered funcs.
-type ContextClientFunc func(context.Context) (*http.Client, error)
-
-var contextClientFuncs []ContextClientFunc
-
-func RegisterContextClientFunc(fn ContextClientFunc) {
- contextClientFuncs = append(contextClientFuncs, fn)
-}
-
-func ContextClient(ctx context.Context) (*http.Client, error) {
- if ctx != nil {
- if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
- return hc, nil
- }
- }
- for _, fn := range contextClientFuncs {
- c, err := fn(ctx)
- if err != nil {
- return nil, err
- }
- if c != nil {
- return c, nil
- }
- }
- return http.DefaultClient, nil
-}
-
-func ContextTransport(ctx context.Context) http.RoundTripper {
- hc, err := ContextClient(ctx)
- // This is a rare error case (somebody using nil on App Engine).
- if err != nil {
- return ErrorTransport{err}
- }
- return hc.Transport
-}
-
-// ErrorTransport returns the specified error on RoundTrip.
-// This RoundTripper should be used in rare error cases where
-// error handling can be postponed to response handling time.
-type ErrorTransport struct{ Err error }
-
-func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) {
- return nil, t.Err
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/jws/jws.go
deleted file mode 100644
index b46edb27c1..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/jws/jws.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package jws provides encoding and decoding utilities for
-// signed JWS messages.
-package jws
-
-import (
- "bytes"
- "crypto"
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha256"
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "strings"
- "time"
-)
-
-// ClaimSet contains information about the JWT signature including the
-// permissions being requested (scopes), the target of the token, the issuer,
-// the time the token was issued, and the lifetime of the token.
-type ClaimSet struct {
- Iss string `json:"iss"` // email address of the client_id of the application making the access token request
- Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
- Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional).
- Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch)
- Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch)
- Typ string `json:"typ,omitempty"` // token type (Optional).
-
- // Email for which the application is requesting delegated access (Optional).
- Sub string `json:"sub,omitempty"`
-
- // The old name of Sub. Client keeps setting Prn to be
- // compliant with legacy OAuth 2.0 providers. (Optional)
- Prn string `json:"prn,omitempty"`
-
- // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
- // This array is marshalled using custom code (see (c *ClaimSet) encode()).
- PrivateClaims map[string]interface{} `json:"-"`
-}
-
-func (c *ClaimSet) encode() (string, error) {
- // Reverting time back for machines whose time is not perfectly in sync.
- // If client machine's time is in the future according
- // to Google servers, an access token will not be issued.
- now := time.Now().Add(-10 * time.Second)
- if c.Iat == 0 {
- c.Iat = now.Unix()
- }
- if c.Exp == 0 {
- c.Exp = now.Add(time.Hour).Unix()
- }
- if c.Exp < c.Iat {
- return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat)
- }
-
- b, err := json.Marshal(c)
- if err != nil {
- return "", err
- }
-
- if len(c.PrivateClaims) == 0 {
- return base64Encode(b), nil
- }
-
- // Marshal private claim set and then append it to b.
- prv, err := json.Marshal(c.PrivateClaims)
- if err != nil {
- return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims)
- }
-
- // Concatenate public and private claim JSON objects.
- if !bytes.HasSuffix(b, []byte{'}'}) {
- return "", fmt.Errorf("jws: invalid JSON %s", b)
- }
- if !bytes.HasPrefix(prv, []byte{'{'}) {
- return "", fmt.Errorf("jws: invalid JSON %s", prv)
- }
- b[len(b)-1] = ',' // Replace closing curly brace with a comma.
- b = append(b, prv[1:]...) // Append private claims.
- return base64Encode(b), nil
-}
-
-// Header represents the header for the signed JWS payloads.
-type Header struct {
- // The algorithm used for signature.
- Algorithm string `json:"alg"`
-
- // Represents the token type.
- Typ string `json:"typ"`
-}
-
-func (h *Header) encode() (string, error) {
- b, err := json.Marshal(h)
- if err != nil {
- return "", err
- }
- return base64Encode(b), nil
-}
-
-// Decode decodes a claim set from a JWS payload.
-func Decode(payload string) (*ClaimSet, error) {
- // decode returned id token to get expiry
- s := strings.Split(payload, ".")
- if len(s) < 2 {
- // TODO(jbd): Provide more context about the error.
- return nil, errors.New("jws: invalid token received")
- }
- decoded, err := base64Decode(s[1])
- if err != nil {
- return nil, err
- }
- c := &ClaimSet{}
- err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c)
- return c, err
-}
-
-// Signer returns a signature for the given data.
-type Signer func(data []byte) (sig []byte, err error)
-
-// EncodeWithSigner encodes a header and claim set with the provided signer.
-func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) {
- head, err := header.encode()
- if err != nil {
- return "", err
- }
- cs, err := c.encode()
- if err != nil {
- return "", err
- }
- ss := fmt.Sprintf("%s.%s", head, cs)
- sig, err := sg([]byte(ss))
- if err != nil {
- return "", err
- }
- return fmt.Sprintf("%s.%s", ss, base64Encode(sig)), nil
-}
-
-// Encode encodes a signed JWS with provided header and claim set.
-// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key.
-func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) {
- sg := func(data []byte) (sig []byte, err error) {
- h := sha256.New()
- h.Write([]byte(data))
- return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil))
- }
- return EncodeWithSigner(header, c, sg)
-}
-
-// base64Encode returns a Base64url encoded version of the input string with any
-// trailing "=" stripped.
-func base64Encode(b []byte) string {
- return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
-}
-
-// base64Decode decodes the Base64url encoded string
-func base64Decode(s string) ([]byte, error) {
- // add back missing padding
- switch len(s) % 4 {
- case 1:
- s += "==="
- case 2:
- s += "=="
- case 3:
- s += "="
- }
- return base64.URLEncoding.DecodeString(s)
-}
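To illustrate the jws package deleted above, a self-contained sketch that signs and then parses a claim set; the issuer and audience values are placeholders:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"
	"time"

	"golang.org/x/oauth2/jws"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	claims := &jws.ClaimSet{
		Iss: "signer@example.com",        // placeholder issuer
		Aud: "https://example.com/token", // placeholder audience
		Iat: time.Now().Unix(),
		Exp: time.Now().Add(time.Hour).Unix(),
	}
	header := &jws.Header{Algorithm: "RS256", Typ: "JWT"}

	// Encode signs header.claims with RSA-SHA256 (PKCS#1 v1.5).
	token, err := jws.Encode(header, claims, key)
	if err != nil {
		log.Fatal(err)
	}

	// Decode only parses the claim-set segment; it does not verify the signature.
	parsed, err := jws.Decode(token)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed.Iss, parsed.Exp)
}
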
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/jwt/jwt.go
deleted file mode 100644
index 2ffad21a60..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/jwt/jwt.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly
-// known as "two-legged OAuth 2.0".
-//
-// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12
-package jwt
-
-import (
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "strings"
- "time"
-
- "golang.org/x/net/context"
- "golang.org/x/oauth2"
- "golang.org/x/oauth2/internal"
- "golang.org/x/oauth2/jws"
-)
-
-var (
- defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
- defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"}
-)
-
-// Config is the configuration for using JWT to fetch tokens,
-// commonly known as "two-legged OAuth 2.0".
-type Config struct {
- // Email is the OAuth client identifier used when communicating with
- // the configured OAuth provider.
- Email string
-
- // PrivateKey contains the contents of an RSA private key or the
- // contents of a PEM file that contains a private key. The provided
- // private key is used to sign JWT payloads.
- // PEM containers with a passphrase are not supported.
- // Use the following command to convert a PKCS 12 file into a PEM.
- //
- // $ openssl pkcs12 -in key.p12 -out key.pem -nodes
- //
- PrivateKey []byte
-
- // Subject is the optional user to impersonate.
- Subject string
-
- // Scopes optionally specifies a list of requested permission scopes.
- Scopes []string
-
- // TokenURL is the endpoint required to complete the 2-legged JWT flow.
- TokenURL string
-
- // Expires optionally specifies how long the token is valid for.
- Expires time.Duration
-}
-
-// TokenSource returns a JWT TokenSource using the configuration
-// in c and the HTTP client from the provided context.
-func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
- return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})
-}
-
-// Client returns an HTTP client wrapping the context's
-// HTTP transport and adding Authorization headers with tokens
-// obtained from c.
-//
-// The returned client and its Transport should not be modified.
-func (c *Config) Client(ctx context.Context) *http.Client {
- return oauth2.NewClient(ctx, c.TokenSource(ctx))
-}
-
-// jwtSource is a source that always does a signed JWT request for a token.
-// It should typically be wrapped with a reuseTokenSource.
-type jwtSource struct {
- ctx context.Context
- conf *Config
-}
-
-func (js jwtSource) Token() (*oauth2.Token, error) {
- pk, err := internal.ParseKey(js.conf.PrivateKey)
- if err != nil {
- return nil, err
- }
- hc := oauth2.NewClient(js.ctx, nil)
- claimSet := &jws.ClaimSet{
- Iss: js.conf.Email,
- Scope: strings.Join(js.conf.Scopes, " "),
- Aud: js.conf.TokenURL,
- }
- if subject := js.conf.Subject; subject != "" {
- claimSet.Sub = subject
- // prn is the old name of sub. Keep setting it
- // to be compatible with legacy OAuth 2.0 providers.
- claimSet.Prn = subject
- }
- if t := js.conf.Expires; t > 0 {
- claimSet.Exp = time.Now().Add(t).Unix()
- }
- payload, err := jws.Encode(defaultHeader, claimSet, pk)
- if err != nil {
- return nil, err
- }
- v := url.Values{}
- v.Set("grant_type", defaultGrantType)
- v.Set("assertion", payload)
- resp, err := hc.PostForm(js.conf.TokenURL, v)
- if err != nil {
- return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
- }
- defer resp.Body.Close()
- body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
- if err != nil {
- return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
- }
- if c := resp.StatusCode; c < 200 || c > 299 {
- return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body)
- }
- // tokenRes is the JSON response body.
- var tokenRes struct {
- AccessToken string `json:"access_token"`
- TokenType string `json:"token_type"`
- IDToken string `json:"id_token"`
- ExpiresIn int64 `json:"expires_in"` // relative seconds from now
- }
- if err := json.Unmarshal(body, &tokenRes); err != nil {
- return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
- }
- token := &oauth2.Token{
- AccessToken: tokenRes.AccessToken,
- TokenType: tokenRes.TokenType,
- }
- raw := make(map[string]interface{})
- json.Unmarshal(body, &raw) // no error checks for optional fields
- token = token.WithExtra(raw)
-
- if secs := tokenRes.ExpiresIn; secs > 0 {
- token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
- }
- if v := tokenRes.IDToken; v != "" {
- // decode returned id token to get expiry
- claimSet, err := jws.Decode(v)
- if err != nil {
- return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err)
- }
- token.Expiry = time.Unix(claimSet.Exp, 0)
- }
- return token, nil
-}
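A sketch of driving the jwt.Config flow deleted above directly (rather than via the google package); every field value below is a placeholder:

package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/jwt"
)

func main() {
	pemBytes, err := ioutil.ReadFile("key.pem") // assumed PEM-encoded RSA private key
	if err != nil {
		log.Fatal(err)
	}
	conf := &jwt.Config{
		Email:      "robot@example.com",                   // placeholder issuer
		PrivateKey: pemBytes,
		Scopes:     []string{"https://example.com/scope"}, // placeholder scope
		TokenURL:   "https://provider.example.com/token",  // placeholder endpoint
	}
	// Each expired token triggers a fresh signed-JWT grant request;
	// ReuseTokenSource (inside Client) caches the result until expiry.
	client := conf.Client(oauth2.NoContext)
	_ = client
}
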
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/oauth2.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/oauth2.go
deleted file mode 100644
index 9b7b977dab..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/oauth2.go
+++ /dev/null
@@ -1,337 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package oauth2 provides support for making
-// OAuth2 authorized and authenticated HTTP requests.
-// It can additionally grant authorization with Bearer JWT.
-package oauth2
-
-import (
- "bytes"
- "errors"
- "net/http"
- "net/url"
- "strings"
- "sync"
-
- "golang.org/x/net/context"
- "golang.org/x/oauth2/internal"
-)
-
-// NoContext is the default context you should supply if not using
-// your own context.Context (see https://golang.org/x/net/context).
-var NoContext = context.TODO()
-
-// RegisterBrokenAuthHeaderProvider registers an OAuth2 server
-// identified by the tokenURL prefix as an OAuth2 implementation
-// which doesn't support the HTTP Basic authentication
-// scheme to authenticate with the authorization server.
-// Once a server is registered, credentials (client_id and client_secret)
-// will be passed as query parameters rather than being present
-// in the Authorization header.
-// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
-func RegisterBrokenAuthHeaderProvider(tokenURL string) {
- internal.RegisterBrokenAuthHeaderProvider(tokenURL)
-}
-
-// Config describes a typical 3-legged OAuth2 flow, with both the
-// client application information and the server's endpoint URLs.
-type Config struct {
- // ClientID is the application's ID.
- ClientID string
-
- // ClientSecret is the application's secret.
- ClientSecret string
-
- // Endpoint contains the resource server's token endpoint
- // URLs. These are constants specific to each server and are
- // often available via site-specific packages, such as
- // google.Endpoint or github.Endpoint.
- Endpoint Endpoint
-
- // RedirectURL is the URL to redirect users going through
- // the OAuth flow, after the resource owner's URLs.
- RedirectURL string
-
- // Scope specifies optional requested permissions.
- Scopes []string
-}
-
-// A TokenSource is anything that can return a token.
-type TokenSource interface {
- // Token returns a token or an error.
- // Token must be safe for concurrent use by multiple goroutines.
- // The returned Token must not be modified.
- Token() (*Token, error)
-}
-
-// Endpoint contains the OAuth 2.0 provider's authorization and token
-// endpoint URLs.
-type Endpoint struct {
- AuthURL string
- TokenURL string
-}
-
-var (
- // AccessTypeOnline and AccessTypeOffline are options passed
- // to the Options.AuthCodeURL method. They modify the
- // "access_type" field that gets sent in the URL returned by
- // AuthCodeURL.
- //
- // Online is the default if neither is specified. If your
- // application needs to refresh access tokens when the user
- // is not present at the browser, then use offline. This will
- // result in your application obtaining a refresh token the
- // first time your application exchanges an authorization
- // code for a user.
- AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online")
- AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
-
- // ApprovalForce forces the users to view the consent dialog
- // and confirm the permissions request at the URL returned
- // from AuthCodeURL, even if they've already done so.
- ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
-)
-
-// An AuthCodeOption is passed to Config.AuthCodeURL.
-type AuthCodeOption interface {
- setValue(url.Values)
-}
-
-type setParam struct{ k, v string }
-
-func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
-
-// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
-// to a provider's authorization endpoint.
-func SetAuthURLParam(key, value string) AuthCodeOption {
- return setParam{key, value}
-}
-
-// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
-// that asks for permissions for the required scopes explicitly.
-//
-// State is a token to protect the user from CSRF attacks. You must
-// always provide a non-zero string and validate that it matches the
-// state query parameter on your redirect callback.
-// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
-//
-// Opts may include AccessTypeOnline or AccessTypeOffline, as well
-// as ApprovalForce.
-func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
- var buf bytes.Buffer
- buf.WriteString(c.Endpoint.AuthURL)
- v := url.Values{
- "response_type": {"code"},
- "client_id": {c.ClientID},
- "redirect_uri": internal.CondVal(c.RedirectURL),
- "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
- "state": internal.CondVal(state),
- }
- for _, opt := range opts {
- opt.setValue(v)
- }
- if strings.Contains(c.Endpoint.AuthURL, "?") {
- buf.WriteByte('&')
- } else {
- buf.WriteByte('?')
- }
- buf.WriteString(v.Encode())
- return buf.String()
-}
-
-// PasswordCredentialsToken converts a resource owner username and password
-// pair into a token.
-//
-// Per the RFC, this grant type should only be used "when there is a high
-// degree of trust between the resource owner and the client (e.g., the client
-// is part of the device operating system or a highly privileged application),
-// and when other authorization grant types are not available."
-// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
-//
-// The HTTP client to use is derived from the context.
-// If nil, http.DefaultClient is used.
-func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
- return retrieveToken(ctx, c, url.Values{
- "grant_type": {"password"},
- "username": {username},
- "password": {password},
- "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
- })
-}
-
-// Exchange converts an authorization code into a token.
-//
-// It is used after a resource provider redirects the user back
-// to the Redirect URI (the URL obtained from AuthCodeURL).
-//
-// The HTTP client to use is derived from the context.
-// If a client is not provided via the context, http.DefaultClient is used.
-//
-// The code will be in the *http.Request.FormValue("code"). Before
-// calling Exchange, be sure to validate FormValue("state").
-func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {
- return retrieveToken(ctx, c, url.Values{
- "grant_type": {"authorization_code"},
- "code": {code},
- "redirect_uri": internal.CondVal(c.RedirectURL),
- "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
- })
-}
-
-// Client returns an HTTP client using the provided token.
-// The token will auto-refresh as necessary. The underlying
-// HTTP transport will be obtained using the provided context.
-// The returned client and its Transport should not be modified.
-func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
- return NewClient(ctx, c.TokenSource(ctx, t))
-}
-
-// TokenSource returns a TokenSource that returns t until t expires,
-// automatically refreshing it as necessary using the provided context.
-//
-// Most users will use Config.Client instead.
-func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
- tkr := &tokenRefresher{
- ctx: ctx,
- conf: c,
- }
- if t != nil {
- tkr.refreshToken = t.RefreshToken
- }
- return &reuseTokenSource{
- t: t,
- new: tkr,
- }
-}
-
-// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
-// HTTP requests to renew a token using a RefreshToken.
-type tokenRefresher struct {
- ctx context.Context // used to get HTTP requests
- conf *Config
- refreshToken string
-}
-
-// WARNING: Token is not safe for concurrent access, as it
-// updates the tokenRefresher's refreshToken field.
-// Within this package, it is used by reuseTokenSource which
-// synchronizes calls to this method with its own mutex.
-func (tf *tokenRefresher) Token() (*Token, error) {
- if tf.refreshToken == "" {
- return nil, errors.New("oauth2: token expired and refresh token is not set")
- }
-
- tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
- "grant_type": {"refresh_token"},
- "refresh_token": {tf.refreshToken},
- })
-
- if err != nil {
- return nil, err
- }
- if tf.refreshToken != tk.RefreshToken {
- tf.refreshToken = tk.RefreshToken
- }
- return tk, err
-}
-
-// reuseTokenSource is a TokenSource that holds a single token in memory
-// and validates its expiry before each call to retrieve it with
-// Token. If it's expired, it will be auto-refreshed using the
-// new TokenSource.
-type reuseTokenSource struct {
- new TokenSource // called when t is expired.
-
- mu sync.Mutex // guards t
- t *Token
-}
-
-// Token returns the current token if it's still valid, else will
-// refresh the current token (using r.Context for HTTP client
-// information) and return the new one.
-func (s *reuseTokenSource) Token() (*Token, error) {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.t.Valid() {
- return s.t, nil
- }
- t, err := s.new.Token()
- if err != nil {
- return nil, err
- }
- s.t = t
- return t, nil
-}
-
-// StaticTokenSource returns a TokenSource that always returns the same token.
-// Because the provided token t is never refreshed, StaticTokenSource is only
-// useful for tokens that never expire.
-func StaticTokenSource(t *Token) TokenSource {
- return staticTokenSource{t}
-}
-
-// staticTokenSource is a TokenSource that always returns the same Token.
-type staticTokenSource struct {
- t *Token
-}
-
-func (s staticTokenSource) Token() (*Token, error) {
- return s.t, nil
-}
-
-// HTTPClient is the context key to use with golang.org/x/net/context's
-// WithValue function to associate an *http.Client value with a context.
-var HTTPClient internal.ContextKey
-
-// NewClient creates an *http.Client from a Context and TokenSource.
-// The returned client is not valid beyond the lifetime of the context.
-//
-// As a special case, if src is nil, a non-OAuth2 client is returned
-// using the provided context. This exists to support related OAuth2
-// packages.
-func NewClient(ctx context.Context, src TokenSource) *http.Client {
- if src == nil {
- c, err := internal.ContextClient(ctx)
- if err != nil {
- return &http.Client{Transport: internal.ErrorTransport{err}}
- }
- return c
- }
- return &http.Client{
- Transport: &Transport{
- Base: internal.ContextTransport(ctx),
- Source: ReuseTokenSource(nil, src),
- },
- }
-}
-
-// ReuseTokenSource returns a TokenSource which repeatedly returns the
-// same token as long as it's valid, starting with t.
-// When its cached token is invalid, a new token is obtained from src.
-//
-// ReuseTokenSource is typically used to reuse tokens from a cache
-// (such as a file on disk) between runs of a program, rather than
-// obtaining new tokens unnecessarily.
-//
-// The initial token t may be nil, in which case the TokenSource is
-// wrapped in a caching version if it isn't one already. This also
-// means it's always safe to wrap ReuseTokenSource around any other
-// TokenSource without adverse effects.
-func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
- // Don't wrap a reuseTokenSource in itself. That would work,
- // but cause an unnecessary number of mutex operations.
- // Just build the equivalent one.
- if rt, ok := src.(*reuseTokenSource); ok {
- if t == nil {
- // Just use it directly.
- return rt
- }
- src = rt.new
- }
- return &reuseTokenSource{
- t: t,
- new: src,
- }
-}
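For reference, a minimal three-legged sketch using the Config methods deleted above; the endpoints, client credentials, and the way the authorization code arrives are all placeholders:

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "client-id", // placeholders throughout
		ClientSecret: "client-secret",
		RedirectURL:  "https://example.com/callback",
		Scopes:       []string{"profile"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/auth",
			TokenURL: "https://provider.example.com/token",
		},
	}

	// 1. Send the user to the consent page; use a random state in real code.
	fmt.Println("Visit:", conf.AuthCodeURL("state", oauth2.AccessTypeOffline))

	// 2. Read the code delivered to the redirect URL (typed in here for brevity).
	var code string
	if _, err := fmt.Scan(&code); err != nil {
		log.Fatal(err)
	}
	tok, err := conf.Exchange(oauth2.NoContext, code)
	if err != nil {
		log.Fatal(err)
	}

	// 3. The returned client refreshes tok automatically as it expires.
	client := conf.Client(oauth2.NoContext, tok)
	_ = client
}
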
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/token.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/token.go
deleted file mode 100644
index 7a3167f15b..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/token.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package oauth2
-
-import (
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "golang.org/x/net/context"
- "golang.org/x/oauth2/internal"
-)
-
-// expiryDelta determines how much earlier a token should be considered
-// expired than its actual expiration time. It is used to avoid late
-// expirations due to client-server time mismatches.
-const expiryDelta = 10 * time.Second
-
-// Token represents the credentials used to authorize
-// the requests to access protected resources on the OAuth 2.0
-// provider's backend.
-//
-// Most users of this package should not access fields of Token
-// directly. They're exported mostly for use by related packages
-// implementing derivative OAuth2 flows.
-type Token struct {
- // AccessToken is the token that authorizes and authenticates
- // the requests.
- AccessToken string `json:"access_token"`
-
- // TokenType is the type of token.
- // The Type method returns either this or "Bearer", the default.
- TokenType string `json:"token_type,omitempty"`
-
- // RefreshToken is a token that's used by the application
- // (as opposed to the user) to refresh the access token
- // if it expires.
- RefreshToken string `json:"refresh_token,omitempty"`
-
- // Expiry is the optional expiration time of the access token.
- //
- // If zero, TokenSource implementations will reuse the same
- // token forever and RefreshToken or equivalent
- // mechanisms for that TokenSource will not be used.
- Expiry time.Time `json:"expiry,omitempty"`
-
- // raw optionally contains extra metadata from the server
- // when updating a token.
- raw interface{}
-}
-
-// Type returns t.TokenType if non-empty, else "Bearer".
-func (t *Token) Type() string {
- if strings.EqualFold(t.TokenType, "bearer") {
- return "Bearer"
- }
- if strings.EqualFold(t.TokenType, "mac") {
- return "MAC"
- }
- if strings.EqualFold(t.TokenType, "basic") {
- return "Basic"
- }
- if t.TokenType != "" {
- return t.TokenType
- }
- return "Bearer"
-}
-
-// SetAuthHeader sets the Authorization header to r using the access
-// token in t.
-//
-// This method is unnecessary when using Transport or an HTTP Client
-// returned by this package.
-func (t *Token) SetAuthHeader(r *http.Request) {
- r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
-}
-
-// WithExtra returns a new Token that's a clone of t, but using the
-// provided raw extra map. This is only intended for use by packages
-// implementing derivative OAuth2 flows.
-func (t *Token) WithExtra(extra interface{}) *Token {
- t2 := new(Token)
- *t2 = *t
- t2.raw = extra
- return t2
-}
-
-// Extra returns an extra field.
-// Extra fields are key-value pairs returned by the server as a
-// part of the token retrieval response.
-func (t *Token) Extra(key string) interface{} {
- if raw, ok := t.raw.(map[string]interface{}); ok {
- return raw[key]
- }
-
- vals, ok := t.raw.(url.Values)
- if !ok {
- return nil
- }
-
- v := vals.Get(key)
- switch s := strings.TrimSpace(v); strings.Count(s, ".") {
- case 0: // Contains no "."; try to parse as int
- if i, err := strconv.ParseInt(s, 10, 64); err == nil {
- return i
- }
- case 1: // Contains a single "."; try to parse as float
- if f, err := strconv.ParseFloat(s, 64); err == nil {
- return f
- }
- }
-
- return v
-}
-
-// expired reports whether the token is expired.
-// t must be non-nil.
-func (t *Token) expired() bool {
- if t.Expiry.IsZero() {
- return false
- }
- return t.Expiry.Add(-expiryDelta).Before(time.Now())
-}
-
-// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
-func (t *Token) Valid() bool {
- return t != nil && t.AccessToken != "" && !t.expired()
-}
-
-// tokenFromInternal maps an *internal.Token struct into
-// a *Token struct.
-func tokenFromInternal(t *internal.Token) *Token {
- if t == nil {
- return nil
- }
- return &Token{
- AccessToken: t.AccessToken,
- TokenType: t.TokenType,
- RefreshToken: t.RefreshToken,
- Expiry: t.Expiry,
- raw: t.Raw,
- }
-}
-
-// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
-// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
-// with an error.
-func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
- tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
- if err != nil {
- return nil, err
- }
- return tokenFromInternal(tk), nil
-}
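
The deleted token.go treats a token as expired slightly before its reported expiry (expiryDelta) so that clock skew between client and server cannot produce a token that looks valid locally but is rejected upstream. Below is a minimal standalone sketch of that check; the `token` type and the 10-second margin are illustrative, not the vendored package's API.

```go
package main

import (
	"fmt"
	"time"
)

// expiryDelta mirrors the idea of treating tokens as expired a little
// early to absorb client/server clock skew.
const expiryDelta = 10 * time.Second

type token struct {
	AccessToken string
	Expiry      time.Time // zero value means "never expires"
}

// valid reports whether the token exists and is not (almost) expired.
func (t *token) valid() bool {
	if t == nil || t.AccessToken == "" {
		return false
	}
	if t.Expiry.IsZero() {
		return true
	}
	return t.Expiry.Add(-expiryDelta).After(time.Now())
}

func main() {
	t := &token{AccessToken: "abc", Expiry: time.Now().Add(5 * time.Second)}
	fmt.Println(t.valid()) // false: inside the 10s safety margin
}
```

The zero Expiry meaning "never expires" matches the behaviour documented on the deleted Token.Expiry field.
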
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/transport.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/transport.go
deleted file mode 100644
index 92ac7e2531..0000000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/transport.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package oauth2
-
-import (
- "errors"
- "io"
- "net/http"
- "sync"
-)
-
-// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
-// wrapping a base RoundTripper and adding an Authorization header
-// with a token from the supplied Source.
-//
-// Transport is a low-level mechanism. Most code will use the
-// higher-level Config.Client method instead.
-type Transport struct {
- // Source supplies the token to add to outgoing requests'
- // Authorization headers.
- Source TokenSource
-
- // Base is the base RoundTripper used to make HTTP requests.
- // If nil, http.DefaultTransport is used.
- Base http.RoundTripper
-
- mu sync.Mutex // guards modReq
- modReq map[*http.Request]*http.Request // original -> modified
-}
-
-// RoundTrip authorizes and authenticates the request with an
-// access token. If no token exists or the token is expired, it
-// tries to refresh or fetch a new token.
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
- if t.Source == nil {
- return nil, errors.New("oauth2: Transport's Source is nil")
- }
- token, err := t.Source.Token()
- if err != nil {
- return nil, err
- }
-
- req2 := cloneRequest(req) // per RoundTripper contract
- token.SetAuthHeader(req2)
- t.setModReq(req, req2)
- res, err := t.base().RoundTrip(req2)
- if err != nil {
- t.setModReq(req, nil)
- return nil, err
- }
- res.Body = &onEOFReader{
- rc: res.Body,
- fn: func() { t.setModReq(req, nil) },
- }
- return res, nil
-}
-
-// CancelRequest cancels an in-flight request by closing its connection.
-func (t *Transport) CancelRequest(req *http.Request) {
- type canceler interface {
- CancelRequest(*http.Request)
- }
- if cr, ok := t.base().(canceler); ok {
- t.mu.Lock()
- modReq := t.modReq[req]
- delete(t.modReq, req)
- t.mu.Unlock()
- cr.CancelRequest(modReq)
- }
-}
-
-func (t *Transport) base() http.RoundTripper {
- if t.Base != nil {
- return t.Base
- }
- return http.DefaultTransport
-}
-
-func (t *Transport) setModReq(orig, mod *http.Request) {
- t.mu.Lock()
- defer t.mu.Unlock()
- if t.modReq == nil {
- t.modReq = make(map[*http.Request]*http.Request)
- }
- if mod == nil {
- delete(t.modReq, orig)
- } else {
- t.modReq[orig] = mod
- }
-}
-
-// cloneRequest returns a clone of the provided *http.Request.
-// The clone is a shallow copy of the struct and its Header map.
-func cloneRequest(r *http.Request) *http.Request {
- // shallow copy of the struct
- r2 := new(http.Request)
- *r2 = *r
- // deep copy of the Header
- r2.Header = make(http.Header, len(r.Header))
- for k, s := range r.Header {
- r2.Header[k] = append([]string(nil), s...)
- }
- return r2
-}
-
-type onEOFReader struct {
- rc io.ReadCloser
- fn func()
-}
-
-func (r *onEOFReader) Read(p []byte) (n int, err error) {
- n, err = r.rc.Read(p)
- if err == io.EOF {
- r.runFunc()
- }
- return
-}
-
-func (r *onEOFReader) Close() error {
- err := r.rc.Close()
- r.runFunc()
- return err
-}
-
-func (r *onEOFReader) runFunc() {
- if fn := r.fn; fn != nil {
- fn()
- r.fn = nil
- }
-}
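
transport.go clones each request before setting the Authorization header because the http.RoundTripper contract forbids mutating the caller's request. The deleted code hand-rolls cloneRequest; the sketch below shows the same pattern using the standard library's req.Clone (Go 1.13+), with an illustrative authTransport type that is not the vendored API.

```go
package main

import (
	"fmt"
	"net/http"
)

// authTransport adds a bearer token to each request without mutating
// the caller's *http.Request, as the RoundTripper contract requires.
type authTransport struct {
	token string
	base  http.RoundTripper
}

func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	r2 := req.Clone(req.Context()) // shallow copy with deep-copied headers
	r2.Header.Set("Authorization", "Bearer "+t.token)
	base := t.base
	if base == nil {
		base = http.DefaultTransport
	}
	return base.RoundTrip(r2)
}

func main() {
	req, _ := http.NewRequest("GET", "https://example.com/", nil)
	clone := req.Clone(req.Context())
	clone.Header.Set("Authorization", "Bearer example-token")
	// The original request is untouched, which is the point of cloning.
	fmt.Println(req.Header.Get("Authorization") == "") // true
}
```
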
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/LICENSE b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/LICENSE
deleted file mode 100644
index 263aa7a0c1..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2011 Google Inc. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/backoff.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/backoff.go
deleted file mode 100644
index 1356140472..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/backoff.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gensupport
-
-import (
- "math/rand"
- "time"
-)
-
-type BackoffStrategy interface {
- // Pause returns the duration of the next pause and true if the operation should be
- // retried, or false if no further retries should be attempted.
- Pause() (time.Duration, bool)
-
- // Reset restores the strategy to its initial state.
- Reset()
-}
-
-// ExponentialBackoff performs exponential backoff as per https://en.wikipedia.org/wiki/Exponential_backoff.
-// The initial pause time is given by Base.
-// Once the total pause time exceeds Max, Pause will indicate no further retries.
-type ExponentialBackoff struct {
- Base time.Duration
- Max time.Duration
- total time.Duration
- n uint
-}
-
-func (eb *ExponentialBackoff) Pause() (time.Duration, bool) {
- if eb.total > eb.Max {
- return 0, false
- }
-
-	// The next pause is selected randomly from [0, 2^n * Base).
- d := time.Duration(rand.Int63n((1 << eb.n) * int64(eb.Base)))
- eb.total += d
- eb.n++
- return d, true
-}
-
-func (eb *ExponentialBackoff) Reset() {
- eb.n = 0
- eb.total = 0
-}
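
backoff.go draws each pause uniformly from [0, 2^n * Base) and stops retrying once the cumulative pause exceeds Max. A self-contained sketch of that strategy, with illustrative field names and limits:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff mirrors the deleted ExponentialBackoff: each pause is drawn
// uniformly from [0, 2^n * base), and retries stop once the cumulative
// pause exceeds max.
type backoff struct {
	base, max time.Duration
	total     time.Duration
	n         uint
}

func (b *backoff) pause() (time.Duration, bool) {
	if b.total > b.max {
		return 0, false
	}
	d := time.Duration(rand.Int63n(int64(b.base) << b.n))
	b.total += d
	b.n++
	return d, true
}

func main() {
	b := &backoff{base: 250 * time.Millisecond, max: 2 * time.Second}
	for {
		d, ok := b.pause()
		if !ok {
			break
		}
		fmt.Println("would sleep", d)
	}
}
```
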
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/buffer.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/buffer.go
deleted file mode 100644
index 4b8ec14244..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/buffer.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gensupport
-
-import (
- "bytes"
- "io"
-
- "google.golang.org/api/googleapi"
-)
-
-// ResumableBuffer buffers data from an io.Reader to support uploading media in retryable chunks.
-type ResumableBuffer struct {
- media io.Reader
-
- chunk []byte // The current chunk which is pending upload. The capacity is the chunk size.
- err error // Any error generated when populating chunk by reading media.
-
- // The absolute position of chunk in the underlying media.
- off int64
-}
-
-func NewResumableBuffer(media io.Reader, chunkSize int) *ResumableBuffer {
- return &ResumableBuffer{media: media, chunk: make([]byte, 0, chunkSize)}
-}
-
-// Chunk returns the current buffered chunk, the offset in the underlying media
-// from which the chunk is drawn, and the size of the chunk.
-// Successive calls to Chunk return the same chunk between calls to Next.
-func (rb *ResumableBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) {
- // There may already be data in chunk if Next has not been called since the previous call to Chunk.
- if rb.err == nil && len(rb.chunk) == 0 {
- rb.err = rb.loadChunk()
- }
- return bytes.NewReader(rb.chunk), rb.off, len(rb.chunk), rb.err
-}
-
-// loadChunk will read from media into chunk, up to the capacity of chunk.
-func (rb *ResumableBuffer) loadChunk() error {
- bufSize := cap(rb.chunk)
- rb.chunk = rb.chunk[:bufSize]
-
- read := 0
- var err error
- for err == nil && read < bufSize {
- var n int
- n, err = rb.media.Read(rb.chunk[read:])
- read += n
- }
- rb.chunk = rb.chunk[:read]
- return err
-}
-
-// Next advances to the next chunk, which will be returned by the next call to Chunk.
-// Calls to Next without a corresponding prior call to Chunk will have no effect.
-func (rb *ResumableBuffer) Next() {
- rb.off += int64(len(rb.chunk))
- rb.chunk = rb.chunk[0:0]
-}
-
-type readerTyper struct {
- io.Reader
- googleapi.ContentTyper
-}
-
-// ReaderAtToReader adapts a ReaderAt to be used as a Reader.
-// If ra implements googleapi.ContentTyper, then the returned reader
-// will also implement googleapi.ContentTyper, delegating to ra.
-func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader {
- r := io.NewSectionReader(ra, 0, size)
- if typer, ok := ra.(googleapi.ContentTyper); ok {
- return readerTyper{r, typer}
- }
- return r
-}
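
buffer.go fills each chunk by looping over Read until the chunk's capacity is reached, so only the last chunk of the media can be short. The same full-chunk-or-EOF behaviour can be sketched with io.ReadFull; readChunks below is an illustration, not the vendored ResumableBuffer.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// readChunks splits r into chunks of at most size bytes, reading each
// chunk fully before moving on, as the deleted ResumableBuffer does.
func readChunks(r io.Reader, size int) ([][]byte, error) {
	var chunks [][]byte
	for {
		buf := make([]byte, size)
		n, err := io.ReadFull(r, buf)
		if n > 0 {
			chunks = append(chunks, buf[:n])
		}
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			return chunks, nil
		}
		if err != nil {
			return chunks, err
		}
	}
}

func main() {
	chunks, _ := readChunks(bytes.NewReader([]byte("abcdefghij")), 4)
	fmt.Println(len(chunks)) // 3 chunks: "abcd", "efgh", "ij"
}
```
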
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/doc.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/doc.go
deleted file mode 100644
index 752c4b411b..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/doc.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package gensupport is an internal implementation detail used by code
-// generated by the google-api-go-generator tool.
-//
-// This package may be modified at any time without regard for backwards
-// compatibility. It should not be used directly by API users.
-package gensupport
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/json.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/json.go
deleted file mode 100644
index dd7bcd2eb0..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/json.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gensupport
-
-import (
- "encoding/json"
- "fmt"
- "reflect"
- "strings"
-)
-
-// MarshalJSON returns a JSON encoding of schema containing only selected fields.
-// A field is selected if:
-// * it has a non-empty value, or
-// * its field name is present in forceSendFields, and
-// * it is not a nil pointer or nil interface.
-// The JSON key for each selected field is taken from the field's json: struct tag.
-func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) {
- if len(forceSendFields) == 0 {
- return json.Marshal(schema)
- }
-
- mustInclude := make(map[string]struct{})
- for _, f := range forceSendFields {
- mustInclude[f] = struct{}{}
- }
-
- dataMap, err := schemaToMap(schema, mustInclude)
- if err != nil {
- return nil, err
- }
- return json.Marshal(dataMap)
-}
-
-func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) {
- m := make(map[string]interface{})
- s := reflect.ValueOf(schema)
- st := s.Type()
-
- for i := 0; i < s.NumField(); i++ {
- jsonTag := st.Field(i).Tag.Get("json")
- if jsonTag == "" {
- continue
- }
- tag, err := parseJSONTag(jsonTag)
- if err != nil {
- return nil, err
- }
- if tag.ignore {
- continue
- }
-
- v := s.Field(i)
- f := st.Field(i)
- if !includeField(v, f, mustInclude) {
- continue
- }
-
- // nil maps are treated as empty maps.
- if f.Type.Kind() == reflect.Map && v.IsNil() {
- m[tag.apiName] = map[string]string{}
- continue
- }
-
- // nil slices are treated as empty slices.
- if f.Type.Kind() == reflect.Slice && v.IsNil() {
- m[tag.apiName] = []bool{}
- continue
- }
-
- if tag.stringFormat {
- m[tag.apiName] = formatAsString(v, f.Type.Kind())
- } else {
- m[tag.apiName] = v.Interface()
- }
- }
- return m, nil
-}
-
-// formatAsString returns a string representation of v, dereferencing it first if possible.
-func formatAsString(v reflect.Value, kind reflect.Kind) string {
- if kind == reflect.Ptr && !v.IsNil() {
- v = v.Elem()
- }
-
- return fmt.Sprintf("%v", v.Interface())
-}
-
-// jsonTag represents a restricted version of the struct tag format used by encoding/json.
-// It is used to describe the JSON encoding of fields in a Schema struct.
-type jsonTag struct {
- apiName string
- stringFormat bool
- ignore bool
-}
-
-// parseJSONTag parses a restricted version of the struct tag format used by encoding/json.
-// The format of the tag must match that generated by the Schema.writeSchemaStruct method
-// in the api generator.
-func parseJSONTag(val string) (jsonTag, error) {
- if val == "-" {
- return jsonTag{ignore: true}, nil
- }
-
- var tag jsonTag
-
- i := strings.Index(val, ",")
- if i == -1 || val[:i] == "" {
- return tag, fmt.Errorf("malformed json tag: %s", val)
- }
-
- tag = jsonTag{
- apiName: val[:i],
- }
-
- switch val[i+1:] {
- case "omitempty":
- case "omitempty,string":
- tag.stringFormat = true
- default:
- return tag, fmt.Errorf("malformed json tag: %s", val)
- }
-
- return tag, nil
-}
-
-// Reports whether the struct field "f" with value "v" should be included in JSON output.
-func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool {
- // The regular JSON encoding of a nil pointer is "null", which means "delete this field".
- // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set.
- // However, many fields are not pointers, so there would be no way to delete these fields.
- // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields.
- // Deletion will be handled by a separate mechanism.
- if f.Type.Kind() == reflect.Ptr && v.IsNil() {
- return false
- }
-
- // The "any" type is represented as an interface{}. If this interface
- // is nil, there is no reasonable representation to send. We ignore
- // these fields, for the same reasons as given above for pointers.
- if f.Type.Kind() == reflect.Interface && v.IsNil() {
- return false
- }
-
- _, ok := mustInclude[f.Name]
- return ok || !isEmptyValue(v)
-}
-
-// isEmptyValue reports whether v is the empty value for its type. This
-// implementation is based on that of the encoding/json package, but its
-// correctness does not depend on it being identical. What's important is that
-// this function return false in situations where v should not be sent as part
-// of a PATCH operation.
-func isEmptyValue(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- return v.Len() == 0
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Interface, reflect.Ptr:
- return v.IsNil()
- }
- return false
-}
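
json.go exists because encoding/json's omitempty drops zero-valued fields, yet generated clients sometimes need to send an explicit zero; routing the struct through a map restores those fields. A compact sketch of the idea, with an illustrative Disk struct and marshalForced helper (not the vendored MarshalJSON signature):

```go
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"
)

type Disk struct {
	Name   string `json:"name,omitempty"`
	SizeGB int    `json:"sizeGb,omitempty"`
}

// marshalForced marshals v, forcing the listed struct fields into the
// output even when they hold their zero value.
func marshalForced(v interface{}, force ...string) ([]byte, error) {
	forced := make(map[string]bool)
	for _, f := range force {
		forced[f] = true
	}
	rv := reflect.ValueOf(v)
	rt := rv.Type()
	m := make(map[string]interface{})
	for i := 0; i < rt.NumField(); i++ {
		tag := strings.Split(rt.Field(i).Tag.Get("json"), ",")[0]
		fv := rv.Field(i)
		if forced[rt.Field(i).Name] || !fv.IsZero() {
			m[tag] = fv.Interface()
		}
	}
	return json.Marshal(m)
}

func main() {
	b, _ := marshalForced(Disk{Name: "boot"}, "SizeGB")
	fmt.Println(string(b)) // {"name":"boot","sizeGb":0}
}
```
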
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/media.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/media.go
deleted file mode 100644
index 817f46f5d2..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/media.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gensupport
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "mime/multipart"
- "net/http"
- "net/textproto"
-
- "google.golang.org/api/googleapi"
-)
-
-const sniffBuffSize = 512
-
-func newContentSniffer(r io.Reader) *contentSniffer {
- return &contentSniffer{r: r}
-}
-
-// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader.
-type contentSniffer struct {
- r io.Reader
- start []byte // buffer for the sniffed bytes.
- err error // set to any error encountered while reading bytes to be sniffed.
-
- ctype string // set on first sniff.
- sniffed bool // set to true on first sniff.
-}
-
-func (cs *contentSniffer) Read(p []byte) (n int, err error) {
- // Ensure that the content type is sniffed before any data is consumed from Reader.
- _, _ = cs.ContentType()
-
- if len(cs.start) > 0 {
- n := copy(p, cs.start)
- cs.start = cs.start[n:]
- return n, nil
- }
-
- // We may have read some bytes into start while sniffing, even if the read ended in an error.
- // We should first return those bytes, then the error.
- if cs.err != nil {
- return 0, cs.err
- }
-
- // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader.
- return cs.r.Read(p)
-}
-
-// ContentType returns the sniffed content type, and whether the content type was successfully sniffed.
-func (cs *contentSniffer) ContentType() (string, bool) {
- if cs.sniffed {
- return cs.ctype, cs.ctype != ""
- }
- cs.sniffed = true
- // If ReadAll hits EOF, it returns err==nil.
- cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize))
-
- // Don't try to detect the content type based on possibly incomplete data.
- if cs.err != nil {
- return "", false
- }
-
- cs.ctype = http.DetectContentType(cs.start)
- return cs.ctype, true
-}
-
-// DetermineContentType determines the content type of the supplied reader.
-// If the content type is already known, it can be specified via ctype.
-// Otherwise, the content of media will be sniffed to determine the content type.
-// If media implements googleapi.ContentTyper (deprecated), this will be used
-// instead of sniffing the content.
-// After calling DetermineContentType the caller must not perform further reads on
-// media, but rather read from the Reader that is returned.
-func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) {
- // Note: callers could avoid calling DetectContentType if ctype != "",
- // but doing the check inside this function reduces the amount of
- // generated code.
- if ctype != "" {
- return media, ctype
- }
-
-	// For backwards compatibility, allow clients to set content
- // type by providing a ContentTyper for media.
- if typer, ok := media.(googleapi.ContentTyper); ok {
- return media, typer.ContentType()
- }
-
- sniffer := newContentSniffer(media)
- if ctype, ok := sniffer.ContentType(); ok {
- return sniffer, ctype
- }
- // If content type could not be sniffed, reads from sniffer will eventually fail with an error.
- return sniffer, ""
-}
-
-type typeReader struct {
- io.Reader
- typ string
-}
-
-// multipartReader combines the contents of multiple readers to create a multipart/related HTTP body.
-// Close must be called if reads from the multipartReader are abandoned before reaching EOF.
-type multipartReader struct {
- pr *io.PipeReader
- pipeOpen bool
- ctype string
-}
-
-func newMultipartReader(parts []typeReader) *multipartReader {
- mp := &multipartReader{pipeOpen: true}
- var pw *io.PipeWriter
- mp.pr, pw = io.Pipe()
- mpw := multipart.NewWriter(pw)
- mp.ctype = "multipart/related; boundary=" + mpw.Boundary()
- go func() {
- for _, part := range parts {
- w, err := mpw.CreatePart(typeHeader(part.typ))
- if err != nil {
- mpw.Close()
- pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err))
- return
- }
- _, err = io.Copy(w, part.Reader)
- if err != nil {
- mpw.Close()
- pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err))
- return
- }
- }
-
- mpw.Close()
- pw.Close()
- }()
- return mp
-}
-
-func (mp *multipartReader) Read(data []byte) (n int, err error) {
- return mp.pr.Read(data)
-}
-
-func (mp *multipartReader) Close() error {
- if !mp.pipeOpen {
- return nil
- }
- mp.pipeOpen = false
- return mp.pr.Close()
-}
-
-// CombineBodyMedia combines a json body with media content to create a multipart/related HTTP body.
-// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with random boundary.
-//
-// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF.
-func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) {
- mp := newMultipartReader([]typeReader{
- {body, bodyContentType},
- {media, mediaContentType},
- })
- return mp, mp.ctype
-}
-
-func typeHeader(contentType string) textproto.MIMEHeader {
- h := make(textproto.MIMEHeader)
- if contentType != "" {
- h.Set("Content-Type", contentType)
- }
- return h
-}
-
-// PrepareUpload determines whether the data in the supplied reader should be
-// uploaded in a single request, or in sequential chunks.
-// chunkSize is the size of the chunk that media should be split into.
-// If chunkSize is non-zero and the contents of media do not fit in a single
-// chunk (or there is an error reading media), then media will be returned as a
-// ResumableBuffer. Otherwise, media will be returned as a Reader.
-//
-// After PrepareUpload has been called, media should no longer be used: the
-// media content should be accessed via one of the return values.
-func PrepareUpload(media io.Reader, chunkSize int) (io.Reader,
- *ResumableBuffer) {
- if chunkSize == 0 { // do not chunk
- return media, nil
- }
-
- rb := NewResumableBuffer(media, chunkSize)
- rdr, _, _, err := rb.Chunk()
-
- if err == io.EOF { // we can upload this in a single request
- return rdr, nil
- }
- // err might be a non-EOF error. If it is, the next call to rb.Chunk will
- // return the same error. Returning a ResumableBuffer ensures that this error
- // will be handled at some point.
-
- return nil, rb
-}
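
media.go sniffs up to 512 bytes with http.DetectContentType and then replays the sniffed bytes ahead of the rest of the stream so no data is lost. A short sketch of that pattern using io.MultiReader; sniffContentType is an illustrative helper, not the vendored contentSniffer.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// sniffContentType reads up to 512 bytes from r, detects the content
// type, and returns a reader that replays the sniffed bytes first.
func sniffContentType(r io.Reader) (io.Reader, string, error) {
	buf := make([]byte, 512)
	n, err := io.ReadFull(r, buf)
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		return nil, "", err
	}
	ctype := http.DetectContentType(buf[:n])
	return io.MultiReader(bytes.NewReader(buf[:n]), r), ctype, nil
}

func main() {
	r, ctype, _ := sniffContentType(strings.NewReader("<html><body>hi</body></html>"))
	body, _ := io.ReadAll(r)
	fmt.Println(ctype, len(body)) // text/html; charset=utf-8 28
}
```
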
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/params.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/params.go
deleted file mode 100644
index 3b3c743967..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/params.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gensupport
-
-import (
- "net/url"
-
- "google.golang.org/api/googleapi"
-)
-
-// URLParams is a simplified replacement for url.Values
-// that safely builds up URL parameters for encoding.
-type URLParams map[string][]string
-
-// Get returns the first value for the given key, or "".
-func (u URLParams) Get(key string) string {
- vs := u[key]
- if len(vs) == 0 {
- return ""
- }
- return vs[0]
-}
-
-// Set sets the key to value.
-// It replaces any existing values.
-func (u URLParams) Set(key, value string) {
- u[key] = []string{value}
-}
-
-// SetMulti sets the key to an array of values.
-// It replaces any existing values.
-// Note that values must not be modified after calling SetMulti
-// so the caller is responsible for making a copy if necessary.
-func (u URLParams) SetMulti(key string, values []string) {
- u[key] = values
-}
-
-// Encode encodes the values into ``URL encoded'' form
-// ("bar=baz&foo=quux") sorted by key.
-func (u URLParams) Encode() string {
- return url.Values(u).Encode()
-}
-
-func SetOptions(u URLParams, opts ...googleapi.CallOption) {
- for _, o := range opts {
- u.Set(o.Get())
- }
-}
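
params.go is a thin wrapper over url.Values that generated clients use to accumulate query parameters. The underlying behaviour is just Set and Encode, as this small sketch shows:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// url.Values provides the behaviour the deleted URLParams wraps:
	// Set replaces any existing values, and Encode sorts by key.
	params := url.Values{}
	params.Set("alt", "json")
	params.Set("quotaUser", "example-user")
	params["fields"] = []string{"nextPageToken", "items/id"} // multi-value, like SetMulti
	fmt.Println(params.Encode())
	// alt=json&fields=nextPageToken&fields=items%2Fid&quotaUser=example-user
}
```
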
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/resumable.go
deleted file mode 100644
index b3e774aa49..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/resumable.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gensupport
-
-import (
- "fmt"
- "io"
- "net/http"
- "sync"
- "time"
-
- "golang.org/x/net/context"
- "golang.org/x/net/context/ctxhttp"
-)
-
-const (
- // statusResumeIncomplete is the code returned by the Google uploader
- // when the transfer is not yet complete.
- statusResumeIncomplete = 308
-
- // statusTooManyRequests is returned by the storage API if the
- // per-project limits have been temporarily exceeded. The request
- // should be retried.
- // https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes
- statusTooManyRequests = 429
-)
-
-// ResumableUpload is used by the generated APIs to provide resumable uploads.
-// It is not used by developers directly.
-type ResumableUpload struct {
- Client *http.Client
- // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
- URI string
- UserAgent string // User-Agent for header of the request
- // Media is the object being uploaded.
- Media *ResumableBuffer
- // MediaType defines the media type, e.g. "image/jpeg".
- MediaType string
-
- mu sync.Mutex // guards progress
- progress int64 // number of bytes uploaded so far
-
- // Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded.
- Callback func(int64)
-
- // If not specified, a default exponential backoff strategy will be used.
- Backoff BackoffStrategy
-}
-
-// Progress returns the number of bytes uploaded at this point.
-func (rx *ResumableUpload) Progress() int64 {
- rx.mu.Lock()
- defer rx.mu.Unlock()
- return rx.progress
-}
-
-// doUploadRequest performs a single HTTP request to upload data.
-// off specifies the offset in rx.Media from which data is drawn.
-// size is the number of bytes in data.
-// final specifies whether data is the final chunk to be uploaded.
-func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, off, size int64, final bool) (*http.Response, error) {
- req, err := http.NewRequest("POST", rx.URI, data)
- if err != nil {
- return nil, err
- }
-
- req.ContentLength = size
- var contentRange string
- if final {
- if size == 0 {
- contentRange = fmt.Sprintf("bytes */%v", off)
- } else {
- contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size)
- }
- } else {
- contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+size-1)
- }
- req.Header.Set("Content-Range", contentRange)
- req.Header.Set("Content-Type", rx.MediaType)
- req.Header.Set("User-Agent", rx.UserAgent)
- return ctxhttp.Do(ctx, rx.Client, req)
-
-}
-
-// reportProgress calls a user-supplied callback to report upload progress.
-// If old==updated, the callback is not called.
-func (rx *ResumableUpload) reportProgress(old, updated int64) {
- if updated-old == 0 {
- return
- }
- rx.mu.Lock()
- rx.progress = updated
- rx.mu.Unlock()
- if rx.Callback != nil {
- rx.Callback(updated)
- }
-}
-
-// transferChunk performs a single HTTP request to upload a single chunk from rx.Media.
-func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) {
- chunk, off, size, err := rx.Media.Chunk()
-
- done := err == io.EOF
- if !done && err != nil {
- return nil, err
- }
-
- res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done)
- if err != nil {
- return res, err
- }
-
- if res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK {
- rx.reportProgress(off, off+int64(size))
- }
-
- if res.StatusCode == statusResumeIncomplete {
- rx.Media.Next()
- }
- return res, nil
-}
-
-func contextDone(ctx context.Context) bool {
- select {
- case <-ctx.Done():
- return true
- default:
- return false
- }
-}
-
-// Upload starts the process of a resumable upload with a cancellable context.
-// It retries using the provided backoff strategy until cancelled or the
-// strategy indicates to stop retrying.
-// It is called from the auto-generated API code and is not visible to the user.
-// rx is private to the auto-generated API code.
-// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close.
-func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) {
- var pause time.Duration
- backoff := rx.Backoff
- if backoff == nil {
- backoff = DefaultBackoffStrategy()
- }
-
- for {
- // Ensure that we return in the case of cancelled context, even if pause is 0.
- if contextDone(ctx) {
- return nil, ctx.Err()
- }
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- case <-time.After(pause):
- }
-
- resp, err = rx.transferChunk(ctx)
-
- var status int
- if resp != nil {
- status = resp.StatusCode
- }
-
- // Check if we should retry the request.
- if shouldRetry(status, err) {
- var retry bool
- pause, retry = backoff.Pause()
- if retry {
- if resp != nil && resp.Body != nil {
- resp.Body.Close()
- }
- continue
- }
- }
-
- // If the chunk was uploaded successfully, but there's still
- // more to go, upload the next chunk without any delay.
- if status == statusResumeIncomplete {
- pause = 0
- backoff.Reset()
- resp.Body.Close()
- continue
- }
-
- // It's possible for err and resp to both be non-nil here, but we expose a simpler
- // contract to our callers: exactly one of resp and err will be non-nil. This means
- // that any response body must be closed here before returning a non-nil error.
- if err != nil {
- if resp != nil && resp.Body != nil {
- resp.Body.Close()
- }
- return nil, err
- }
-
- return resp, nil
- }
-}
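
resumable.go labels every chunk with a Content-Range header: intermediate chunks leave the total length as "*", the final chunk pins it, and an empty final chunk only reports the total. A tiny sketch of that formatting, mirroring the branches in doUploadRequest:

```go
package main

import "fmt"

// contentRange formats the Content-Range header the way the deleted
// uploader does: intermediate chunks leave the total as "*", the final
// chunk pins it, and an empty final chunk just reports the total.
func contentRange(off, size int64, final bool) string {
	if final {
		if size == 0 {
			return fmt.Sprintf("bytes */%v", off)
		}
		return fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size)
	}
	return fmt.Sprintf("bytes %v-%v/*", off, off+size-1)
}

func main() {
	fmt.Println(contentRange(0, 1024, false))  // bytes 0-1023/*
	fmt.Println(contentRange(1024, 512, true)) // bytes 1024-1535/1536
}
```
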
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/retry.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/retry.go
deleted file mode 100644
index 7f83d1da99..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/retry.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package gensupport
-
-import (
- "io"
- "net"
- "net/http"
- "time"
-
- "golang.org/x/net/context"
-)
-
-// Retry invokes the given function, retrying it multiple times if the connection failed or
-// the HTTP status response indicates the request should be attempted again. ctx may be nil.
-func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) {
- for {
- resp, err := f()
-
- var status int
- if resp != nil {
- status = resp.StatusCode
- }
-
- // Return if we shouldn't retry.
- pause, retry := backoff.Pause()
- if !shouldRetry(status, err) || !retry {
- return resp, err
- }
-
- // Ensure the response body is closed, if any.
- if resp != nil && resp.Body != nil {
- resp.Body.Close()
- }
-
- // Pause, but still listen to ctx.Done if context is not nil.
- var done <-chan struct{}
- if ctx != nil {
- done = ctx.Done()
- }
- select {
- case <-done:
- return nil, ctx.Err()
- case <-time.After(pause):
- }
- }
-}
-
-// DefaultBackoffStrategy returns a default strategy to use for retrying failed upload requests.
-func DefaultBackoffStrategy() BackoffStrategy {
- return &ExponentialBackoff{
- Base: 250 * time.Millisecond,
- Max: 16 * time.Second,
- }
-}
-
-// shouldRetry returns true if the HTTP response / error indicates that the
-// request should be attempted again.
-func shouldRetry(status int, err error) bool {
- // Retry for 5xx response codes.
- if 500 <= status && status < 600 {
- return true
- }
-
-	// Retry on statusTooManyRequests.
- if status == statusTooManyRequests {
- return true
- }
-
- // Retry on unexpected EOFs and temporary network errors.
- if err == io.ErrUnexpectedEOF {
- return true
- }
- if err, ok := err.(net.Error); ok {
- return err.Temporary()
- }
-
- return false
-}
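
retry.go combines shouldRetry (5xx, 429, and transient network errors) with the backoff strategy and a context-aware pause. Below is a standalone sketch of that loop shape; the fixed pause slice stands in for a BackoffStrategy, and the status-only shouldRetry is a simplification of the deleted function.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"time"
)

func shouldRetry(status int, err error) bool {
	return err != nil || status == http.StatusTooManyRequests || (status >= 500 && status < 600)
}

// retry calls f until it succeeds, the pauses are exhausted, or ctx is done.
func retry(ctx context.Context, f func() (int, error), pauses []time.Duration) (int, error) {
	for i := 0; ; i++ {
		status, err := f()
		if !shouldRetry(status, err) || i >= len(pauses) {
			return status, err
		}
		select {
		case <-ctx.Done():
			return 0, ctx.Err()
		case <-time.After(pauses[i]):
		}
	}
}

func main() {
	attempts := 0
	status, err := retry(context.Background(), func() (int, error) {
		attempts++
		if attempts < 3 {
			return 503, errors.New("transient")
		}
		return 200, nil
	}, []time.Duration{time.Millisecond, time.Millisecond, time.Millisecond})
	fmt.Println(status, err, attempts) // 200 <nil> 3
}
```
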
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/googleapi.go
deleted file mode 100644
index 03e9acdd80..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/googleapi.go
+++ /dev/null
@@ -1,424 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package googleapi contains the common code shared by all Google API
-// libraries.
-package googleapi
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "strings"
-
- "google.golang.org/api/googleapi/internal/uritemplates"
-)
-
-// ContentTyper is an interface for Readers which know (or would like
-// to override) their Content-Type. If a media body doesn't implement
-// ContentTyper, the type is sniffed from the content using
-// http.DetectContentType.
-type ContentTyper interface {
- ContentType() string
-}
-
-// A SizeReaderAt is a ReaderAt with a Size method.
-// An io.SectionReader implements SizeReaderAt.
-type SizeReaderAt interface {
- io.ReaderAt
- Size() int64
-}
-
-// ServerResponse is embedded in each Do response and
-// provides the HTTP status code and header sent by the server.
-type ServerResponse struct {
- // HTTPStatusCode is the server's response status code.
- // When using a resource method's Do call, this will always be in the 2xx range.
- HTTPStatusCode int
- // Header contains the response header fields from the server.
- Header http.Header
-}
-
-const (
- Version = "0.5"
-
- // UserAgent is the header string used to identify this package.
- UserAgent = "google-api-go-client/" + Version
-
-	// The default chunk size to use for resumable uploads if not specified by the user.
- DefaultUploadChunkSize = 8 * 1024 * 1024
-
- // The minimum chunk size that can be used for resumable uploads. All
-	// user-specified chunk sizes must be a multiple of this value.
- MinUploadChunkSize = 256 * 1024
-)
-
-// Error contains an error response from the server.
-type Error struct {
- // Code is the HTTP response status code and will always be populated.
- Code int `json:"code"`
- // Message is the server response message and is only populated when
- // explicitly referenced by the JSON server response.
- Message string `json:"message"`
- // Body is the raw response returned by the server.
- // It is often but not always JSON, depending on how the request fails.
- Body string
- // Header contains the response header fields from the server.
- Header http.Header
-
- Errors []ErrorItem
-}
-
-// ErrorItem is a detailed error code & message from the Google API frontend.
-type ErrorItem struct {
- // Reason is the typed error code. For example: "some_example".
- Reason string `json:"reason"`
- // Message is the human-readable description of the error.
- Message string `json:"message"`
-}
-
-func (e *Error) Error() string {
- if len(e.Errors) == 0 && e.Message == "" {
- return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body)
- }
- var buf bytes.Buffer
- fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code)
- if e.Message != "" {
- fmt.Fprintf(&buf, "%s", e.Message)
- }
- if len(e.Errors) == 0 {
- return strings.TrimSpace(buf.String())
- }
- if len(e.Errors) == 1 && e.Errors[0].Message == e.Message {
- fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason)
- return buf.String()
- }
- fmt.Fprintln(&buf, "\nMore details:")
- for _, v := range e.Errors {
- fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message)
- }
- return buf.String()
-}
-
-type errorReply struct {
- Error *Error `json:"error"`
-}
-
-// CheckResponse returns an error (of type *Error) if the response
-// status code is not 2xx.
-func CheckResponse(res *http.Response) error {
- if res.StatusCode >= 200 && res.StatusCode <= 299 {
- return nil
- }
- slurp, err := ioutil.ReadAll(res.Body)
- if err == nil {
- jerr := new(errorReply)
- err = json.Unmarshal(slurp, jerr)
- if err == nil && jerr.Error != nil {
- if jerr.Error.Code == 0 {
- jerr.Error.Code = res.StatusCode
- }
- jerr.Error.Body = string(slurp)
- return jerr.Error
- }
- }
- return &Error{
- Code: res.StatusCode,
- Body: string(slurp),
- Header: res.Header,
- }
-}
-
-// IsNotModified reports whether err is the result of the
-// server replying with http.StatusNotModified.
-// Such error values are sometimes returned by "Do" methods
-// on calls when If-None-Match is used.
-func IsNotModified(err error) bool {
- if err == nil {
- return false
- }
- ae, ok := err.(*Error)
- return ok && ae.Code == http.StatusNotModified
-}
-
-// CheckMediaResponse returns an error (of type *Error) if the response
-// status code is not 2xx. Unlike CheckResponse it does not assume the
-// body is a JSON error document.
-func CheckMediaResponse(res *http.Response) error {
- if res.StatusCode >= 200 && res.StatusCode <= 299 {
- return nil
- }
- slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
- res.Body.Close()
- return &Error{
- Code: res.StatusCode,
- Body: string(slurp),
- }
-}
-
-type MarshalStyle bool
-
-var WithDataWrapper = MarshalStyle(true)
-var WithoutDataWrapper = MarshalStyle(false)
-
-func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
- buf := new(bytes.Buffer)
- if wrap {
- buf.Write([]byte(`{"data": `))
- }
- err := json.NewEncoder(buf).Encode(v)
- if err != nil {
- return nil, err
- }
- if wrap {
- buf.Write([]byte(`}`))
- }
- return buf, nil
-}
-
-// endingWithErrorReader reads from r until it returns an error. If the
-// final error from r is io.EOF and e is non-nil, e is used instead.
-type endingWithErrorReader struct {
- r io.Reader
- e error
-}
-
-func (er endingWithErrorReader) Read(p []byte) (n int, err error) {
- n, err = er.r.Read(p)
- if err == io.EOF && er.e != nil {
- err = er.e
- }
- return
-}
-
-// countingWriter counts the number of bytes it receives to write, but
-// discards them.
-type countingWriter struct {
- n *int64
-}
-
-func (w countingWriter) Write(p []byte) (int, error) {
- *w.n += int64(len(p))
- return len(p), nil
-}
-
-// ProgressUpdater is a function that is called upon every progress update of a resumable upload.
-// This is the only part of a resumable upload (from googleapi) that is usable by the developer.
-// The remaining usable pieces of resumable uploads are exposed in each auto-generated API.
-type ProgressUpdater func(current, total int64)
-
-type MediaOption interface {
- setOptions(o *MediaOptions)
-}
-
-type contentTypeOption string
-
-func (ct contentTypeOption) setOptions(o *MediaOptions) {
- o.ContentType = string(ct)
- if o.ContentType == "" {
- o.ForceEmptyContentType = true
- }
-}
-
-// ContentType returns a MediaOption which sets the Content-Type header for media uploads.
-// If ctype is empty, the Content-Type header will be omitted.
-func ContentType(ctype string) MediaOption {
- return contentTypeOption(ctype)
-}
-
-type chunkSizeOption int
-
-func (cs chunkSizeOption) setOptions(o *MediaOptions) {
- size := int(cs)
- if size%MinUploadChunkSize != 0 {
- size += MinUploadChunkSize - (size % MinUploadChunkSize)
- }
- o.ChunkSize = size
-}
-
-// ChunkSize returns a MediaOption which sets the chunk size for media uploads.
-// size will be rounded up to the nearest multiple of 256K.
-// Media which contains fewer than size bytes will be uploaded in a single request.
-// Media which contains size bytes or more will be uploaded in separate chunks.
-// If size is zero, media will be uploaded in a single request.
-func ChunkSize(size int) MediaOption {
- return chunkSizeOption(size)
-}
-
-// MediaOptions stores options for customizing media upload. It is not used by developers directly.
-type MediaOptions struct {
- ContentType string
- ForceEmptyContentType bool
-
- ChunkSize int
-}
-
-// ProcessMediaOptions stores options from opts in a MediaOptions.
-// It is not used by developers directly.
-func ProcessMediaOptions(opts []MediaOption) *MediaOptions {
- mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize}
- for _, o := range opts {
- o.setOptions(mo)
- }
- return mo
-}
-
-func ResolveRelative(basestr, relstr string) string {
- u, _ := url.Parse(basestr)
- rel, _ := url.Parse(relstr)
- u = u.ResolveReference(rel)
- us := u.String()
- us = strings.Replace(us, "%7B", "{", -1)
- us = strings.Replace(us, "%7D", "}", -1)
- return us
-}
-
-// has4860Fix is whether this Go environment contains the fix for
-// http://golang.org/issue/4860
-var has4860Fix bool
-
-// init initializes has4860Fix by checking the behavior of the net/http package.
-func init() {
- r := http.Request{
- URL: &url.URL{
- Scheme: "http",
- Opaque: "//opaque",
- },
- }
- b := &bytes.Buffer{}
- r.Write(b)
- has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http"))
-}
-
-// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it
-// don't alter any hex-escaped characters in u.Path.
-func SetOpaque(u *url.URL) {
- u.Opaque = "//" + u.Host + u.Path
- if !has4860Fix {
- u.Opaque = u.Scheme + ":" + u.Opaque
- }
-}
-
-// Expand substitutes any {encoded} strings in the URL passed in using
-// the map supplied.
-//
-// This calls SetOpaque to avoid encoding of the parameters in the URL path.
-func Expand(u *url.URL, expansions map[string]string) {
- expanded, err := uritemplates.Expand(u.Path, expansions)
- if err == nil {
- u.Path = expanded
- SetOpaque(u)
- }
-}
-
-// CloseBody is used to close res.Body.
-// Prior to calling Close, it also tries to Read a small amount to see an EOF.
-// Not seeing an EOF can prevent HTTP Transports from reusing connections.
-func CloseBody(res *http.Response) {
- if res == nil || res.Body == nil {
- return
- }
- // Justification for 3 byte reads: two for up to "\r\n" after
- // a JSON/XML document, and then 1 to see EOF if we haven't yet.
- // TODO(bradfitz): detect Go 1.3+ and skip these reads.
- // See https://codereview.appspot.com/58240043
- // and https://codereview.appspot.com/49570044
- buf := make([]byte, 1)
- for i := 0; i < 3; i++ {
- _, err := res.Body.Read(buf)
- if err != nil {
- break
- }
- }
- res.Body.Close()
-
-}
-
-// VariantType returns the type name of the given variant.
-// If the map doesn't contain the named key or the value is not a []interface{}, "" is returned.
-// This is used to support "variant" APIs that can return one of a number of different types.
-func VariantType(t map[string]interface{}) string {
- s, _ := t["type"].(string)
- return s
-}
-
-// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'.
-// This is used to support "variant" APIs that can return one of a number of different types.
-// It reports whether the conversion was successful.
-func ConvertVariant(v map[string]interface{}, dst interface{}) bool {
- var buf bytes.Buffer
- err := json.NewEncoder(&buf).Encode(v)
- if err != nil {
- return false
- }
- return json.Unmarshal(buf.Bytes(), dst) == nil
-}
-
-// A Field names a field to be retrieved with a partial response.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-//
-// Partial responses can dramatically reduce the amount of data that must be sent to your application.
-// In order to request partial responses, you can specify the full list of fields
-// that your application needs by adding the Fields option to your request.
-//
-// Field strings use camelCase with leading lower-case characters to identify fields within the response.
-//
-// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields,
-// you could request just those fields like this:
-//
-// svc.Events.List().Fields("nextPageToken", "items/id").Do()
-//
-// or if you were also interested in each Item's "Updated" field, you can combine them like this:
-//
-// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do()
-//
-// More information about field formatting can be found here:
-// https://developers.google.com/+/api/#fields-syntax
-//
-// Another way to find field names is through the Google API explorer:
-// https://developers.google.com/apis-explorer/#p/
-type Field string
-
-// CombineFields combines fields into a single string.
-func CombineFields(s []Field) string {
- r := make([]string, len(s))
- for i, v := range s {
- r[i] = string(v)
- }
- return strings.Join(r, ",")
-}
-
-// A CallOption is an optional argument to an API call.
-// It should be treated as an opaque value by users of Google APIs.
-//
-// A CallOption is something that configures an API call in a way that is
-// not specific to that API; for instance, controlling the quota user for
-// an API call is common across many APIs, and is thus a CallOption.
-type CallOption interface {
- Get() (key, value string)
-}
-
-// QuotaUser returns a CallOption that will set the quota user for a call.
-// The quota user can be used by server-side applications to control accounting.
-// It can be an arbitrary string up to 40 characters, and will override UserIP
-// if both are provided.
-func QuotaUser(u string) CallOption { return quotaUser(u) }
-
-type quotaUser string
-
-func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) }
-
-// UserIP returns a CallOption that will set the "userIp" parameter of a call.
-// This should be the IP address of the originating request.
-func UserIP(ip string) CallOption { return userIP(ip) }
-
-type userIP string
-
-func (i userIP) Get() (string, string) { return "userIp", string(i) }
-
-// TODO: Fields too
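
googleapi.go's CheckResponse tries to decode a {"error": {...}} envelope from non-2xx responses and falls back to the raw body when decoding fails, so error details are never lost. A minimal sketch of that pattern operating on an in-memory body; apiError and checkBody are illustrative names, not the vendored API.

```go
package main

import (
	"encoding/json"
	"fmt"
)

type apiError struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
	Body    string `json:"-"`
}

func (e *apiError) Error() string {
	if e.Message == "" {
		return fmt.Sprintf("got HTTP response code %d with body: %s", e.Code, e.Body)
	}
	return fmt.Sprintf("Error %d: %s", e.Code, e.Message)
}

// checkBody mirrors the CheckResponse idea: try to decode a JSON error
// envelope; if that fails, keep the raw body so nothing is lost.
func checkBody(status int, body []byte) error {
	if status >= 200 && status <= 299 {
		return nil
	}
	var reply struct {
		Error *apiError `json:"error"`
	}
	if err := json.Unmarshal(body, &reply); err == nil && reply.Error != nil {
		if reply.Error.Code == 0 {
			reply.Error.Code = status
		}
		reply.Error.Body = string(body)
		return reply.Error
	}
	return &apiError{Code: status, Body: string(body)}
}

func main() {
	err := checkBody(404, []byte(`{"error":{"code":404,"message":"not found"}}`))
	fmt.Println(err) // Error 404: not found
}
```
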
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE
deleted file mode 100644
index de9c88cb65..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE
+++ /dev/null
@@ -1,18 +0,0 @@
-Copyright (c) 2013 Joshua Tacoma
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go
deleted file mode 100644
index 7c103ba138..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright 2013 Joshua Tacoma. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package uritemplates is a level 3 implementation of RFC 6570 (URI
-// Template, http://tools.ietf.org/html/rfc6570).
-// uritemplates does not support composite values (in Go: slices or maps)
-// and so does not qualify as a level 4 implementation.
-package uritemplates
-
-import (
- "bytes"
- "errors"
- "regexp"
- "strconv"
- "strings"
-)
-
-var (
- unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]")
- reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]")
- validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$")
- hex = []byte("0123456789ABCDEF")
-)
-
-func pctEncode(src []byte) []byte {
- dst := make([]byte, len(src)*3)
- for i, b := range src {
- buf := dst[i*3 : i*3+3]
- buf[0] = 0x25
- buf[1] = hex[b/16]
- buf[2] = hex[b%16]
- }
- return dst
-}
-
-func escape(s string, allowReserved bool) string {
- if allowReserved {
- return string(reserved.ReplaceAllFunc([]byte(s), pctEncode))
- }
- return string(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
-}
-
-// A uriTemplate is a parsed representation of a URI template.
-type uriTemplate struct {
- raw string
- parts []templatePart
-}
-
-// parse parses a URI template string into a uriTemplate object.
-func parse(rawTemplate string) (*uriTemplate, error) {
- split := strings.Split(rawTemplate, "{")
- parts := make([]templatePart, len(split)*2-1)
- for i, s := range split {
- if i == 0 {
- if strings.Contains(s, "}") {
- return nil, errors.New("unexpected }")
- }
- parts[i].raw = s
- continue
- }
- subsplit := strings.Split(s, "}")
- if len(subsplit) != 2 {
- return nil, errors.New("malformed template")
- }
- expression := subsplit[0]
- var err error
- parts[i*2-1], err = parseExpression(expression)
- if err != nil {
- return nil, err
- }
- parts[i*2].raw = subsplit[1]
- }
- return &uriTemplate{
- raw: rawTemplate,
- parts: parts,
- }, nil
-}
-
-type templatePart struct {
- raw string
- terms []templateTerm
- first string
- sep string
- named bool
- ifemp string
- allowReserved bool
-}
-
-type templateTerm struct {
- name string
- explode bool
- truncate int
-}
-
-func parseExpression(expression string) (result templatePart, err error) {
- switch expression[0] {
- case '+':
- result.sep = ","
- result.allowReserved = true
- expression = expression[1:]
- case '.':
- result.first = "."
- result.sep = "."
- expression = expression[1:]
- case '/':
- result.first = "/"
- result.sep = "/"
- expression = expression[1:]
- case ';':
- result.first = ";"
- result.sep = ";"
- result.named = true
- expression = expression[1:]
- case '?':
- result.first = "?"
- result.sep = "&"
- result.named = true
- result.ifemp = "="
- expression = expression[1:]
- case '&':
- result.first = "&"
- result.sep = "&"
- result.named = true
- result.ifemp = "="
- expression = expression[1:]
- case '#':
- result.first = "#"
- result.sep = ","
- result.allowReserved = true
- expression = expression[1:]
- default:
- result.sep = ","
- }
- rawterms := strings.Split(expression, ",")
- result.terms = make([]templateTerm, len(rawterms))
- for i, raw := range rawterms {
- result.terms[i], err = parseTerm(raw)
- if err != nil {
- break
- }
- }
- return result, err
-}
-
-func parseTerm(term string) (result templateTerm, err error) {
- // TODO(djd): Remove "*" suffix parsing once we check that no APIs have
- // mistakenly used that attribute.
- if strings.HasSuffix(term, "*") {
- result.explode = true
- term = term[:len(term)-1]
- }
- split := strings.Split(term, ":")
- if len(split) == 1 {
- result.name = term
- } else if len(split) == 2 {
- result.name = split[0]
- var parsed int64
- parsed, err = strconv.ParseInt(split[1], 10, 0)
- result.truncate = int(parsed)
- } else {
- err = errors.New("multiple colons in same term")
- }
- if !validname.MatchString(result.name) {
- err = errors.New("not a valid name: " + result.name)
- }
- if result.explode && result.truncate > 0 {
-		err = errors.New("both explode and prefix modifiers on same term")
- }
- return result, err
-}
-
-// Expand expands a URI template with a set of values to produce a string.
-func (t *uriTemplate) Expand(values map[string]string) string {
- var buf bytes.Buffer
- for _, p := range t.parts {
- p.expand(&buf, values)
- }
- return buf.String()
-}
-
-func (tp *templatePart) expand(buf *bytes.Buffer, values map[string]string) {
- if len(tp.raw) > 0 {
- buf.WriteString(tp.raw)
- return
- }
- var first = true
- for _, term := range tp.terms {
- value, exists := values[term.name]
- if !exists {
- continue
- }
- if first {
- buf.WriteString(tp.first)
- first = false
- } else {
- buf.WriteString(tp.sep)
- }
- tp.expandString(buf, term, value)
- }
-}
-
-func (tp *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) {
- if tp.named {
- buf.WriteString(name)
- if empty {
- buf.WriteString(tp.ifemp)
- } else {
- buf.WriteString("=")
- }
- }
-}
-
-func (tp *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) {
- if len(s) > t.truncate && t.truncate > 0 {
- s = s[:t.truncate]
- }
- tp.expandName(buf, t.name, len(s) == 0)
- buf.WriteString(escape(s, tp.allowReserved))
-}
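
uritemplates.go implements RFC 6570 level 3 expansion. For the simple {name} form, the effect is just percent-encoded substitution, which this sketch approximates; it does not cover the +, ?, and other level-3 operators the deleted package handles.

```go
package main

import (
	"fmt"
	"net/url"
	"regexp"
)

var varRe = regexp.MustCompile(`\{([A-Za-z0-9_.]+)\}`)

// expandSimple replaces {name} expressions with percent-encoded values,
// covering only RFC 6570 simple string expansion.
func expandSimple(template string, values map[string]string) string {
	return varRe.ReplaceAllStringFunc(template, func(m string) string {
		name := m[1 : len(m)-1]
		if v, ok := values[name]; ok {
			return url.PathEscape(v)
		}
		return ""
	})
}

func main() {
	out := expandSimple("/b/{bucket}/o/{object}", map[string]string{
		"bucket": "my-bucket",
		"object": "dir/file.txt",
	})
	fmt.Println(out) // /b/my-bucket/o/dir%2Ffile.txt
}
```
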
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go
deleted file mode 100644
index eff260a692..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uritemplates
-
-func Expand(path string, values map[string]string) (string, error) {
- template, err := parse(path)
- if err != nil {
- return "", err
- }
- return template.Expand(values), nil
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/types.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/types.go
deleted file mode 100644
index a02b4b0716..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/types.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2013 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package googleapi
-
-import (
- "encoding/json"
- "strconv"
-)
-
-// Int64s is a slice of int64s that marshal as quoted strings in JSON.
-type Int64s []int64
-
-func (q *Int64s) UnmarshalJSON(raw []byte) error {
- *q = (*q)[:0]
- var ss []string
- if err := json.Unmarshal(raw, &ss); err != nil {
- return err
- }
- for _, s := range ss {
- v, err := strconv.ParseInt(s, 10, 64)
- if err != nil {
- return err
- }
- *q = append(*q, int64(v))
- }
- return nil
-}
-
-// Int32s is a slice of int32s that marshal as quoted strings in JSON.
-type Int32s []int32
-
-func (q *Int32s) UnmarshalJSON(raw []byte) error {
- *q = (*q)[:0]
- var ss []string
- if err := json.Unmarshal(raw, &ss); err != nil {
- return err
- }
- for _, s := range ss {
- v, err := strconv.ParseInt(s, 10, 32)
- if err != nil {
- return err
- }
- *q = append(*q, int32(v))
- }
- return nil
-}
-
-// Uint64s is a slice of uint64s that marshal as quoted strings in JSON.
-type Uint64s []uint64
-
-func (q *Uint64s) UnmarshalJSON(raw []byte) error {
- *q = (*q)[:0]
- var ss []string
- if err := json.Unmarshal(raw, &ss); err != nil {
- return err
- }
- for _, s := range ss {
- v, err := strconv.ParseUint(s, 10, 64)
- if err != nil {
- return err
- }
- *q = append(*q, uint64(v))
- }
- return nil
-}
-
-// Uint32s is a slice of uint32s that marshal as quoted strings in JSON.
-type Uint32s []uint32
-
-func (q *Uint32s) UnmarshalJSON(raw []byte) error {
- *q = (*q)[:0]
- var ss []string
- if err := json.Unmarshal(raw, &ss); err != nil {
- return err
- }
- for _, s := range ss {
- v, err := strconv.ParseUint(s, 10, 32)
- if err != nil {
- return err
- }
- *q = append(*q, uint32(v))
- }
- return nil
-}
-
-// Float64s is a slice of float64s that marshal as quoted strings in JSON.
-type Float64s []float64
-
-func (q *Float64s) UnmarshalJSON(raw []byte) error {
- *q = (*q)[:0]
- var ss []string
- if err := json.Unmarshal(raw, &ss); err != nil {
- return err
- }
- for _, s := range ss {
- v, err := strconv.ParseFloat(s, 64)
- if err != nil {
- return err
- }
- *q = append(*q, float64(v))
- }
- return nil
-}
-
-func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) {
- dst := make([]byte, 0, 2+n*10) // somewhat arbitrary
- dst = append(dst, '[')
- for i := 0; i < n; i++ {
- if i > 0 {
- dst = append(dst, ',')
- }
- dst = append(dst, '"')
- dst = fn(dst, i)
- dst = append(dst, '"')
- }
- dst = append(dst, ']')
- return dst, nil
-}
-
-func (s Int64s) MarshalJSON() ([]byte, error) {
- return quotedList(len(s), func(dst []byte, i int) []byte {
- return strconv.AppendInt(dst, s[i], 10)
- })
-}
-
-func (s Int32s) MarshalJSON() ([]byte, error) {
- return quotedList(len(s), func(dst []byte, i int) []byte {
- return strconv.AppendInt(dst, int64(s[i]), 10)
- })
-}
-
-func (s Uint64s) MarshalJSON() ([]byte, error) {
- return quotedList(len(s), func(dst []byte, i int) []byte {
- return strconv.AppendUint(dst, s[i], 10)
- })
-}
-
-func (s Uint32s) MarshalJSON() ([]byte, error) {
- return quotedList(len(s), func(dst []byte, i int) []byte {
- return strconv.AppendUint(dst, uint64(s[i]), 10)
- })
-}
-
-func (s Float64s) MarshalJSON() ([]byte, error) {
- return quotedList(len(s), func(dst []byte, i int) []byte {
- return strconv.AppendFloat(dst, s[i], 'g', -1, 64)
- })
-}
-
-/*
- * Helper routines for simplifying the creation of optional fields of basic type.
- */
-
-// Bool is a helper routine that allocates a new bool value
-// to store v and returns a pointer to it.
-func Bool(v bool) *bool { return &v }
-
-// Int32 is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it.
-func Int32(v int32) *int32 { return &v }
-
-// Int64 is a helper routine that allocates a new int64 value
-// to store v and returns a pointer to it.
-func Int64(v int64) *int64 { return &v }
-
-// Float64 is a helper routine that allocates a new float64 value
-// to store v and returns a pointer to it.
-func Float64(v float64) *float64 { return &v }
-
-// Uint32 is a helper routine that allocates a new uint32 value
-// to store v and returns a pointer to it.
-func Uint32(v uint32) *uint32 { return &v }
-
-// Uint64 is a helper routine that allocates a new uint64 value
-// to store v and returns a pointer to it.
-func Uint64(v uint64) *uint64 { return &v }
-
-// String is a helper routine that allocates a new string value
-// to store v and returns a pointer to it.
-func String(v string) *string { return &v }
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-api.json
deleted file mode 100644
index 3768b46877..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-api.json
+++ /dev/null
@@ -1,2865 +0,0 @@
-{
- "kind": "discovery#restDescription",
- "etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/KVPQfwGxQTBtH0g1kuij0C9i4uc\"",
- "discoveryVersion": "v1",
- "id": "storage:v1",
- "name": "storage",
- "version": "v1",
- "revision": "20160304",
- "title": "Cloud Storage JSON API",
- "description": "Stores and retrieves potentially large, immutable data objects.",
- "ownerDomain": "google.com",
- "ownerName": "Google",
- "icons": {
- "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png",
- "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png"
- },
- "documentationLink": "https://developers.google.com/storage/docs/json_api/",
- "labels": [
- "labs"
- ],
- "protocol": "rest",
- "baseUrl": "https://www.googleapis.com/storage/v1/",
- "basePath": "/storage/v1/",
- "rootUrl": "https://www.googleapis.com/",
- "servicePath": "storage/v1/",
- "batchPath": "batch",
- "parameters": {
- "alt": {
- "type": "string",
- "description": "Data format for the response.",
- "default": "json",
- "enum": [
- "json"
- ],
- "enumDescriptions": [
- "Responses with Content-Type of application/json"
- ],
- "location": "query"
- },
- "fields": {
- "type": "string",
- "description": "Selector specifying which fields to include in a partial response.",
- "location": "query"
- },
- "key": {
- "type": "string",
- "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
- "location": "query"
- },
- "oauth_token": {
- "type": "string",
- "description": "OAuth 2.0 token for the current user.",
- "location": "query"
- },
- "prettyPrint": {
- "type": "boolean",
- "description": "Returns response with indentations and line breaks.",
- "default": "true",
- "location": "query"
- },
- "quotaUser": {
- "type": "string",
- "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.",
- "location": "query"
- },
- "userIp": {
- "type": "string",
- "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.",
- "location": "query"
- }
- },
- "auth": {
- "oauth2": {
- "scopes": {
- "https://www.googleapis.com/auth/cloud-platform": {
- "description": "View and manage your data across Google Cloud Platform services"
- },
- "https://www.googleapis.com/auth/cloud-platform.read-only": {
- "description": "View your data across Google Cloud Platform services"
- },
- "https://www.googleapis.com/auth/devstorage.full_control": {
- "description": "Manage your data and permissions in Google Cloud Storage"
- },
- "https://www.googleapis.com/auth/devstorage.read_only": {
- "description": "View your data in Google Cloud Storage"
- },
- "https://www.googleapis.com/auth/devstorage.read_write": {
- "description": "Manage your data in Google Cloud Storage"
- }
- }
- }
- },
- "schemas": {
- "Bucket": {
- "id": "Bucket",
- "type": "object",
- "description": "A bucket.",
- "properties": {
- "acl": {
- "type": "array",
- "description": "Access controls on the bucket.",
- "items": {
- "$ref": "BucketAccessControl"
- },
- "annotations": {
- "required": [
- "storage.buckets.update"
- ]
- }
- },
- "cors": {
- "type": "array",
- "description": "The bucket's Cross-Origin Resource Sharing (CORS) configuration.",
- "items": {
- "type": "object",
- "properties": {
- "maxAgeSeconds": {
- "type": "integer",
- "description": "The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.",
- "format": "int32"
- },
- "method": {
- "type": "array",
- "description": "The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: \"*\" is permitted in the list of methods, and means \"any method\".",
- "items": {
- "type": "string"
- }
- },
- "origin": {
- "type": "array",
- "description": "The list of Origins eligible to receive CORS response headers. Note: \"*\" is permitted in the list of origins, and means \"any Origin\".",
- "items": {
- "type": "string"
- }
- },
- "responseHeader": {
- "type": "array",
- "description": "The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.",
- "items": {
- "type": "string"
- }
- }
- }
- }
- },
- "defaultObjectAcl": {
- "type": "array",
- "description": "Default access controls to apply to new objects when no ACL is provided.",
- "items": {
- "$ref": "ObjectAccessControl"
- }
- },
- "etag": {
- "type": "string",
- "description": "HTTP 1.1 Entity tag for the bucket."
- },
- "id": {
- "type": "string",
- "description": "The ID of the bucket."
- },
- "kind": {
- "type": "string",
- "description": "The kind of item this is. For buckets, this is always storage#bucket.",
- "default": "storage#bucket"
- },
- "lifecycle": {
- "type": "object",
- "description": "The bucket's lifecycle configuration. See lifecycle management for more information.",
- "properties": {
- "rule": {
- "type": "array",
- "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.",
- "items": {
- "type": "object",
- "properties": {
- "action": {
- "type": "object",
- "description": "The action to take.",
- "properties": {
- "type": {
- "type": "string",
- "description": "Type of the action. Currently, only Delete is supported."
- }
- }
- },
- "condition": {
- "type": "object",
- "description": "The condition(s) under which the action will be taken.",
- "properties": {
- "age": {
- "type": "integer",
- "description": "Age of an object (in days). This condition is satisfied when an object reaches the specified age.",
- "format": "int32"
- },
- "createdBefore": {
- "type": "string",
- "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when an object is created before midnight of the specified date in UTC.",
- "format": "date"
- },
- "isLive": {
- "type": "boolean",
- "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects."
- },
- "numNewerVersions": {
- "type": "integer",
- "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.",
- "format": "int32"
- }
- }
- }
- }
- }
- }
- }
- },
- "location": {
- "type": "string",
- "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list."
- },
- "logging": {
- "type": "object",
- "description": "The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.",
- "properties": {
- "logBucket": {
- "type": "string",
- "description": "The destination bucket where the current bucket's logs should be placed."
- },
- "logObjectPrefix": {
- "type": "string",
- "description": "A prefix for log object names."
- }
- }
- },
- "metageneration": {
- "type": "string",
- "description": "The metadata generation of this bucket.",
- "format": "int64"
- },
- "name": {
- "type": "string",
- "description": "The name of the bucket.",
- "annotations": {
- "required": [
- "storage.buckets.insert"
- ]
- }
- },
- "owner": {
- "type": "object",
- "description": "The owner of the bucket. This is always the project team's owner group.",
- "properties": {
- "entity": {
- "type": "string",
- "description": "The entity, in the form project-owner-projectId."
- },
- "entityId": {
- "type": "string",
- "description": "The ID for the entity."
- }
- }
- },
- "projectNumber": {
- "type": "string",
- "description": "The project number of the project the bucket belongs to.",
- "format": "uint64"
- },
- "selfLink": {
- "type": "string",
- "description": "The URI of this bucket."
- },
- "storageClass": {
- "type": "string",
- "description": "The bucket's storage class. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. Defaults to STANDARD. For more information, see storage classes."
- },
- "timeCreated": {
- "type": "string",
- "description": "The creation time of the bucket in RFC 3339 format.",
- "format": "date-time"
- },
- "updated": {
- "type": "string",
- "description": "The modification time of the bucket in RFC 3339 format.",
- "format": "date-time"
- },
- "versioning": {
- "type": "object",
- "description": "The bucket's versioning configuration.",
- "properties": {
- "enabled": {
- "type": "boolean",
- "description": "While set to true, versioning is fully enabled for this bucket."
- }
- }
- },
- "website": {
- "type": "object",
- "description": "The bucket's website configuration.",
- "properties": {
- "mainPageSuffix": {
- "type": "string",
- "description": "Behaves as the bucket's directory index where missing objects are treated as potential directories."
- },
- "notFoundPage": {
- "type": "string",
- "description": "The custom object to return when a requested resource is not found."
- }
- }
- }
- }
- },
- "BucketAccessControl": {
- "id": "BucketAccessControl",
- "type": "object",
- "description": "An access-control entry.",
- "properties": {
- "bucket": {
- "type": "string",
- "description": "The name of the bucket."
- },
- "domain": {
- "type": "string",
- "description": "The domain associated with the entity, if any."
- },
- "email": {
- "type": "string",
- "description": "The email address associated with the entity, if any."
- },
- "entity": {
- "type": "string",
- "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.",
- "annotations": {
- "required": [
- "storage.bucketAccessControls.insert"
- ]
- }
- },
- "entityId": {
- "type": "string",
- "description": "The ID for the entity, if any."
- },
- "etag": {
- "type": "string",
- "description": "HTTP 1.1 Entity tag for the access-control entry."
- },
- "id": {
- "type": "string",
- "description": "The ID of the access-control entry."
- },
- "kind": {
- "type": "string",
- "description": "The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl.",
- "default": "storage#bucketAccessControl"
- },
- "projectTeam": {
- "type": "object",
- "description": "The project team associated with the entity, if any.",
- "properties": {
- "projectNumber": {
- "type": "string",
- "description": "The project number."
- },
- "team": {
- "type": "string",
- "description": "The team. Can be owners, editors, or viewers."
- }
- }
- },
- "role": {
- "type": "string",
- "description": "The access permission for the entity. Can be READER, WRITER, or OWNER.",
- "annotations": {
- "required": [
- "storage.bucketAccessControls.insert"
- ]
- }
- },
- "selfLink": {
- "type": "string",
- "description": "The link to this access-control entry."
- }
- }
- },
- "BucketAccessControls": {
- "id": "BucketAccessControls",
- "type": "object",
- "description": "An access-control list.",
- "properties": {
- "items": {
- "type": "array",
- "description": "The list of items.",
- "items": {
- "$ref": "BucketAccessControl"
- }
- },
- "kind": {
- "type": "string",
- "description": "The kind of item this is. For lists of bucket access control entries, this is always storage#bucketAccessControls.",
- "default": "storage#bucketAccessControls"
- }
- }
- },
- "Buckets": {
- "id": "Buckets",
- "type": "object",
- "description": "A list of buckets.",
- "properties": {
- "items": {
- "type": "array",
- "description": "The list of items.",
- "items": {
- "$ref": "Bucket"
- }
- },
- "kind": {
- "type": "string",
- "description": "The kind of item this is. For lists of buckets, this is always storage#buckets.",
- "default": "storage#buckets"
- },
- "nextPageToken": {
- "type": "string",
- "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results."
- }
- }
- },
- "Channel": {
- "id": "Channel",
- "type": "object",
- "description": "An notification channel used to watch for resource changes.",
- "properties": {
- "address": {
- "type": "string",
- "description": "The address where notifications are delivered for this channel."
- },
- "expiration": {
- "type": "string",
- "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.",
- "format": "int64"
- },
- "id": {
- "type": "string",
- "description": "A UUID or similar unique string that identifies this channel."
- },
- "kind": {
- "type": "string",
- "description": "Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \"api#channel\".",
- "default": "api#channel"
- },
- "params": {
- "type": "object",
- "description": "Additional parameters controlling delivery channel behavior. Optional.",
- "additionalProperties": {
- "type": "string",
- "description": "Declares a new parameter by name."
- }
- },
- "payload": {
- "type": "boolean",
- "description": "A Boolean value to indicate whether payload is wanted. Optional."
- },
- "resourceId": {
- "type": "string",
- "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions."
- },
- "resourceUri": {
- "type": "string",
- "description": "A version-specific identifier for the watched resource."
- },
- "token": {
- "type": "string",
- "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional."
- },
- "type": {
- "type": "string",
- "description": "The type of delivery mechanism used for this channel."
- }
- }
- },
- "ComposeRequest": {
- "id": "ComposeRequest",
- "type": "object",
- "description": "A Compose request.",
- "properties": {
- "destination": {
- "$ref": "Object",
- "description": "Properties of the resulting object."
- },
- "kind": {
- "type": "string",
- "description": "The kind of item this is.",
- "default": "storage#composeRequest"
- },
- "sourceObjects": {
- "type": "array",
- "description": "The list of source objects that will be concatenated into a single object.",
- "items": {
- "type": "object",
- "properties": {
- "generation": {
- "type": "string",
- "description": "The generation of this object to use as the source.",
- "format": "int64"
- },
- "name": {
- "type": "string",
- "description": "The source object's name. The source object's bucket is implicitly the destination bucket.",
- "annotations": {
- "required": [
- "storage.objects.compose"
- ]
- }
- },
- "objectPreconditions": {
- "type": "object",
- "description": "Conditions that must be met for this operation to execute.",
- "properties": {
- "ifGenerationMatch": {
- "type": "string",
- "description": "Only perform the composition if the generation of the source object that would be used matches this value. If this value and a generation are both specified, they must be the same value or the call will fail.",
- "format": "int64"
- }
- }
- }
- }
- },
- "annotations": {
- "required": [
- "storage.objects.compose"
- ]
- }
- }
- }
- },
- "Object": {
- "id": "Object",
- "type": "object",
- "description": "An object.",
- "properties": {
- "acl": {
- "type": "array",
- "description": "Access controls on the object.",
- "items": {
- "$ref": "ObjectAccessControl"
- },
- "annotations": {
- "required": [
- "storage.objects.update"
- ]
- }
- },
- "bucket": {
- "type": "string",
- "description": "The name of the bucket containing this object."
- },
- "cacheControl": {
- "type": "string",
- "description": "Cache-Control directive for the object data."
- },
- "componentCount": {
- "type": "integer",
- "description": "Number of underlying components that make up this object. Components are accumulated by compose operations.",
- "format": "int32"
- },
- "contentDisposition": {
- "type": "string",
- "description": "Content-Disposition of the object data."
- },
- "contentEncoding": {
- "type": "string",
- "description": "Content-Encoding of the object data."
- },
- "contentLanguage": {
- "type": "string",
- "description": "Content-Language of the object data."
- },
- "contentType": {
- "type": "string",
- "description": "Content-Type of the object data."
- },
- "crc32c": {
- "type": "string",
- "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices."
- },
- "customerEncryption": {
- "type": "object",
- "description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.",
- "properties": {
- "encryptionAlgorithm": {
- "type": "string",
- "description": "The encryption algorithm."
- },
- "keySha256": {
- "type": "string",
- "description": "SHA256 hash value of the encryption key."
- }
- }
- },
- "etag": {
- "type": "string",
- "description": "HTTP 1.1 Entity tag for the object."
- },
- "generation": {
- "type": "string",
- "description": "The content generation of this object. Used for object versioning.",
- "format": "int64"
- },
- "id": {
- "type": "string",
- "description": "The ID of the object."
- },
- "kind": {
- "type": "string",
- "description": "The kind of item this is. For objects, this is always storage#object.",
- "default": "storage#object"
- },
- "md5Hash": {
- "type": "string",
- "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices."
- },
- "mediaLink": {
- "type": "string",
- "description": "Media download link."
- },
- "metadata": {
- "type": "object",
- "description": "User-provided metadata, in key/value pairs.",
- "additionalProperties": {
- "type": "string",
- "description": "An individual metadata entry."
- }
- },
- "metageneration": {
- "type": "string",
- "description": "The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. A metageneration number is only meaningful in the context of a particular generation of a particular object.",
- "format": "int64"
- },
- "name": {
- "type": "string",
- "description": "The name of this object. Required if not specified by URL parameter."
- },
- "owner": {
- "type": "object",
- "description": "The owner of the object. This will always be the uploader of the object.",
- "properties": {
- "entity": {
- "type": "string",
- "description": "The entity, in the form user-userId."
- },
- "entityId": {
- "type": "string",
- "description": "The ID for the entity."
- }
- }
- },
- "selfLink": {
- "type": "string",
- "description": "The link to this object."
- },
- "size": {
- "type": "string",
- "description": "Content-Length of the data in bytes.",
- "format": "uint64"
- },
- "storageClass": {
- "type": "string",
- "description": "Storage class of the object."
- },
- "timeCreated": {
- "type": "string",
- "description": "The creation time of the object in RFC 3339 format.",
- "format": "date-time"
- },
- "timeDeleted": {
- "type": "string",
- "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.",
- "format": "date-time"
- },
- "updated": {
- "type": "string",
- "description": "The modification time of the object metadata in RFC 3339 format.",
- "format": "date-time"
- }
- }
- },
- "ObjectAccessControl": {
- "id": "ObjectAccessControl",
- "type": "object",
- "description": "An access-control entry.",
- "properties": {
- "bucket": {
- "type": "string",
- "description": "The name of the bucket."
- },
- "domain": {
- "type": "string",
- "description": "The domain associated with the entity, if any."
- },
- "email": {
- "type": "string",
- "description": "The email address associated with the entity, if any."
- },
- "entity": {
- "type": "string",
- "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com."
- },
- "entityId": {
- "type": "string",
- "description": "The ID for the entity, if any."
- },
- "etag": {
- "type": "string",
- "description": "HTTP 1.1 Entity tag for the access-control entry."
- },
- "generation": {
- "type": "string",
- "description": "The content generation of the object.",
- "format": "int64"
- },
- "id": {
- "type": "string",
- "description": "The ID of the access-control entry."
- },
- "kind": {
- "type": "string",
- "description": "The kind of item this is. For object access control entries, this is always storage#objectAccessControl.",
- "default": "storage#objectAccessControl"
- },
- "object": {
- "type": "string",
- "description": "The name of the object."
- },
- "projectTeam": {
- "type": "object",
- "description": "The project team associated with the entity, if any.",
- "properties": {
- "projectNumber": {
- "type": "string",
- "description": "The project number."
- },
- "team": {
- "type": "string",
- "description": "The team. Can be owners, editors, or viewers."
- }
- }
- },
- "role": {
- "type": "string",
- "description": "The access permission for the entity. Can be READER or OWNER."
- },
- "selfLink": {
- "type": "string",
- "description": "The link to this access-control entry."
- }
- }
- },
- "ObjectAccessControls": {
- "id": "ObjectAccessControls",
- "type": "object",
- "description": "An access-control list.",
- "properties": {
- "items": {
- "type": "array",
- "description": "The list of items.",
- "items": {
- "type": "any"
- }
- },
- "kind": {
- "type": "string",
- "description": "The kind of item this is. For lists of object access control entries, this is always storage#objectAccessControls.",
- "default": "storage#objectAccessControls"
- }
- }
- },
- "Objects": {
- "id": "Objects",
- "type": "object",
- "description": "A list of objects.",
- "properties": {
- "items": {
- "type": "array",
- "description": "The list of items.",
- "items": {
- "$ref": "Object"
- }
- },
- "kind": {
- "type": "string",
- "description": "The kind of item this is. For lists of objects, this is always storage#objects.",
- "default": "storage#objects"
- },
- "nextPageToken": {
- "type": "string",
- "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results."
- },
- "prefixes": {
- "type": "array",
- "description": "The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter.",
- "items": {
- "type": "string"
- }
- }
- }
- },
- "RewriteResponse": {
- "id": "RewriteResponse",
- "type": "object",
- "description": "A rewrite response.",
- "properties": {
- "done": {
- "type": "boolean",
- "description": "true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response."
- },
- "kind": {
- "type": "string",
- "description": "The kind of item this is.",
- "default": "storage#rewriteResponse"
- },
- "objectSize": {
- "type": "string",
- "description": "The total size of the object being copied in bytes. This property is always present in the response.",
- "format": "uint64"
- },
- "resource": {
- "$ref": "Object",
- "description": "A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes."
- },
- "rewriteToken": {
- "type": "string",
- "description": "A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy."
- },
- "totalBytesRewritten": {
- "type": "string",
- "description": "The total bytes written so far, which can be used to provide a waiting user with a progress indicator. This property is always present in the response.",
- "format": "uint64"
- }
- }
- }
- },
- "resources": {
- "bucketAccessControls": {
- "methods": {
- "delete": {
- "id": "storage.bucketAccessControls.delete",
- "path": "b/{bucket}/acl/{entity}",
- "httpMethod": "DELETE",
- "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "entity": {
- "type": "string",
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "get": {
- "id": "storage.bucketAccessControls.get",
- "path": "b/{bucket}/acl/{entity}",
- "httpMethod": "GET",
- "description": "Returns the ACL entry for the specified entity on the specified bucket.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "entity": {
- "type": "string",
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "response": {
- "$ref": "BucketAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "insert": {
- "id": "storage.bucketAccessControls.insert",
- "path": "b/{bucket}/acl",
- "httpMethod": "POST",
- "description": "Creates a new ACL entry on the specified bucket.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "bucket"
- ],
- "request": {
- "$ref": "BucketAccessControl"
- },
- "response": {
- "$ref": "BucketAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "list": {
- "id": "storage.bucketAccessControls.list",
- "path": "b/{bucket}/acl",
- "httpMethod": "GET",
- "description": "Retrieves ACL entries on the specified bucket.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "bucket"
- ],
- "response": {
- "$ref": "BucketAccessControls"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "patch": {
- "id": "storage.bucketAccessControls.patch",
- "path": "b/{bucket}/acl/{entity}",
- "httpMethod": "PATCH",
- "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "entity": {
- "type": "string",
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "request": {
- "$ref": "BucketAccessControl"
- },
- "response": {
- "$ref": "BucketAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "update": {
- "id": "storage.bucketAccessControls.update",
- "path": "b/{bucket}/acl/{entity}",
- "httpMethod": "PUT",
- "description": "Updates an ACL entry on the specified bucket.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "entity": {
- "type": "string",
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "request": {
- "$ref": "BucketAccessControl"
- },
- "response": {
- "$ref": "BucketAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- }
- }
- },
- "buckets": {
- "methods": {
- "delete": {
- "id": "storage.buckets.delete",
- "path": "b/{bucket}",
- "httpMethod": "DELETE",
- "description": "Permanently deletes an empty bucket.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "ifMetagenerationMatch": {
- "type": "string",
- "description": "If set, only deletes the bucket if its metageneration matches this value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationNotMatch": {
- "type": "string",
- "description": "If set, only deletes the bucket if its metageneration does not match this value.",
- "format": "int64",
- "location": "query"
- }
- },
- "parameterOrder": [
- "bucket"
- ],
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "get": {
- "id": "storage.buckets.get",
- "path": "b/{bucket}",
- "httpMethod": "GET",
- "description": "Returns metadata for the specified bucket.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "ifMetagenerationMatch": {
- "type": "string",
- "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationNotMatch": {
- "type": "string",
- "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "projection": {
- "type": "string",
- "description": "Set of properties to return. Defaults to noAcl.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit acl and defaultObjectAcl properties."
- ],
- "location": "query"
- }
- },
- "parameterOrder": [
- "bucket"
- ],
- "response": {
- "$ref": "Bucket"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "insert": {
- "id": "storage.buckets.insert",
- "path": "b",
- "httpMethod": "POST",
- "description": "Creates a new bucket.",
- "parameters": {
- "predefinedAcl": {
- "type": "string",
- "description": "Apply a predefined set of access controls to this bucket.",
- "enum": [
- "authenticatedRead",
- "private",
- "projectPrivate",
- "publicRead",
- "publicReadWrite"
- ],
- "enumDescriptions": [
- "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
- "Project team owners get OWNER access.",
- "Project team members get access according to their roles.",
- "Project team owners get OWNER access, and allUsers get READER access.",
- "Project team owners get OWNER access, and allUsers get WRITER access."
- ],
- "location": "query"
- },
- "predefinedDefaultObjectAcl": {
- "type": "string",
- "description": "Apply a predefined set of default object access controls to this bucket.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query"
- },
- "project": {
- "type": "string",
- "description": "A valid API project identifier.",
- "required": true,
- "location": "query"
- },
- "projection": {
- "type": "string",
- "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit acl and defaultObjectAcl properties."
- ],
- "location": "query"
- }
- },
- "parameterOrder": [
- "project"
- ],
- "request": {
- "$ref": "Bucket"
- },
- "response": {
- "$ref": "Bucket"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "list": {
- "id": "storage.buckets.list",
- "path": "b",
- "httpMethod": "GET",
- "description": "Retrieves a list of buckets for a given project.",
- "parameters": {
- "maxResults": {
- "type": "integer",
- "description": "Maximum number of buckets to return.",
- "format": "uint32",
- "minimum": "0",
- "location": "query"
- },
- "pageToken": {
- "type": "string",
- "description": "A previously-returned page token representing part of the larger set of results to view.",
- "location": "query"
- },
- "prefix": {
- "type": "string",
- "description": "Filter results to buckets whose names begin with this prefix.",
- "location": "query"
- },
- "project": {
- "type": "string",
- "description": "A valid API project identifier.",
- "required": true,
- "location": "query"
- },
- "projection": {
- "type": "string",
- "description": "Set of properties to return. Defaults to noAcl.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit acl and defaultObjectAcl properties."
- ],
- "location": "query"
- }
- },
- "parameterOrder": [
- "project"
- ],
- "response": {
- "$ref": "Buckets"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "patch": {
- "id": "storage.buckets.patch",
- "path": "b/{bucket}",
- "httpMethod": "PATCH",
- "description": "Updates a bucket. This method supports patch semantics.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "ifMetagenerationMatch": {
- "type": "string",
- "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationNotMatch": {
- "type": "string",
- "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "predefinedAcl": {
- "type": "string",
- "description": "Apply a predefined set of access controls to this bucket.",
- "enum": [
- "authenticatedRead",
- "private",
- "projectPrivate",
- "publicRead",
- "publicReadWrite"
- ],
- "enumDescriptions": [
- "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
- "Project team owners get OWNER access.",
- "Project team members get access according to their roles.",
- "Project team owners get OWNER access, and allUsers get READER access.",
- "Project team owners get OWNER access, and allUsers get WRITER access."
- ],
- "location": "query"
- },
- "predefinedDefaultObjectAcl": {
- "type": "string",
- "description": "Apply a predefined set of default object access controls to this bucket.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query"
- },
- "projection": {
- "type": "string",
- "description": "Set of properties to return. Defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit acl and defaultObjectAcl properties."
- ],
- "location": "query"
- }
- },
- "parameterOrder": [
- "bucket"
- ],
- "request": {
- "$ref": "Bucket"
- },
- "response": {
- "$ref": "Bucket"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "update": {
- "id": "storage.buckets.update",
- "path": "b/{bucket}",
- "httpMethod": "PUT",
- "description": "Updates a bucket.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "ifMetagenerationMatch": {
- "type": "string",
- "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationNotMatch": {
- "type": "string",
- "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "predefinedAcl": {
- "type": "string",
- "description": "Apply a predefined set of access controls to this bucket.",
- "enum": [
- "authenticatedRead",
- "private",
- "projectPrivate",
- "publicRead",
- "publicReadWrite"
- ],
- "enumDescriptions": [
- "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
- "Project team owners get OWNER access.",
- "Project team members get access according to their roles.",
- "Project team owners get OWNER access, and allUsers get READER access.",
- "Project team owners get OWNER access, and allUsers get WRITER access."
- ],
- "location": "query"
- },
- "predefinedDefaultObjectAcl": {
- "type": "string",
- "description": "Apply a predefined set of default object access controls to this bucket.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query"
- },
- "projection": {
- "type": "string",
- "description": "Set of properties to return. Defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit acl and defaultObjectAcl properties."
- ],
- "location": "query"
- }
- },
- "parameterOrder": [
- "bucket"
- ],
- "request": {
- "$ref": "Bucket"
- },
- "response": {
- "$ref": "Bucket"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- }
- }
- },
- "channels": {
- "methods": {
- "stop": {
- "id": "storage.channels.stop",
- "path": "channels/stop",
- "httpMethod": "POST",
- "description": "Stop watching resources through this channel",
- "request": {
- "$ref": "Channel",
- "parameterName": "resource"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- }
- }
- },
- "defaultObjectAccessControls": {
- "methods": {
- "delete": {
- "id": "storage.defaultObjectAccessControls.delete",
- "path": "b/{bucket}/defaultObjectAcl/{entity}",
- "httpMethod": "DELETE",
- "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "entity": {
- "type": "string",
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "get": {
- "id": "storage.defaultObjectAccessControls.get",
- "path": "b/{bucket}/defaultObjectAcl/{entity}",
- "httpMethod": "GET",
- "description": "Returns the default object ACL entry for the specified entity on the specified bucket.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "entity": {
- "type": "string",
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "insert": {
- "id": "storage.defaultObjectAccessControls.insert",
- "path": "b/{bucket}/defaultObjectAcl",
- "httpMethod": "POST",
- "description": "Creates a new default object ACL entry on the specified bucket.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "bucket"
- ],
- "request": {
- "$ref": "ObjectAccessControl"
- },
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "list": {
- "id": "storage.defaultObjectAccessControls.list",
- "path": "b/{bucket}/defaultObjectAcl",
- "httpMethod": "GET",
- "description": "Retrieves default object ACL entries on the specified bucket.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "ifMetagenerationMatch": {
- "type": "string",
- "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationNotMatch": {
- "type": "string",
- "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query"
- }
- },
- "parameterOrder": [
- "bucket"
- ],
- "response": {
- "$ref": "ObjectAccessControls"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "patch": {
- "id": "storage.defaultObjectAccessControls.patch",
- "path": "b/{bucket}/defaultObjectAcl/{entity}",
- "httpMethod": "PATCH",
- "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "entity": {
- "type": "string",
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "request": {
- "$ref": "ObjectAccessControl"
- },
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "update": {
- "id": "storage.defaultObjectAccessControls.update",
- "path": "b/{bucket}/defaultObjectAcl/{entity}",
- "httpMethod": "PUT",
- "description": "Updates a default object ACL entry on the specified bucket.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "entity": {
- "type": "string",
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "bucket",
- "entity"
- ],
- "request": {
- "$ref": "ObjectAccessControl"
- },
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- }
- }
- },
- "objectAccessControls": {
- "methods": {
- "delete": {
- "id": "storage.objectAccessControls.delete",
- "path": "b/{bucket}/o/{object}/acl/{entity}",
- "httpMethod": "DELETE",
- "description": "Permanently deletes the ACL entry for the specified entity on the specified object.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "entity": {
- "type": "string",
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "required": true,
- "location": "path"
- },
- "generation": {
- "type": "string",
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query"
- },
- "object": {
- "type": "string",
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "bucket",
- "object",
- "entity"
- ],
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "get": {
- "id": "storage.objectAccessControls.get",
- "path": "b/{bucket}/o/{object}/acl/{entity}",
- "httpMethod": "GET",
- "description": "Returns the ACL entry for the specified entity on the specified object.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "entity": {
- "type": "string",
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "required": true,
- "location": "path"
- },
- "generation": {
- "type": "string",
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query"
- },
- "object": {
- "type": "string",
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "bucket",
- "object",
- "entity"
- ],
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "insert": {
- "id": "storage.objectAccessControls.insert",
- "path": "b/{bucket}/o/{object}/acl",
- "httpMethod": "POST",
- "description": "Creates a new ACL entry on the specified object.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "generation": {
- "type": "string",
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query"
- },
- "object": {
- "type": "string",
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "request": {
- "$ref": "ObjectAccessControl"
- },
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "list": {
- "id": "storage.objectAccessControls.list",
- "path": "b/{bucket}/o/{object}/acl",
- "httpMethod": "GET",
- "description": "Retrieves ACL entries on the specified object.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "generation": {
- "type": "string",
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query"
- },
- "object": {
- "type": "string",
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "response": {
- "$ref": "ObjectAccessControls"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "patch": {
- "id": "storage.objectAccessControls.patch",
- "path": "b/{bucket}/o/{object}/acl/{entity}",
- "httpMethod": "PATCH",
- "description": "Updates an ACL entry on the specified object. This method supports patch semantics.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "entity": {
- "type": "string",
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "required": true,
- "location": "path"
- },
- "generation": {
- "type": "string",
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query"
- },
- "object": {
- "type": "string",
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "bucket",
- "object",
- "entity"
- ],
- "request": {
- "$ref": "ObjectAccessControl"
- },
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- },
- "update": {
- "id": "storage.objectAccessControls.update",
- "path": "b/{bucket}/o/{object}/acl/{entity}",
- "httpMethod": "PUT",
- "description": "Updates an ACL entry on the specified object.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of a bucket.",
- "required": true,
- "location": "path"
- },
- "entity": {
- "type": "string",
- "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- "required": true,
- "location": "path"
- },
- "generation": {
- "type": "string",
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query"
- },
- "object": {
- "type": "string",
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "bucket",
- "object",
- "entity"
- ],
- "request": {
- "$ref": "ObjectAccessControl"
- },
- "response": {
- "$ref": "ObjectAccessControl"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control"
- ]
- }
- }
- },
- "objects": {
- "methods": {
- "compose": {
- "id": "storage.objects.compose",
- "path": "b/{destinationBucket}/o/{destinationObject}/compose",
- "httpMethod": "POST",
- "description": "Concatenates a list of existing objects into a new object in the same bucket.",
- "parameters": {
- "destinationBucket": {
- "type": "string",
- "description": "Name of the bucket in which to store the new object.",
- "required": true,
- "location": "path"
- },
- "destinationObject": {
- "type": "string",
- "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "required": true,
- "location": "path"
- },
- "destinationPredefinedAcl": {
- "type": "string",
- "description": "Apply a predefined set of access controls to the destination object.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query"
- },
- "ifGenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query"
- }
- },
- "parameterOrder": [
- "destinationBucket",
- "destinationObject"
- ],
- "request": {
- "$ref": "ComposeRequest"
- },
- "response": {
- "$ref": "Object"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ],
- "supportsMediaDownload": true,
- "useMediaDownloadService": true
- },
- "copy": {
- "id": "storage.objects.copy",
- "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}",
- "httpMethod": "POST",
- "description": "Copies a source object to a destination object. Optionally overrides metadata.",
- "parameters": {
- "destinationBucket": {
- "type": "string",
- "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "required": true,
- "location": "path"
- },
- "destinationObject": {
- "type": "string",
- "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.",
- "required": true,
- "location": "path"
- },
- "destinationPredefinedAcl": {
- "type": "string",
- "description": "Apply a predefined set of access controls to the destination object.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query"
- },
- "ifGenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifGenerationNotMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationNotMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifSourceGenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the source object's generation matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifSourceGenerationNotMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the source object's generation does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifSourceMetagenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifSourceMetagenerationNotMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "projection": {
- "type": "string",
- "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the acl property."
- ],
- "location": "query"
- },
- "sourceBucket": {
- "type": "string",
- "description": "Name of the bucket in which to find the source object.",
- "required": true,
- "location": "path"
- },
- "sourceGeneration": {
- "type": "string",
- "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query"
- },
- "sourceObject": {
- "type": "string",
- "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "sourceBucket",
- "sourceObject",
- "destinationBucket",
- "destinationObject"
- ],
- "request": {
- "$ref": "Object"
- },
- "response": {
- "$ref": "Object"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ],
- "supportsMediaDownload": true,
- "useMediaDownloadService": true
- },
- "delete": {
- "id": "storage.objects.delete",
- "path": "b/{bucket}/o/{object}",
- "httpMethod": "DELETE",
- "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of the bucket in which the object resides.",
- "required": true,
- "location": "path"
- },
- "generation": {
- "type": "string",
- "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query"
- },
- "ifGenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifGenerationNotMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current generation does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationNotMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "object": {
- "type": "string",
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "get": {
- "id": "storage.objects.get",
- "path": "b/{bucket}/o/{object}",
- "httpMethod": "GET",
- "description": "Retrieves an object or its metadata.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of the bucket in which the object resides.",
- "required": true,
- "location": "path"
- },
- "generation": {
- "type": "string",
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query"
- },
- "ifGenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's generation matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifGenerationNotMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's generation does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationNotMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "object": {
- "type": "string",
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "required": true,
- "location": "path"
- },
- "projection": {
- "type": "string",
- "description": "Set of properties to return. Defaults to noAcl.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the acl property."
- ],
- "location": "query"
- }
- },
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "response": {
- "$ref": "Object"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ],
- "supportsMediaDownload": true,
- "useMediaDownloadService": true
- },
- "insert": {
- "id": "storage.objects.insert",
- "path": "b/{bucket}/o",
- "httpMethod": "POST",
- "description": "Stores a new object and metadata.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
- "required": true,
- "location": "path"
- },
- "contentEncoding": {
- "type": "string",
- "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.",
- "location": "query"
- },
- "ifGenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifGenerationNotMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current generation does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationNotMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "name": {
- "type": "string",
- "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "location": "query"
- },
- "predefinedAcl": {
- "type": "string",
- "description": "Apply a predefined set of access controls to this object.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query"
- },
- "projection": {
- "type": "string",
- "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the acl property."
- ],
- "location": "query"
- }
- },
- "parameterOrder": [
- "bucket"
- ],
- "request": {
- "$ref": "Object"
- },
- "response": {
- "$ref": "Object"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ],
- "supportsMediaDownload": true,
- "useMediaDownloadService": true,
- "supportsMediaUpload": true,
- "mediaUpload": {
- "accept": [
- "*/*"
- ],
- "protocols": {
- "simple": {
- "multipart": true,
- "path": "/upload/storage/v1/b/{bucket}/o"
- },
- "resumable": {
- "multipart": true,
- "path": "/resumable/upload/storage/v1/b/{bucket}/o"
- }
- }
- }
- },
- "list": {
- "id": "storage.objects.list",
- "path": "b/{bucket}/o",
- "httpMethod": "GET",
- "description": "Retrieves a list of objects matching the criteria.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of the bucket in which to look for objects.",
- "required": true,
- "location": "path"
- },
- "delimiter": {
- "type": "string",
- "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
- "location": "query"
- },
- "maxResults": {
- "type": "integer",
- "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.",
- "format": "uint32",
- "minimum": "0",
- "location": "query"
- },
- "pageToken": {
- "type": "string",
- "description": "A previously-returned page token representing part of the larger set of results to view.",
- "location": "query"
- },
- "prefix": {
- "type": "string",
- "description": "Filter results to objects whose names begin with this prefix.",
- "location": "query"
- },
- "projection": {
- "type": "string",
- "description": "Set of properties to return. Defaults to noAcl.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the acl property."
- ],
- "location": "query"
- },
- "versions": {
- "type": "boolean",
- "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
- "location": "query"
- }
- },
- "parameterOrder": [
- "bucket"
- ],
- "response": {
- "$ref": "Objects"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ],
- "supportsSubscription": true
- },
- "patch": {
- "id": "storage.objects.patch",
- "path": "b/{bucket}/o/{object}",
- "httpMethod": "PATCH",
- "description": "Updates an object's metadata. This method supports patch semantics.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of the bucket in which the object resides.",
- "required": true,
- "location": "path"
- },
- "generation": {
- "type": "string",
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query"
- },
- "ifGenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifGenerationNotMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current generation does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationNotMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "object": {
- "type": "string",
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "required": true,
- "location": "path"
- },
- "predefinedAcl": {
- "type": "string",
- "description": "Apply a predefined set of access controls to this object.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query"
- },
- "projection": {
- "type": "string",
- "description": "Set of properties to return. Defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the acl property."
- ],
- "location": "query"
- }
- },
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "request": {
- "$ref": "Object"
- },
- "response": {
- "$ref": "Object"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "rewrite": {
- "id": "storage.objects.rewrite",
- "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}",
- "httpMethod": "POST",
- "description": "Rewrites a source object to a destination object. Optionally overrides metadata.",
- "parameters": {
- "destinationBucket": {
- "type": "string",
- "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
- "required": true,
- "location": "path"
- },
- "destinationObject": {
- "type": "string",
- "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "required": true,
- "location": "path"
- },
- "destinationPredefinedAcl": {
- "type": "string",
- "description": "Apply a predefined set of access controls to the destination object.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query"
- },
- "ifGenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifGenerationNotMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationNotMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifSourceGenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the source object's generation matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifSourceGenerationNotMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the source object's generation does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifSourceMetagenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifSourceMetagenerationNotMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "maxBytesRewrittenPerCall": {
- "type": "string",
- "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.",
- "format": "int64",
- "location": "query"
- },
- "projection": {
- "type": "string",
- "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the acl property."
- ],
- "location": "query"
- },
- "rewriteToken": {
- "type": "string",
- "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.",
- "location": "query"
- },
- "sourceBucket": {
- "type": "string",
- "description": "Name of the bucket in which to find the source object.",
- "required": true,
- "location": "path"
- },
- "sourceGeneration": {
- "type": "string",
- "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query"
- },
- "sourceObject": {
- "type": "string",
- "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "sourceBucket",
- "sourceObject",
- "destinationBucket",
- "destinationObject"
- ],
- "request": {
- "$ref": "Object"
- },
- "response": {
- "$ref": "RewriteResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "update": {
- "id": "storage.objects.update",
- "path": "b/{bucket}/o/{object}",
- "httpMethod": "PUT",
- "description": "Updates an object's metadata.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of the bucket in which the object resides.",
- "required": true,
- "location": "path"
- },
- "generation": {
- "type": "string",
- "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- "format": "int64",
- "location": "query"
- },
- "ifGenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifGenerationNotMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current generation does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query"
- },
- "ifMetagenerationNotMatch": {
- "type": "string",
- "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query"
- },
- "object": {
- "type": "string",
- "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- "required": true,
- "location": "path"
- },
- "predefinedAcl": {
- "type": "string",
- "description": "Apply a predefined set of access controls to this object.",
- "enum": [
- "authenticatedRead",
- "bucketOwnerFullControl",
- "bucketOwnerRead",
- "private",
- "projectPrivate",
- "publicRead"
- ],
- "enumDescriptions": [
- "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- "Object owner gets OWNER access, and project team owners get OWNER access.",
- "Object owner gets OWNER access, and project team owners get READER access.",
- "Object owner gets OWNER access.",
- "Object owner gets OWNER access, and project team members get access according to their roles.",
- "Object owner gets OWNER access, and allUsers get READER access."
- ],
- "location": "query"
- },
- "projection": {
- "type": "string",
- "description": "Set of properties to return. Defaults to full.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the acl property."
- ],
- "location": "query"
- }
- },
- "parameterOrder": [
- "bucket",
- "object"
- ],
- "request": {
- "$ref": "Object"
- },
- "response": {
- "$ref": "Object"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ],
- "supportsMediaDownload": true,
- "useMediaDownloadService": true
- },
- "watchAll": {
- "id": "storage.objects.watchAll",
- "path": "b/{bucket}/o/watch",
- "httpMethod": "POST",
- "description": "Watch for changes on all objects in a bucket.",
- "parameters": {
- "bucket": {
- "type": "string",
- "description": "Name of the bucket in which to look for objects.",
- "required": true,
- "location": "path"
- },
- "delimiter": {
- "type": "string",
- "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
- "location": "query"
- },
- "maxResults": {
- "type": "integer",
- "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.",
- "format": "uint32",
- "minimum": "0",
- "location": "query"
- },
- "pageToken": {
- "type": "string",
- "description": "A previously-returned page token representing part of the larger set of results to view.",
- "location": "query"
- },
- "prefix": {
- "type": "string",
- "description": "Filter results to objects whose names begin with this prefix.",
- "location": "query"
- },
- "projection": {
- "type": "string",
- "description": "Set of properties to return. Defaults to noAcl.",
- "enum": [
- "full",
- "noAcl"
- ],
- "enumDescriptions": [
- "Include all properties.",
- "Omit the acl property."
- ],
- "location": "query"
- },
- "versions": {
- "type": "boolean",
- "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
- "location": "query"
- }
- },
- "parameterOrder": [
- "bucket"
- ],
- "request": {
- "$ref": "Channel",
- "parameterName": "resource"
- },
- "response": {
- "$ref": "Channel"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ],
- "supportsSubscription": true
- }
- }
- }
- }
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-gen.go
deleted file mode 100644
index a299044395..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-gen.go
+++ /dev/null
@@ -1,7690 +0,0 @@
-// Package storage provides access to the Cloud Storage JSON API.
-//
-// See https://developers.google.com/storage/docs/json_api/
-//
-// Usage example:
-//
-// import "google.golang.org/api/storage/v1"
-// ...
-// storageService, err := storage.New(oauthHttpClient)
-package storage
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- context "golang.org/x/net/context"
- ctxhttp "golang.org/x/net/context/ctxhttp"
- gensupport "google.golang.org/api/gensupport"
- googleapi "google.golang.org/api/googleapi"
- "io"
- "net/http"
- "net/url"
- "strconv"
- "strings"
-)
-
-// Always reference these packages, just in case the auto-generated code
-// below doesn't.
-var _ = bytes.NewBuffer
-var _ = strconv.Itoa
-var _ = fmt.Sprintf
-var _ = json.NewDecoder
-var _ = io.Copy
-var _ = url.Parse
-var _ = gensupport.MarshalJSON
-var _ = googleapi.Version
-var _ = errors.New
-var _ = strings.Replace
-var _ = context.Canceled
-var _ = ctxhttp.Do
-
-const apiId = "storage:v1"
-const apiName = "storage"
-const apiVersion = "v1"
-const basePath = "https://www.googleapis.com/storage/v1/"
-
-// OAuth2 scopes used by this API.
-const (
- // View and manage your data across Google Cloud Platform services
- CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
-
- // View your data across Google Cloud Platform services
- CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only"
-
- // Manage your data and permissions in Google Cloud Storage
- DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control"
-
- // View your data in Google Cloud Storage
- DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only"
-
- // Manage your data in Google Cloud Storage
- DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write"
-)
-
-func New(client *http.Client) (*Service, error) {
- if client == nil {
- return nil, errors.New("client is nil")
- }
- s := &Service{client: client, BasePath: basePath}
- s.BucketAccessControls = NewBucketAccessControlsService(s)
- s.Buckets = NewBucketsService(s)
- s.Channels = NewChannelsService(s)
- s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s)
- s.ObjectAccessControls = NewObjectAccessControlsService(s)
- s.Objects = NewObjectsService(s)
- return s, nil
-}
-
-type Service struct {
- client *http.Client
- BasePath string // API endpoint base URL
- UserAgent string // optional additional User-Agent fragment
-
- BucketAccessControls *BucketAccessControlsService
-
- Buckets *BucketsService
-
- Channels *ChannelsService
-
- DefaultObjectAccessControls *DefaultObjectAccessControlsService
-
- ObjectAccessControls *ObjectAccessControlsService
-
- Objects *ObjectsService
-}
-
-func (s *Service) userAgent() string {
- if s.UserAgent == "" {
- return googleapi.UserAgent
- }
- return googleapi.UserAgent + " " + s.UserAgent
-}
-
-func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService {
- rs := &BucketAccessControlsService{s: s}
- return rs
-}
-
-type BucketAccessControlsService struct {
- s *Service
-}
-
-func NewBucketsService(s *Service) *BucketsService {
- rs := &BucketsService{s: s}
- return rs
-}
-
-type BucketsService struct {
- s *Service
-}
-
-func NewChannelsService(s *Service) *ChannelsService {
- rs := &ChannelsService{s: s}
- return rs
-}
-
-type ChannelsService struct {
- s *Service
-}
-
-func NewDefaultObjectAccessControlsService(s *Service) *DefaultObjectAccessControlsService {
- rs := &DefaultObjectAccessControlsService{s: s}
- return rs
-}
-
-type DefaultObjectAccessControlsService struct {
- s *Service
-}
-
-func NewObjectAccessControlsService(s *Service) *ObjectAccessControlsService {
- rs := &ObjectAccessControlsService{s: s}
- return rs
-}
-
-type ObjectAccessControlsService struct {
- s *Service
-}
-
-func NewObjectsService(s *Service) *ObjectsService {
- rs := &ObjectsService{s: s}
- return rs
-}
-
-type ObjectsService struct {
- s *Service
-}
-
-// Bucket: A bucket.
-type Bucket struct {
- // Acl: Access controls on the bucket.
- Acl []*BucketAccessControl `json:"acl,omitempty"`
-
- // Cors: The bucket's Cross-Origin Resource Sharing (CORS)
- // configuration.
- Cors []*BucketCors `json:"cors,omitempty"`
-
- // DefaultObjectAcl: Default access controls to apply to new objects
- // when no ACL is provided.
- DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"`
-
- // Etag: HTTP 1.1 Entity tag for the bucket.
- Etag string `json:"etag,omitempty"`
-
- // Id: The ID of the bucket.
- Id string `json:"id,omitempty"`
-
- // Kind: The kind of item this is. For buckets, this is always
- // storage#bucket.
- Kind string `json:"kind,omitempty"`
-
- // Lifecycle: The bucket's lifecycle configuration. See lifecycle
- // management for more information.
- Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"`
-
- // Location: The location of the bucket. Object data for objects in the
- // bucket resides in physical storage within this region. Defaults to
- // US. See the developer's guide for the authoritative list.
- Location string `json:"location,omitempty"`
-
- // Logging: The bucket's logging configuration, which defines the
- // destination bucket and optional name prefix for the current bucket's
- // logs.
- Logging *BucketLogging `json:"logging,omitempty"`
-
- // Metageneration: The metadata generation of this bucket.
- Metageneration int64 `json:"metageneration,omitempty,string"`
-
- // Name: The name of the bucket.
- Name string `json:"name,omitempty"`
-
- // Owner: The owner of the bucket. This is always the project team's
- // owner group.
- Owner *BucketOwner `json:"owner,omitempty"`
-
- // ProjectNumber: The project number of the project the bucket belongs
- // to.
- ProjectNumber uint64 `json:"projectNumber,omitempty,string"`
-
- // SelfLink: The URI of this bucket.
- SelfLink string `json:"selfLink,omitempty"`
-
- // StorageClass: The bucket's storage class. This defines how objects in
- // the bucket are stored and determines the SLA and the cost of storage.
- // Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY.
- // Defaults to STANDARD. For more information, see storage classes.
- StorageClass string `json:"storageClass,omitempty"`
-
- // TimeCreated: The creation time of the bucket in RFC 3339 format.
- TimeCreated string `json:"timeCreated,omitempty"`
-
- // Updated: The modification time of the bucket in RFC 3339 format.
- Updated string `json:"updated,omitempty"`
-
- // Versioning: The bucket's versioning configuration.
- Versioning *BucketVersioning `json:"versioning,omitempty"`
-
- // Website: The bucket's website configuration.
- Website *BucketWebsite `json:"website,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Acl") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *Bucket) MarshalJSON() ([]byte, error) {
- type noMethod Bucket
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-type BucketCors struct {
- // MaxAgeSeconds: The value, in seconds, to return in the
- // Access-Control-Max-Age header used in preflight responses.
- MaxAgeSeconds int64 `json:"maxAgeSeconds,omitempty"`
-
- // Method: The list of HTTP methods on which to include CORS response
- // headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list
- // of methods, and means "any method".
- Method []string `json:"method,omitempty"`
-
- // Origin: The list of Origins eligible to receive CORS response
- // headers. Note: "*" is permitted in the list of origins, and means
- // "any Origin".
- Origin []string `json:"origin,omitempty"`
-
- // ResponseHeader: The list of HTTP headers other than the simple
- // response headers to give permission for the user-agent to share
- // across domains.
- ResponseHeader []string `json:"responseHeader,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "MaxAgeSeconds") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *BucketCors) MarshalJSON() ([]byte, error) {
- type noMethod BucketCors
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// BucketLifecycle: The bucket's lifecycle configuration. See lifecycle
-// management for more information.
-type BucketLifecycle struct {
- // Rule: A lifecycle management rule, which is made of an action to take
- // and the condition(s) under which the action will be taken.
- Rule []*BucketLifecycleRule `json:"rule,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Rule") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *BucketLifecycle) MarshalJSON() ([]byte, error) {
- type noMethod BucketLifecycle
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-type BucketLifecycleRule struct {
- // Action: The action to take.
- Action *BucketLifecycleRuleAction `json:"action,omitempty"`
-
- // Condition: The condition(s) under which the action will be taken.
- Condition *BucketLifecycleRuleCondition `json:"condition,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Action") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) {
- type noMethod BucketLifecycleRule
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// BucketLifecycleRuleAction: The action to take.
-type BucketLifecycleRuleAction struct {
- // Type: Type of the action. Currently, only Delete is supported.
- Type string `json:"type,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Type") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) {
- type noMethod BucketLifecycleRuleAction
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// BucketLifecycleRuleCondition: The condition(s) under which the action
-// will be taken.
-type BucketLifecycleRuleCondition struct {
- // Age: Age of an object (in days). This condition is satisfied when an
- // object reaches the specified age.
- Age int64 `json:"age,omitempty"`
-
- // CreatedBefore: A date in RFC 3339 format with only the date part (for
- // instance, "2013-01-15"). This condition is satisfied when an object
- // is created before midnight of the specified date in UTC.
- CreatedBefore string `json:"createdBefore,omitempty"`
-
- // IsLive: Relevant only for versioned objects. If the value is true,
- // this condition matches live objects; if the value is false, it
- // matches archived objects.
- IsLive bool `json:"isLive,omitempty"`
-
- // NumNewerVersions: Relevant only for versioned objects. If the value
- // is N, this condition is satisfied when there are at least N versions
- // (including the live version) newer than this version of the object.
- NumNewerVersions int64 `json:"numNewerVersions,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Age") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) {
- type noMethod BucketLifecycleRuleCondition
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// BucketLogging: The bucket's logging configuration, which defines the
-// destination bucket and optional name prefix for the current bucket's
-// logs.
-type BucketLogging struct {
- // LogBucket: The destination bucket where the current bucket's logs
- // should be placed.
- LogBucket string `json:"logBucket,omitempty"`
-
- // LogObjectPrefix: A prefix for log object names.
- LogObjectPrefix string `json:"logObjectPrefix,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "LogBucket") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *BucketLogging) MarshalJSON() ([]byte, error) {
- type noMethod BucketLogging
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// BucketOwner: The owner of the bucket. This is always the project
-// team's owner group.
-type BucketOwner struct {
- // Entity: The entity, in the form project-owner-projectId.
- Entity string `json:"entity,omitempty"`
-
- // EntityId: The ID for the entity.
- EntityId string `json:"entityId,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Entity") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *BucketOwner) MarshalJSON() ([]byte, error) {
- type noMethod BucketOwner
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// BucketVersioning: The bucket's versioning configuration.
-type BucketVersioning struct {
- // Enabled: While set to true, versioning is fully enabled for this
- // bucket.
- Enabled bool `json:"enabled,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Enabled") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *BucketVersioning) MarshalJSON() ([]byte, error) {
- type noMethod BucketVersioning
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// BucketWebsite: The bucket's website configuration.
-type BucketWebsite struct {
- // MainPageSuffix: Behaves as the bucket's directory index where missing
- // objects are treated as potential directories.
- MainPageSuffix string `json:"mainPageSuffix,omitempty"`
-
- // NotFoundPage: The custom object to return when a requested resource
- // is not found.
- NotFoundPage string `json:"notFoundPage,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "MainPageSuffix") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *BucketWebsite) MarshalJSON() ([]byte, error) {
- type noMethod BucketWebsite
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// BucketAccessControl: An access-control entry.
-type BucketAccessControl struct {
- // Bucket: The name of the bucket.
- Bucket string `json:"bucket,omitempty"`
-
- // Domain: The domain associated with the entity, if any.
- Domain string `json:"domain,omitempty"`
-
- // Email: The email address associated with the entity, if any.
- Email string `json:"email,omitempty"`
-
- // Entity: The entity holding the permission, in one of the following
- // forms:
- // - user-userId
- // - user-email
- // - group-groupId
- // - group-email
- // - domain-domain
- // - project-team-projectId
- // - allUsers
- // - allAuthenticatedUsers Examples:
- // - The user liz@example.com would be user-liz@example.com.
- // - The group example@googlegroups.com would be
- // group-example@googlegroups.com.
- // - To refer to all members of the Google Apps for Business domain
- // example.com, the entity would be domain-example.com.
- Entity string `json:"entity,omitempty"`
-
- // EntityId: The ID for the entity, if any.
- EntityId string `json:"entityId,omitempty"`
-
- // Etag: HTTP 1.1 Entity tag for the access-control entry.
- Etag string `json:"etag,omitempty"`
-
- // Id: The ID of the access-control entry.
- Id string `json:"id,omitempty"`
-
- // Kind: The kind of item this is. For bucket access control entries,
- // this is always storage#bucketAccessControl.
- Kind string `json:"kind,omitempty"`
-
- // ProjectTeam: The project team associated with the entity, if any.
- ProjectTeam *BucketAccessControlProjectTeam `json:"projectTeam,omitempty"`
-
- // Role: The access permission for the entity. Can be READER, WRITER, or
- // OWNER.
- Role string `json:"role,omitempty"`
-
- // SelfLink: The link to this access-control entry.
- SelfLink string `json:"selfLink,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Bucket") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *BucketAccessControl) MarshalJSON() ([]byte, error) {
- type noMethod BucketAccessControl
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// BucketAccessControlProjectTeam: The project team associated with the
-// entity, if any.
-type BucketAccessControlProjectTeam struct {
- // ProjectNumber: The project number.
- ProjectNumber string `json:"projectNumber,omitempty"`
-
- // Team: The team. Can be owners, editors, or viewers.
- Team string `json:"team,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "ProjectNumber") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) {
- type noMethod BucketAccessControlProjectTeam
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// BucketAccessControls: An access-control list.
-type BucketAccessControls struct {
- // Items: The list of items.
- Items []*BucketAccessControl `json:"items,omitempty"`
-
- // Kind: The kind of item this is. For lists of bucket access control
- // entries, this is always storage#bucketAccessControls.
- Kind string `json:"kind,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Items") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *BucketAccessControls) MarshalJSON() ([]byte, error) {
- type noMethod BucketAccessControls
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// Buckets: A list of buckets.
-type Buckets struct {
- // Items: The list of items.
- Items []*Bucket `json:"items,omitempty"`
-
- // Kind: The kind of item this is. For lists of buckets, this is always
- // storage#buckets.
- Kind string `json:"kind,omitempty"`
-
- // NextPageToken: The continuation token, used to page through large
- // result sets. Provide this value in a subsequent request to return the
- // next page of results.
- NextPageToken string `json:"nextPageToken,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Items") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *Buckets) MarshalJSON() ([]byte, error) {
- type noMethod Buckets
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// Channel: A notification channel used to watch for resource changes.
-type Channel struct {
- // Address: The address where notifications are delivered for this
- // channel.
- Address string `json:"address,omitempty"`
-
- // Expiration: Date and time of notification channel expiration,
- // expressed as a Unix timestamp, in milliseconds. Optional.
- Expiration int64 `json:"expiration,omitempty,string"`
-
- // Id: A UUID or similar unique string that identifies this channel.
- Id string `json:"id,omitempty"`
-
- // Kind: Identifies this as a notification channel used to watch for
- // changes to a resource. Value: the fixed string "api#channel".
- Kind string `json:"kind,omitempty"`
-
- // Params: Additional parameters controlling delivery channel behavior.
- // Optional.
- Params map[string]string `json:"params,omitempty"`
-
- // Payload: A Boolean value to indicate whether payload is wanted.
- // Optional.
- Payload bool `json:"payload,omitempty"`
-
- // ResourceId: An opaque ID that identifies the resource being watched
- // on this channel. Stable across different API versions.
- ResourceId string `json:"resourceId,omitempty"`
-
- // ResourceUri: A version-specific identifier for the watched resource.
- ResourceUri string `json:"resourceUri,omitempty"`
-
- // Token: An arbitrary string delivered to the target address with each
- // notification delivered over this channel. Optional.
- Token string `json:"token,omitempty"`
-
- // Type: The type of delivery mechanism used for this channel.
- Type string `json:"type,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Address") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *Channel) MarshalJSON() ([]byte, error) {
- type noMethod Channel
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// ComposeRequest: A Compose request.
-type ComposeRequest struct {
- // Destination: Properties of the resulting object.
- Destination *Object `json:"destination,omitempty"`
-
- // Kind: The kind of item this is.
- Kind string `json:"kind,omitempty"`
-
- // SourceObjects: The list of source objects that will be concatenated
- // into a single object.
- SourceObjects []*ComposeRequestSourceObjects `json:"sourceObjects,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Destination") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *ComposeRequest) MarshalJSON() ([]byte, error) {
- type noMethod ComposeRequest
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-type ComposeRequestSourceObjects struct {
- // Generation: The generation of this object to use as the source.
- Generation int64 `json:"generation,omitempty,string"`
-
- // Name: The source object's name. The source object's bucket is
- // implicitly the destination bucket.
- Name string `json:"name,omitempty"`
-
- // ObjectPreconditions: Conditions that must be met for this operation
- // to execute.
- ObjectPreconditions *ComposeRequestSourceObjectsObjectPreconditions `json:"objectPreconditions,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Generation") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) {
- type noMethod ComposeRequestSourceObjects
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// ComposeRequestSourceObjectsObjectPreconditions: Conditions that must
-// be met for this operation to execute.
-type ComposeRequestSourceObjectsObjectPreconditions struct {
- // IfGenerationMatch: Only perform the composition if the generation of
- // the source object that would be used matches this value. If this
- // value and a generation are both specified, they must be the same
- // value or the call will fail.
- IfGenerationMatch int64 `json:"ifGenerationMatch,omitempty,string"`
-
- // ForceSendFields is a list of field names (e.g. "IfGenerationMatch")
- // to unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) {
- type noMethod ComposeRequestSourceObjectsObjectPreconditions
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// Object: An object.
-type Object struct {
- // Acl: Access controls on the object.
- Acl []*ObjectAccessControl `json:"acl,omitempty"`
-
- // Bucket: The name of the bucket containing this object.
- Bucket string `json:"bucket,omitempty"`
-
- // CacheControl: Cache-Control directive for the object data.
- CacheControl string `json:"cacheControl,omitempty"`
-
- // ComponentCount: Number of underlying components that make up this
- // object. Components are accumulated by compose operations.
- ComponentCount int64 `json:"componentCount,omitempty"`
-
- // ContentDisposition: Content-Disposition of the object data.
- ContentDisposition string `json:"contentDisposition,omitempty"`
-
- // ContentEncoding: Content-Encoding of the object data.
- ContentEncoding string `json:"contentEncoding,omitempty"`
-
- // ContentLanguage: Content-Language of the object data.
- ContentLanguage string `json:"contentLanguage,omitempty"`
-
- // ContentType: Content-Type of the object data.
- ContentType string `json:"contentType,omitempty"`
-
- // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B;
- // encoded using base64 in big-endian byte order. For more information
- // about using the CRC32c checksum, see Hashes and ETags: Best
- // Practices.
- Crc32c string `json:"crc32c,omitempty"`
-
- // CustomerEncryption: Metadata of customer-supplied encryption key, if
- // the object is encrypted by such a key.
- CustomerEncryption *ObjectCustomerEncryption `json:"customerEncryption,omitempty"`
-
- // Etag: HTTP 1.1 Entity tag for the object.
- Etag string `json:"etag,omitempty"`
-
- // Generation: The content generation of this object. Used for object
- // versioning.
- Generation int64 `json:"generation,omitempty,string"`
-
- // Id: The ID of the object.
- Id string `json:"id,omitempty"`
-
- // Kind: The kind of item this is. For objects, this is always
- // storage#object.
- Kind string `json:"kind,omitempty"`
-
- // Md5Hash: MD5 hash of the data; encoded using base64. For more
- // information about using the MD5 hash, see Hashes and ETags: Best
- // Practices.
- Md5Hash string `json:"md5Hash,omitempty"`
-
- // MediaLink: Media download link.
- MediaLink string `json:"mediaLink,omitempty"`
-
- // Metadata: User-provided metadata, in key/value pairs.
- Metadata map[string]string `json:"metadata,omitempty"`
-
- // Metageneration: The version of the metadata for this object at this
- // generation. Used for preconditions and for detecting changes in
- // metadata. A metageneration number is only meaningful in the context
- // of a particular generation of a particular object.
- Metageneration int64 `json:"metageneration,omitempty,string"`
-
- // Name: The name of this object. Required if not specified by URL
- // parameter.
- Name string `json:"name,omitempty"`
-
- // Owner: The owner of the object. This will always be the uploader of
- // the object.
- Owner *ObjectOwner `json:"owner,omitempty"`
-
- // SelfLink: The link to this object.
- SelfLink string `json:"selfLink,omitempty"`
-
- // Size: Content-Length of the data in bytes.
- Size uint64 `json:"size,omitempty,string"`
-
- // StorageClass: Storage class of the object.
- StorageClass string `json:"storageClass,omitempty"`
-
- // TimeCreated: The creation time of the object in RFC 3339 format.
- TimeCreated string `json:"timeCreated,omitempty"`
-
- // TimeDeleted: The deletion time of the object in RFC 3339 format. Will
- // be returned if and only if this version of the object has been
- // deleted.
- TimeDeleted string `json:"timeDeleted,omitempty"`
-
- // Updated: The modification time of the object metadata in RFC 3339
- // format.
- Updated string `json:"updated,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Acl") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *Object) MarshalJSON() ([]byte, error) {
- type noMethod Object
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// ObjectCustomerEncryption: Metadata of customer-supplied encryption
-// key, if the object is encrypted by such a key.
-type ObjectCustomerEncryption struct {
- // EncryptionAlgorithm: The encryption algorithm.
- EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"`
-
- // KeySha256: SHA256 hash value of the encryption key.
- KeySha256 string `json:"keySha256,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "EncryptionAlgorithm")
- // to unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) {
- type noMethod ObjectCustomerEncryption
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// ObjectOwner: The owner of the object. This will always be the
-// uploader of the object.
-type ObjectOwner struct {
- // Entity: The entity, in the form user-userId.
- Entity string `json:"entity,omitempty"`
-
- // EntityId: The ID for the entity.
- EntityId string `json:"entityId,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Entity") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *ObjectOwner) MarshalJSON() ([]byte, error) {
- type noMethod ObjectOwner
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// ObjectAccessControl: An access-control entry.
-type ObjectAccessControl struct {
- // Bucket: The name of the bucket.
- Bucket string `json:"bucket,omitempty"`
-
- // Domain: The domain associated with the entity, if any.
- Domain string `json:"domain,omitempty"`
-
- // Email: The email address associated with the entity, if any.
- Email string `json:"email,omitempty"`
-
- // Entity: The entity holding the permission, in one of the following
- // forms:
- // - user-userId
- // - user-email
- // - group-groupId
- // - group-email
- // - domain-domain
- // - project-team-projectId
- // - allUsers
- // - allAuthenticatedUsers
- // Examples:
- // - The user liz@example.com would be user-liz@example.com.
- // - The group example@googlegroups.com would be
- // group-example@googlegroups.com.
- // - To refer to all members of the Google Apps for Business domain
- // example.com, the entity would be domain-example.com.
- Entity string `json:"entity,omitempty"`
-
- // EntityId: The ID for the entity, if any.
- EntityId string `json:"entityId,omitempty"`
-
- // Etag: HTTP 1.1 Entity tag for the access-control entry.
- Etag string `json:"etag,omitempty"`
-
- // Generation: The content generation of the object.
- Generation int64 `json:"generation,omitempty,string"`
-
- // Id: The ID of the access-control entry.
- Id string `json:"id,omitempty"`
-
- // Kind: The kind of item this is. For object access control entries,
- // this is always storage#objectAccessControl.
- Kind string `json:"kind,omitempty"`
-
- // Object: The name of the object.
- Object string `json:"object,omitempty"`
-
- // ProjectTeam: The project team associated with the entity, if any.
- ProjectTeam *ObjectAccessControlProjectTeam `json:"projectTeam,omitempty"`
-
- // Role: The access permission for the entity. Can be READER or OWNER.
- Role string `json:"role,omitempty"`
-
- // SelfLink: The link to this access-control entry.
- SelfLink string `json:"selfLink,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Bucket") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) {
- type noMethod ObjectAccessControl
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// ObjectAccessControlProjectTeam: The project team associated with the
-// entity, if any.
-type ObjectAccessControlProjectTeam struct {
- // ProjectNumber: The project number.
- ProjectNumber string `json:"projectNumber,omitempty"`
-
- // Team: The team. Can be owners, editors, or viewers.
- Team string `json:"team,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "ProjectNumber") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) {
- type noMethod ObjectAccessControlProjectTeam
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// ObjectAccessControls: An access-control list.
-type ObjectAccessControls struct {
- // Items: The list of items.
- Items []interface{} `json:"items,omitempty"`
-
- // Kind: The kind of item this is. For lists of object access control
- // entries, this is always storage#objectAccessControls.
- Kind string `json:"kind,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Items") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) {
- type noMethod ObjectAccessControls
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
-
-// Objects: A list of objects.
-type Objects struct {
- // Items: The list of items.
- Items []*Object `json:"items,omitempty"`
-
- // Kind: The kind of item this is. For lists of objects, this is always
- // storage#objects.
- Kind string `json:"kind,omitempty"`
-
- // NextPageToken: The continuation token, used to page through large
- // result sets. Provide this value in a subsequent request to return the
- // next page of results.
- NextPageToken string `json:"nextPageToken,omitempty"`
-
- // Prefixes: The list of prefixes of objects matching-but-not-listed up
- // to and including the requested delimiter.
- Prefixes []string `json:"prefixes,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Items") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *Objects) MarshalJSON() ([]byte, error) {
- type noMethod Objects
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
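
The NextPageToken field above drives paging: the token from one response is passed back on the next List call until it comes back empty. A hedged sketch, assuming a constructed *storage.Service (svc here) and that ObjectsService.List and its PageToken option behave as in the upstream generated client (they are not shown in this hunk):

package example

import storage "google.golang.org/api/storage/v1" // assumed import path

// listAllObjects is an illustrative helper, not part of the generated client.
func listAllObjects(svc *storage.Service, bucket string) ([]*storage.Object, error) {
	var all []*storage.Object
	pageToken := ""
	for {
		call := svc.Objects.List(bucket)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		page, err := call.Do()
		if err != nil {
			return nil, err
		}
		all = append(all, page.Items...)
		if page.NextPageToken == "" {
			return all, nil // no more pages
		}
		pageToken = page.NextPageToken
	}
}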
-
-// RewriteResponse: A rewrite response.
-type RewriteResponse struct {
- // Done: true if the copy is finished; otherwise, false if the copy is
- // in progress. This property is always present in the response.
- Done bool `json:"done,omitempty"`
-
- // Kind: The kind of item this is.
- Kind string `json:"kind,omitempty"`
-
- // ObjectSize: The total size of the object being copied in bytes. This
- // property is always present in the response.
- ObjectSize uint64 `json:"objectSize,omitempty,string"`
-
- // Resource: A resource containing the metadata for the copied-to
- // object. This property is present in the response only when copying
- // completes.
- Resource *Object `json:"resource,omitempty"`
-
- // RewriteToken: A token to use in subsequent requests to continue
- // copying data. This token is present in the response only when there
- // is more data to copy.
- RewriteToken string `json:"rewriteToken,omitempty"`
-
- // TotalBytesRewritten: The total bytes written so far, which can be
- // used to provide a waiting user with a progress indicator. This
- // property is always present in the response.
- TotalBytesRewritten uint64 `json:"totalBytesRewritten,omitempty,string"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Done") to
- // unconditionally include in API requests. By default, fields with
- // empty values are omitted from API requests. However, any non-pointer,
- // non-interface field appearing in ForceSendFields will be sent to the
- // server regardless of whether the field is empty or not. This may be
- // used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-}
-
-func (s *RewriteResponse) MarshalJSON() ([]byte, error) {
- type noMethod RewriteResponse
- raw := noMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields)
-}
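
Done, RewriteToken and Resource together describe a resumable copy: the caller re-issues the rewrite with the returned token until Done is true. A hedged sketch, assuming ObjectsService.Rewrite and its RewriteToken option exist as in the upstream generated client (neither appears in this hunk) and that an empty destination Object body is acceptable:

package example

import storage "google.golang.org/api/storage/v1" // assumed import path

// copyObject is an illustrative helper that drives a rewrite to completion.
func copyObject(svc *storage.Service, srcBucket, srcObj, dstBucket, dstObj string) (*storage.Object, error) {
	token := ""
	for {
		call := svc.Objects.Rewrite(srcBucket, srcObj, dstBucket, dstObj, &storage.Object{})
		if token != "" {
			call = call.RewriteToken(token)
		}
		resp, err := call.Do()
		if err != nil {
			return nil, err
		}
		if resp.Done {
			return resp.Resource, nil // metadata of the copied-to object
		}
		token = resp.RewriteToken // more bytes to copy; continue where we left off
	}
}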
-
-// method id "storage.bucketAccessControls.delete":
-
-type BucketAccessControlsDeleteCall struct {
- s *Service
- bucket string
- entity string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Delete: Permanently deletes the ACL entry for the specified entity on
-// the specified bucket.
-func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall {
- c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.entity = entity
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("DELETE", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "entity": c.entity,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.bucketAccessControls.delete" call.
-func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if err != nil {
- return err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return err
- }
- return nil
- // {
- // "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.",
- // "httpMethod": "DELETE",
- // "id": "storage.bucketAccessControls.delete",
- // "parameterOrder": [
- // "bucket",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/acl/{entity}",
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.bucketAccessControls.get":
-
-type BucketAccessControlsGetCall struct {
- s *Service
- bucket string
- entity string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
-}
-
-// Get: Returns the ACL entry for the specified entity on the specified
-// bucket.
-func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall {
- c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.entity = entity
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "entity": c.entity,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- req.Header.Set("If-None-Match", c.ifNoneMatch_)
- }
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.bucketAccessControls.get" call.
-// Exactly one of *BucketAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *BucketAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &BucketAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Returns the ACL entry for the specified entity on the specified bucket.",
- // "httpMethod": "GET",
- // "id": "storage.bucketAccessControls.get",
- // "parameterOrder": [
- // "bucket",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/acl/{entity}",
- // "response": {
- // "$ref": "BucketAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
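
The IfNoneMatch and IsNotModified comments above describe a conditional GET: send a cached ETag and treat a 304 response as "unchanged". A hedged sketch, assuming a constructed *storage.Service (svc) and a previously fetched entry; the helper name is illustrative:

package example

import (
	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1" // assumed import path
)

// refreshBucketACL returns the cached entry when the server reports 304 Not Modified.
func refreshBucketACL(svc *storage.Service, bucket, entity string, cached *storage.BucketAccessControl) (*storage.BucketAccessControl, error) {
	acl, err := svc.BucketAccessControls.Get(bucket, entity).
		IfNoneMatch(cached.Etag).
		Do()
	if googleapi.IsNotModified(err) {
		return cached, nil // ETag matched; nothing changed since the last request
	}
	if err != nil {
		return nil, err
	}
	return acl, nil
}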
-
-// method id "storage.bucketAccessControls.insert":
-
-type BucketAccessControlsInsertCall struct {
- s *Service
- bucket string
- bucketaccesscontrol *BucketAccessControl
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Insert: Creates a new ACL entry on the specified bucket.
-func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall {
- c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.bucketaccesscontrol = bucketaccesscontrol
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.bucketAccessControls.insert" call.
-// Exactly one of *BucketAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *BucketAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &BucketAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Creates a new ACL entry on the specified bucket.",
- // "httpMethod": "POST",
- // "id": "storage.bucketAccessControls.insert",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/acl",
- // "request": {
- // "$ref": "BucketAccessControl"
- // },
- // "response": {
- // "$ref": "BucketAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.bucketAccessControls.list":
-
-type BucketAccessControlsListCall struct {
- s *Service
- bucket string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
-}
-
-// List: Retrieves ACL entries on the specified bucket.
-func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall {
- c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- req.Header.Set("If-None-Match", c.ifNoneMatch_)
- }
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.bucketAccessControls.list" call.
-// Exactly one of *BucketAccessControls or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *BucketAccessControls.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &BucketAccessControls{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Retrieves ACL entries on the specified bucket.",
- // "httpMethod": "GET",
- // "id": "storage.bucketAccessControls.list",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/acl",
- // "response": {
- // "$ref": "BucketAccessControls"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.bucketAccessControls.patch":
-
-type BucketAccessControlsPatchCall struct {
- s *Service
- bucket string
- entity string
- bucketaccesscontrol *BucketAccessControl
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Patch: Updates an ACL entry on the specified bucket. This method
-// supports patch semantics.
-func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall {
- c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.entity = entity
- c.bucketaccesscontrol = bucketaccesscontrol
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PATCH", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "entity": c.entity,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.bucketAccessControls.patch" call.
-// Exactly one of *BucketAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *BucketAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &BucketAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.",
- // "httpMethod": "PATCH",
- // "id": "storage.bucketAccessControls.patch",
- // "parameterOrder": [
- // "bucket",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/acl/{entity}",
- // "request": {
- // "$ref": "BucketAccessControl"
- // },
- // "response": {
- // "$ref": "BucketAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.bucketAccessControls.update":
-
-type BucketAccessControlsUpdateCall struct {
- s *Service
- bucket string
- entity string
- bucketaccesscontrol *BucketAccessControl
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Update: Updates an ACL entry on the specified bucket.
-func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall {
- c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.entity = entity
- c.bucketaccesscontrol = bucketaccesscontrol
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PUT", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "entity": c.entity,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.bucketAccessControls.update" call.
-// Exactly one of *BucketAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *BucketAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &BucketAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates an ACL entry on the specified bucket.",
- // "httpMethod": "PUT",
- // "id": "storage.bucketAccessControls.update",
- // "parameterOrder": [
- // "bucket",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/acl/{entity}",
- // "request": {
- // "$ref": "BucketAccessControl"
- // },
- // "response": {
- // "$ref": "BucketAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.buckets.delete":
-
-type BucketsDeleteCall struct {
- s *Service
- bucket string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Delete: Permanently deletes an empty bucket.
-func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall {
- c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": If set, only deletes the bucket if its
-// metageneration matches this value.
-func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": If set, only deletes the bucket if its
-// metageneration does not match this value.
-func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("DELETE", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.buckets.delete" call.
-func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if err != nil {
- return err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return err
- }
- return nil
- // {
- // "description": "Permanently deletes an empty bucket.",
- // "httpMethod": "DELETE",
- // "id": "storage.buckets.delete",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "If set, only deletes the bucket if its metageneration matches this value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "If set, only deletes the bucket if its metageneration does not match this value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}",
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
-// method id "storage.buckets.get":
-
-type BucketsGetCall struct {
- s *Service
- bucket string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
-}
-
-// Get: Returns metadata for the specified bucket.
-func (r *BucketsService) Get(bucket string) *BucketsGetCall {
- c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the return of the bucket metadata
-// conditional on whether the bucket's current metageneration matches
-// the given value.
-func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the return of the bucket metadata
-// conditional on whether the bucket's current metageneration does not
-// match the given value.
-func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit acl and defaultObjectAcl properties.
-func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- req.Header.Set("If-None-Match", c.ifNoneMatch_)
- }
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.buckets.get" call.
-// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Bucket.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Bucket{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Returns metadata for the specified bucket.",
- // "httpMethod": "GET",
- // "id": "storage.buckets.get",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to noAcl.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit acl and defaultObjectAcl properties."
- // ],
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}",
- // "response": {
- // "$ref": "Bucket"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
-// method id "storage.buckets.insert":
-
-type BucketsInsertCall struct {
- s *Service
- bucket *Bucket
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Insert: Creates a new bucket.
-func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall {
- c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.urlParams_.Set("project", projectid)
- c.bucket = bucket
- return c
-}
-
-// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
-// predefined set of access controls to this bucket.
-//
-// Possible values:
-// "authenticatedRead" - Project team owners get OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "private" - Project team owners get OWNER access.
-// "projectPrivate" - Project team members get access according to
-// their roles.
-// "publicRead" - Project team owners get OWNER access, and allUsers
-// get READER access.
-// "publicReadWrite" - Project team owners get OWNER access, and
-// allUsers get WRITER access.
-func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall {
- c.urlParams_.Set("predefinedAcl", predefinedAcl)
- return c
-}
-
-// PredefinedDefaultObjectAcl sets the optional parameter
-// "predefinedDefaultObjectAcl": Apply a predefined set of default
-// object access controls to this bucket.
-//
-// Possible values:
-// "authenticatedRead" - Object owner gets OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and
-// project team owners get OWNER access.
-// "bucketOwnerRead" - Object owner gets OWNER access, and project
-// team owners get READER access.
-// "private" - Object owner gets OWNER access.
-// "projectPrivate" - Object owner gets OWNER access, and project team
-// members get access according to their roles.
-// "publicRead" - Object owner gets OWNER access, and allUsers get
-// READER access.
-func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall {
- c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl)
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl, unless the bucket resource
-// specifies acl or defaultObjectAcl properties, when it defaults to
-// full.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit acl and defaultObjectAcl properties.
-func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.SetOpaque(req.URL)
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.buckets.insert" call.
-// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Bucket.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Bucket{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Creates a new bucket.",
- // "httpMethod": "POST",
- // "id": "storage.buckets.insert",
- // "parameterOrder": [
- // "project"
- // ],
- // "parameters": {
- // "predefinedAcl": {
- // "description": "Apply a predefined set of access controls to this bucket.",
- // "enum": [
- // "authenticatedRead",
- // "private",
- // "projectPrivate",
- // "publicRead",
- // "publicReadWrite"
- // ],
- // "enumDescriptions": [
- // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
- // "Project team owners get OWNER access.",
- // "Project team members get access according to their roles.",
- // "Project team owners get OWNER access, and allUsers get READER access.",
- // "Project team owners get OWNER access, and allUsers get WRITER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "predefinedDefaultObjectAcl": {
- // "description": "Apply a predefined set of default object access controls to this bucket.",
- // "enum": [
- // "authenticatedRead",
- // "bucketOwnerFullControl",
- // "bucketOwnerRead",
- // "private",
- // "projectPrivate",
- // "publicRead"
- // ],
- // "enumDescriptions": [
- // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- // "Object owner gets OWNER access, and project team owners get OWNER access.",
- // "Object owner gets OWNER access, and project team owners get READER access.",
- // "Object owner gets OWNER access.",
- // "Object owner gets OWNER access, and project team members get access according to their roles.",
- // "Object owner gets OWNER access, and allUsers get READER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "project": {
- // "description": "A valid API project identifier.",
- // "location": "query",
- // "required": true,
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit acl and defaultObjectAcl properties."
- // ],
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b",
- // "request": {
- // "$ref": "Bucket"
- // },
- // "response": {
- // "$ref": "Bucket"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
-// method id "storage.buckets.list":
-
-type BucketsListCall struct {
- s *Service
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
-}
-
-// List: Retrieves a list of buckets for a given project.
-func (r *BucketsService) List(projectid string) *BucketsListCall {
- c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.urlParams_.Set("project", projectid)
- return c
-}
-
-// MaxResults sets the optional parameter "maxResults": Maximum number
-// of buckets to return.
-func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall {
- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
- return c
-}
-
-// PageToken sets the optional parameter "pageToken": A
-// previously-returned page token representing part of the larger set of
-// results to view.
-func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall {
- c.urlParams_.Set("pageToken", pageToken)
- return c
-}
-
-// Prefix sets the optional parameter "prefix": Filter results to
-// buckets whose names begin with this prefix.
-func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall {
- c.urlParams_.Set("prefix", prefix)
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit acl and defaultObjectAcl properties.
-func (c *BucketsListCall) Projection(projection string) *BucketsListCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
- // error from Do is the result of If-None-Match.
-func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.SetOpaque(req.URL)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- req.Header.Set("If-None-Match", c.ifNoneMatch_)
- }
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.buckets.list" call.
-// Exactly one of *Buckets or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Buckets.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Buckets{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Retrieves a list of buckets for a given project.",
- // "httpMethod": "GET",
- // "id": "storage.buckets.list",
- // "parameterOrder": [
- // "project"
- // ],
- // "parameters": {
- // "maxResults": {
- // "description": "Maximum number of buckets to return.",
- // "format": "uint32",
- // "location": "query",
- // "minimum": "0",
- // "type": "integer"
- // },
- // "pageToken": {
- // "description": "A previously-returned page token representing part of the larger set of results to view.",
- // "location": "query",
- // "type": "string"
- // },
- // "prefix": {
- // "description": "Filter results to buckets whose names begin with this prefix.",
- // "location": "query",
- // "type": "string"
- // },
- // "project": {
- // "description": "A valid API project identifier.",
- // "location": "query",
- // "required": true,
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to noAcl.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit acl and defaultObjectAcl properties."
- // ],
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b",
- // "response": {
- // "$ref": "Buckets"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
-// Pages invokes f for each page of results.
-// A non-nil error returned from f will halt the iteration.
-// The provided context supersedes any context provided to the Context method.
-func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error {
- c.ctx_ = ctx
- defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
- for {
- x, err := c.Do()
- if err != nil {
- return err
- }
- if err := f(x); err != nil {
- return err
- }
- if x.NextPageToken == "" {
- return nil
- }
- c.PageToken(x.NextPageToken)
- }
-}
-
-// method id "storage.buckets.patch":
-
-type BucketsPatchCall struct {
- s *Service
- bucket string
- bucket2 *Bucket
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Patch: Updates a bucket. This method supports patch semantics.
-func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall {
- c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.bucket2 = bucket2
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the return of the bucket metadata
-// conditional on whether the bucket's current metageneration matches
-// the given value.
-func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the return of the bucket metadata
-// conditional on whether the bucket's current metageneration does not
-// match the given value.
-func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
-// predefined set of access controls to this bucket.
-//
-// Possible values:
-// "authenticatedRead" - Project team owners get OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "private" - Project team owners get OWNER access.
-// "projectPrivate" - Project team members get access according to
-// their roles.
-// "publicRead" - Project team owners get OWNER access, and allUsers
-// get READER access.
-// "publicReadWrite" - Project team owners get OWNER access, and
-// allUsers get WRITER access.
-func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall {
- c.urlParams_.Set("predefinedAcl", predefinedAcl)
- return c
-}
-
-// PredefinedDefaultObjectAcl sets the optional parameter
-// "predefinedDefaultObjectAcl": Apply a predefined set of default
-// object access controls to this bucket.
-//
-// Possible values:
-// "authenticatedRead" - Object owner gets OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and
-// project team owners get OWNER access.
-// "bucketOwnerRead" - Object owner gets OWNER access, and project
-// team owners get READER access.
-// "private" - Object owner gets OWNER access.
-// "projectPrivate" - Object owner gets OWNER access, and project team
-// members get access according to their roles.
-// "publicRead" - Object owner gets OWNER access, and allUsers get
-// READER access.
-func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall {
- c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl)
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to full.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit acl and defaultObjectAcl properties.
-func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PATCH", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.buckets.patch" call.
-// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Bucket.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Bucket{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates a bucket. This method supports patch semantics.",
- // "httpMethod": "PATCH",
- // "id": "storage.buckets.patch",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "predefinedAcl": {
- // "description": "Apply a predefined set of access controls to this bucket.",
- // "enum": [
- // "authenticatedRead",
- // "private",
- // "projectPrivate",
- // "publicRead",
- // "publicReadWrite"
- // ],
- // "enumDescriptions": [
- // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
- // "Project team owners get OWNER access.",
- // "Project team members get access according to their roles.",
- // "Project team owners get OWNER access, and allUsers get READER access.",
- // "Project team owners get OWNER access, and allUsers get WRITER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "predefinedDefaultObjectAcl": {
- // "description": "Apply a predefined set of default object access controls to this bucket.",
- // "enum": [
- // "authenticatedRead",
- // "bucketOwnerFullControl",
- // "bucketOwnerRead",
- // "private",
- // "projectPrivate",
- // "publicRead"
- // ],
- // "enumDescriptions": [
- // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- // "Object owner gets OWNER access, and project team owners get OWNER access.",
- // "Object owner gets OWNER access, and project team owners get READER access.",
- // "Object owner gets OWNER access.",
- // "Object owner gets OWNER access, and project team members get access according to their roles.",
- // "Object owner gets OWNER access, and allUsers get READER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to full.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit acl and defaultObjectAcl properties."
- // ],
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}",
- // "request": {
- // "$ref": "Bucket"
- // },
- // "response": {
- // "$ref": "Bucket"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
-// method id "storage.buckets.update":
-
-type BucketsUpdateCall struct {
- s *Service
- bucket string
- bucket2 *Bucket
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Update: Updates a bucket.
-func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall {
- c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.bucket2 = bucket2
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the return of the bucket metadata
-// conditional on whether the bucket's current metageneration matches
-// the given value.
-func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the return of the bucket metadata
-// conditional on whether the bucket's current metageneration does not
-// match the given value.
-func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
-// predefined set of access controls to this bucket.
-//
-// Possible values:
-// "authenticatedRead" - Project team owners get OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "private" - Project team owners get OWNER access.
-// "projectPrivate" - Project team members get access according to
-// their roles.
-// "publicRead" - Project team owners get OWNER access, and allUsers
-// get READER access.
-// "publicReadWrite" - Project team owners get OWNER access, and
-// allUsers get WRITER access.
-func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall {
- c.urlParams_.Set("predefinedAcl", predefinedAcl)
- return c
-}
-
-// PredefinedDefaultObjectAcl sets the optional parameter
-// "predefinedDefaultObjectAcl": Apply a predefined set of default
-// object access controls to this bucket.
-//
-// Possible values:
-// "authenticatedRead" - Object owner gets OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and
-// project team owners get OWNER access.
-// "bucketOwnerRead" - Object owner gets OWNER access, and project
-// team owners get READER access.
-// "private" - Object owner gets OWNER access.
-// "projectPrivate" - Object owner gets OWNER access, and project team
-// members get access according to their roles.
-// "publicRead" - Object owner gets OWNER access, and allUsers get
-// READER access.
-func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall {
- c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl)
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to full.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit acl and defaultObjectAcl properties.
-func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PUT", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.buckets.update" call.
-// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Bucket.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Bucket{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates a bucket.",
- // "httpMethod": "PUT",
- // "id": "storage.buckets.update",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "predefinedAcl": {
- // "description": "Apply a predefined set of access controls to this bucket.",
- // "enum": [
- // "authenticatedRead",
- // "private",
- // "projectPrivate",
- // "publicRead",
- // "publicReadWrite"
- // ],
- // "enumDescriptions": [
- // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
- // "Project team owners get OWNER access.",
- // "Project team members get access according to their roles.",
- // "Project team owners get OWNER access, and allUsers get READER access.",
- // "Project team owners get OWNER access, and allUsers get WRITER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "predefinedDefaultObjectAcl": {
- // "description": "Apply a predefined set of default object access controls to this bucket.",
- // "enum": [
- // "authenticatedRead",
- // "bucketOwnerFullControl",
- // "bucketOwnerRead",
- // "private",
- // "projectPrivate",
- // "publicRead"
- // ],
- // "enumDescriptions": [
- // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- // "Object owner gets OWNER access, and project team owners get OWNER access.",
- // "Object owner gets OWNER access, and project team owners get READER access.",
- // "Object owner gets OWNER access.",
- // "Object owner gets OWNER access, and project team members get access according to their roles.",
- // "Object owner gets OWNER access, and allUsers get READER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to full.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit acl and defaultObjectAcl properties."
- // ],
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}",
- // "request": {
- // "$ref": "Bucket"
- // },
- // "response": {
- // "$ref": "Bucket"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
-// method id "storage.channels.stop":
-
-type ChannelsStopCall struct {
- s *Service
- channel *Channel
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
- // Stop: Stop watching resources through this channel.
-func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall {
- c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.channel = channel
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.SetOpaque(req.URL)
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.channels.stop" call.
-func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if err != nil {
- return err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return err
- }
- return nil
- // {
- // "description": "Stop watching resources through this channel",
- // "httpMethod": "POST",
- // "id": "storage.channels.stop",
- // "path": "channels/stop",
- // "request": {
- // "$ref": "Channel",
- // "parameterName": "resource"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
-// method id "storage.defaultObjectAccessControls.delete":
-
-type DefaultObjectAccessControlsDeleteCall struct {
- s *Service
- bucket string
- entity string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Delete: Permanently deletes the default object ACL entry for the
-// specified entity on the specified bucket.
-func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall {
- c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.entity = entity
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("DELETE", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "entity": c.entity,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.defaultObjectAccessControls.delete" call.
-func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if err != nil {
- return err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return err
- }
- return nil
- // {
- // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.",
- // "httpMethod": "DELETE",
- // "id": "storage.defaultObjectAccessControls.delete",
- // "parameterOrder": [
- // "bucket",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/defaultObjectAcl/{entity}",
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.defaultObjectAccessControls.get":
-
-type DefaultObjectAccessControlsGetCall struct {
- s *Service
- bucket string
- entity string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
-}
-
-// Get: Returns the default object ACL entry for the specified entity on
-// the specified bucket.
-func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall {
- c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.entity = entity
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
- // error from Do is the result of If-None-Match.
-func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "entity": c.entity,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- req.Header.Set("If-None-Match", c.ifNoneMatch_)
- }
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.defaultObjectAccessControls.get" call.
-// Exactly one of *ObjectAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.",
- // "httpMethod": "GET",
- // "id": "storage.defaultObjectAccessControls.get",
- // "parameterOrder": [
- // "bucket",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/defaultObjectAcl/{entity}",
- // "response": {
- // "$ref": "ObjectAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.defaultObjectAccessControls.insert":
-
-type DefaultObjectAccessControlsInsertCall struct {
- s *Service
- bucket string
- objectaccesscontrol *ObjectAccessControl
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Insert: Creates a new default object ACL entry on the specified
-// bucket.
-func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall {
- c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.objectaccesscontrol = objectaccesscontrol
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.defaultObjectAccessControls.insert" call.
-// Exactly one of *ObjectAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Creates a new default object ACL entry on the specified bucket.",
- // "httpMethod": "POST",
- // "id": "storage.defaultObjectAccessControls.insert",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/defaultObjectAcl",
- // "request": {
- // "$ref": "ObjectAccessControl"
- // },
- // "response": {
- // "$ref": "ObjectAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.defaultObjectAccessControls.list":
-
-type DefaultObjectAccessControlsListCall struct {
- s *Service
- bucket string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
-}
-
-// List: Retrieves default object ACL entries on the specified bucket.
-func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall {
- c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": If present, only return default ACL listing
-// if the bucket's current metageneration matches this value.
-func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": If present, only return default ACL
-// listing if the bucket's current metageneration does not match the
-// given value.
-func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
- // error from Do is the result of If-None-Match.
-func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- req.Header.Set("If-None-Match", c.ifNoneMatch_)
- }
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.defaultObjectAccessControls.list" call.
-// Exactly one of *ObjectAccessControls or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControls.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControls{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Retrieves default object ACL entries on the specified bucket.",
- // "httpMethod": "GET",
- // "id": "storage.defaultObjectAccessControls.list",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/defaultObjectAcl",
- // "response": {
- // "$ref": "ObjectAccessControls"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.defaultObjectAccessControls.patch":
-
-type DefaultObjectAccessControlsPatchCall struct {
- s *Service
- bucket string
- entity string
- objectaccesscontrol *ObjectAccessControl
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Patch: Updates a default object ACL entry on the specified bucket.
-// This method supports patch semantics.
-func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall {
- c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.entity = entity
- c.objectaccesscontrol = objectaccesscontrol
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PATCH", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "entity": c.entity,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.defaultObjectAccessControls.patch" call.
-// Exactly one of *ObjectAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.",
- // "httpMethod": "PATCH",
- // "id": "storage.defaultObjectAccessControls.patch",
- // "parameterOrder": [
- // "bucket",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/defaultObjectAcl/{entity}",
- // "request": {
- // "$ref": "ObjectAccessControl"
- // },
- // "response": {
- // "$ref": "ObjectAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.defaultObjectAccessControls.update":
-
-type DefaultObjectAccessControlsUpdateCall struct {
- s *Service
- bucket string
- entity string
- objectaccesscontrol *ObjectAccessControl
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Update: Updates a default object ACL entry on the specified bucket.
-func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall {
- c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.entity = entity
- c.objectaccesscontrol = objectaccesscontrol
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PUT", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "entity": c.entity,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.defaultObjectAccessControls.update" call.
-// Exactly one of *ObjectAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates a default object ACL entry on the specified bucket.",
- // "httpMethod": "PUT",
- // "id": "storage.defaultObjectAccessControls.update",
- // "parameterOrder": [
- // "bucket",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/defaultObjectAcl/{entity}",
- // "request": {
- // "$ref": "ObjectAccessControl"
- // },
- // "response": {
- // "$ref": "ObjectAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
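Since this hunk removes only generated client plumbing, a usage sketch may help show what the deleted surface looked like from a caller's side. The snippet below is not part of the vendored file: it assumes Application Default Credentials are available via golang.org/x/oauth2/google, that `example-bucket` and the entity are placeholders, and that the package is imported from its upstream path google.golang.org/api/storage/v1.

```go
package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	// Application Default Credentials; the full_control scope is required
	// for ACL mutations (see the "scopes" list in the method metadata).
	client, err := google.DefaultClient(ctx, storage.DevstorageFullControlScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := storage.New(client)
	if err != nil {
		log.Fatal(err)
	}
	// Grant READER to all authenticated users on future objects in the bucket.
	acl := &storage.ObjectAccessControl{Entity: "allAuthenticatedUsers", Role: "READER"}
	updated, err := svc.DefaultObjectAccessControls.
		Update("example-bucket", "allAuthenticatedUsers", acl).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("default ACL entry: %s -> %s", updated.Entity, updated.Role)
}
```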
-// method id "storage.objectAccessControls.delete":
-
-type ObjectAccessControlsDeleteCall struct {
- s *Service
- bucket string
- object string
- entity string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Delete: Permanently deletes the ACL entry for the specified entity on
-// the specified object.
-func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall {
- c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- c.entity = entity
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("DELETE", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- "entity": c.entity,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.objectAccessControls.delete" call.
-func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if err != nil {
- return err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return err
- }
- return nil
- // {
- // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.",
- // "httpMethod": "DELETE",
- // "id": "storage.objectAccessControls.delete",
- // "parameterOrder": [
- // "bucket",
- // "object",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}/acl/{entity}",
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
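A short sketch of the delete call above, assuming `svc` is an already-constructed *storage.Service (as in the earlier example); the bucket, object, and entity names are placeholders.

```go
// removeObjectACL drops the ACL entry for one entity on a specific
// object generation. Do returns only an error for this call.
func removeObjectACL(svc *storage.Service, gen int64) error {
	return svc.ObjectAccessControls.
		Delete("example-bucket", "notes.txt", "user-alice@example.com").
		Generation(gen).
		Do()
}
```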
-// method id "storage.objectAccessControls.get":
-
-type ObjectAccessControlsGetCall struct {
- s *Service
- bucket string
- object string
- entity string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
-}
-
-// Get: Returns the ACL entry for the specified entity on the specified
-// object.
-func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall {
- c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- c.entity = entity
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
- // error from Do is the result of If-None-Match.
-func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- "entity": c.entity,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- req.Header.Set("If-None-Match", c.ifNoneMatch_)
- }
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.objectAccessControls.get" call.
-// Exactly one of *ObjectAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Returns the ACL entry for the specified entity on the specified object.",
- // "httpMethod": "GET",
- // "id": "storage.objectAccessControls.get",
- // "parameterOrder": [
- // "bucket",
- // "object",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}/acl/{entity}",
- // "response": {
- // "$ref": "ObjectAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
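The If-None-Match flow documented above can be exercised as in this sketch; `svc` and the cached ETag are supplied by the caller, and `googleapi` is google.golang.org/api/googleapi.

```go
// refreshObjectACL re-fetches an ACL entry only if it changed since the
// cached copy; a nil entry with nil error means the cache is still valid.
func refreshObjectACL(svc *storage.Service, cachedETag string) (*storage.ObjectAccessControl, error) {
	acl, err := svc.ObjectAccessControls.
		Get("example-bucket", "notes.txt", "user-alice@example.com").
		IfNoneMatch(cachedETag).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // unchanged since cachedETag
	}
	return acl, err
}
```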
-// method id "storage.objectAccessControls.insert":
-
-type ObjectAccessControlsInsertCall struct {
- s *Service
- bucket string
- object string
- objectaccesscontrol *ObjectAccessControl
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Insert: Creates a new ACL entry on the specified object.
-func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall {
- c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- c.objectaccesscontrol = objectaccesscontrol
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.objectAccessControls.insert" call.
-// Exactly one of *ObjectAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Creates a new ACL entry on the specified object.",
- // "httpMethod": "POST",
- // "id": "storage.objectAccessControls.insert",
- // "parameterOrder": [
- // "bucket",
- // "object"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}/acl",
- // "request": {
- // "$ref": "ObjectAccessControl"
- // },
- // "response": {
- // "$ref": "ObjectAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
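The insert call can be combined with a partial response via Fields; again a sketch with placeholder names, and the choice of "entity" and "role" as the returned fields is only an assumption about what the caller needs.

```go
// grantReader adds a READER entry for a user and asks the server to
// return only the entity and role fields of the new ACL resource.
func grantReader(svc *storage.Service) (*storage.ObjectAccessControl, error) {
	entry := &storage.ObjectAccessControl{Entity: "user-alice@example.com", Role: "READER"}
	return svc.ObjectAccessControls.
		Insert("example-bucket", "notes.txt", entry).
		Fields("entity", "role").
		Do()
}
```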
-// method id "storage.objectAccessControls.list":
-
-type ObjectAccessControlsListCall struct {
- s *Service
- bucket string
- object string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
-}
-
-// List: Retrieves ACL entries on the specified object.
-func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall {
- c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
- // error from Do is the result of If-None-Match.
-func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- req.Header.Set("If-None-Match", c.ifNoneMatch_)
- }
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.objectAccessControls.list" call.
-// Exactly one of *ObjectAccessControls or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControls.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControls{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Retrieves ACL entries on the specified object.",
- // "httpMethod": "GET",
- // "id": "storage.objectAccessControls.list",
- // "parameterOrder": [
- // "bucket",
- // "object"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}/acl",
- // "response": {
- // "$ref": "ObjectAccessControls"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
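Listing works the same way. The sketch below only inspects the embedded ServerResponse rather than the returned Items, to stay independent of the exact element type this generator revision produced; `log` is the standard library logger.

```go
// listObjectACLs fetches the ACL collection for an object and reports
// the HTTP status recorded in the embedded ServerResponse.
func listObjectACLs(svc *storage.Service) error {
	acls, err := svc.ObjectAccessControls.List("example-bucket", "notes.txt").Do()
	if err != nil {
		return err
	}
	log.Printf("ACL list fetched, HTTP %d", acls.HTTPStatusCode)
	return nil
}
```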
-// method id "storage.objectAccessControls.patch":
-
-type ObjectAccessControlsPatchCall struct {
- s *Service
- bucket string
- object string
- entity string
- objectaccesscontrol *ObjectAccessControl
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Patch: Updates an ACL entry on the specified object. This method
-// supports patch semantics.
-func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall {
- c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- c.entity = entity
- c.objectaccesscontrol = objectaccesscontrol
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PATCH", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- "entity": c.entity,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.objectAccessControls.patch" call.
-// Exactly one of *ObjectAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates an ACL entry on the specified object. This method supports patch semantics.",
- // "httpMethod": "PATCH",
- // "id": "storage.objectAccessControls.patch",
- // "parameterOrder": [
- // "bucket",
- // "object",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}/acl/{entity}",
- // "request": {
- // "$ref": "ObjectAccessControl"
- // },
- // "response": {
- // "$ref": "ObjectAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.objectAccessControls.update":
-
-type ObjectAccessControlsUpdateCall struct {
- s *Service
- bucket string
- object string
- entity string
- objectaccesscontrol *ObjectAccessControl
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Update: Updates an ACL entry on the specified object.
-func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall {
- c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- c.entity = entity
- c.objectaccesscontrol = objectaccesscontrol
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PUT", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- "entity": c.entity,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.objectAccessControls.update" call.
-// Exactly one of *ObjectAccessControl or error will be non-nil. Any
-// non-2xx status code is an error. Response headers are in either
-// *ObjectAccessControl.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &ObjectAccessControl{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates an ACL entry on the specified object.",
- // "httpMethod": "PUT",
- // "id": "storage.objectAccessControls.update",
- // "parameterOrder": [
- // "bucket",
- // "object",
- // "entity"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of a bucket.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "entity": {
- // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}/acl/{entity}",
- // "request": {
- // "$ref": "ObjectAccessControl"
- // },
- // "response": {
- // "$ref": "ObjectAccessControl"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control"
- // ]
- // }
-
-}
-
-// method id "storage.objects.compose":
-
-type ObjectsComposeCall struct {
- s *Service
- destinationBucket string
- destinationObject string
- composerequest *ComposeRequest
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Compose: Concatenates a list of existing objects into a new object in
-// the same bucket.
-func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall {
- c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.destinationBucket = destinationBucket
- c.destinationObject = destinationObject
- c.composerequest = composerequest
- return c
-}
-
-// DestinationPredefinedAcl sets the optional parameter
-// "destinationPredefinedAcl": Apply a predefined set of access controls
-// to the destination object.
-//
-// Possible values:
-// "authenticatedRead" - Object owner gets OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and
-// project team owners get OWNER access.
-// "bucketOwnerRead" - Object owner gets OWNER access, and project
-// team owners get READER access.
-// "private" - Object owner gets OWNER access.
-// "projectPrivate" - Object owner gets OWNER access, and project team
-// members get access according to their roles.
-// "publicRead" - Object owner gets OWNER access, and allUsers get
-// READER access.
-func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall {
- c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl)
- return c
-}
-
-// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
-// Makes the operation conditional on whether the object's current
-// generation matches the given value.
-func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall {
- c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the operation conditional on whether
-// the object's current metageneration matches the given value.
-func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do and Download
-// methods. Any pending HTTP request will be aborted if the provided
-// context is canceled.
-func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "destinationBucket": c.destinationBucket,
- "destinationObject": c.destinationObject,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Download fetches the API endpoint's "media" value, instead of the normal
-// API response value. If the returned error is nil, the Response is guaranteed to
-// have a 2xx status code. Callers must close the Response.Body as usual.
-func (c *ObjectsComposeCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("media")
- if err != nil {
- return nil, err
- }
- if err := googleapi.CheckMediaResponse(res); err != nil {
- res.Body.Close()
- return nil, err
- }
- return res, nil
-}
-
-// Do executes the "storage.objects.compose" call.
-// Exactly one of *Object or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Object.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Object{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Concatenates a list of existing objects into a new object in the same bucket.",
- // "httpMethod": "POST",
- // "id": "storage.objects.compose",
- // "parameterOrder": [
- // "destinationBucket",
- // "destinationObject"
- // ],
- // "parameters": {
- // "destinationBucket": {
- // "description": "Name of the bucket in which to store the new object.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "destinationObject": {
- // "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "destinationPredefinedAcl": {
- // "description": "Apply a predefined set of access controls to the destination object.",
- // "enum": [
- // "authenticatedRead",
- // "bucketOwnerFullControl",
- // "bucketOwnerRead",
- // "private",
- // "projectPrivate",
- // "publicRead"
- // ],
- // "enumDescriptions": [
- // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- // "Object owner gets OWNER access, and project team owners get OWNER access.",
- // "Object owner gets OWNER access, and project team owners get READER access.",
- // "Object owner gets OWNER access.",
- // "Object owner gets OWNER access, and project team members get access according to their roles.",
- // "Object owner gets OWNER access, and allUsers get READER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{destinationBucket}/o/{destinationObject}/compose",
- // "request": {
- // "$ref": "ComposeRequest"
- // },
- // "response": {
- // "$ref": "Object"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ],
- // "supportsMediaDownload": true,
- // "useMediaDownloadService": true
- // }
-
-}
-
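A sketch of composing two existing objects into one, using the ComposeRequestSourceObjects type from this generated package; all bucket and object names are placeholders.

```go
// concatenate builds "merged.log" from two existing objects in the same
// bucket, setting the destination content type explicitly.
func concatenate(svc *storage.Service) (*storage.Object, error) {
	req := &storage.ComposeRequest{
		Destination: &storage.Object{ContentType: "text/plain"},
		SourceObjects: []*storage.ComposeRequestSourceObjects{
			{Name: "part-1.log"},
			{Name: "part-2.log"},
		},
	}
	return svc.Objects.Compose("example-bucket", "merged.log", req).Do()
}
```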
-// method id "storage.objects.copy":
-
-type ObjectsCopyCall struct {
- s *Service
- sourceBucket string
- sourceObject string
- destinationBucket string
- destinationObject string
- object *Object
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Copy: Copies a source object to a destination object. Optionally
-// overrides metadata.
-func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall {
- c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.sourceBucket = sourceBucket
- c.sourceObject = sourceObject
- c.destinationBucket = destinationBucket
- c.destinationObject = destinationObject
- c.object = object
- return c
-}
-
-// DestinationPredefinedAcl sets the optional parameter
-// "destinationPredefinedAcl": Apply a predefined set of access controls
-// to the destination object.
-//
-// Possible values:
-// "authenticatedRead" - Object owner gets OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and
-// project team owners get OWNER access.
-// "bucketOwnerRead" - Object owner gets OWNER access, and project
-// team owners get READER access.
-// "private" - Object owner gets OWNER access.
-// "projectPrivate" - Object owner gets OWNER access, and project team
-// members get access according to their roles.
-// "publicRead" - Object owner gets OWNER access, and allUsers get
-// READER access.
-func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall {
- c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl)
- return c
-}
-
-// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
-// Makes the operation conditional on whether the destination object's
-// current generation matches the given value.
-func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall {
- c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
- return c
-}
-
-// IfGenerationNotMatch sets the optional parameter
-// "ifGenerationNotMatch": Makes the operation conditional on whether
-// the destination object's current generation does not match the given
-// value.
-func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall {
- c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the operation conditional on whether
-// the destination object's current metageneration matches the given
-// value.
-func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the operation conditional on
-// whether the destination object's current metageneration does not
-// match the given value.
-func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// IfSourceGenerationMatch sets the optional parameter
-// "ifSourceGenerationMatch": Makes the operation conditional on whether
-// the source object's generation matches the given value.
-func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall {
- c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch))
- return c
-}
-
-// IfSourceGenerationNotMatch sets the optional parameter
-// "ifSourceGenerationNotMatch": Makes the operation conditional on
-// whether the source object's generation does not match the given
-// value.
-func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall {
- c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch))
- return c
-}
-
-// IfSourceMetagenerationMatch sets the optional parameter
-// "ifSourceMetagenerationMatch": Makes the operation conditional on
-// whether the source object's current metageneration matches the given
-// value.
-func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall {
- c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch))
- return c
-}
-
-// IfSourceMetagenerationNotMatch sets the optional parameter
-// "ifSourceMetagenerationNotMatch": Makes the operation conditional on
-// whether the source object's current metageneration does not match the
-// given value.
-func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall {
- c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch))
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl, unless the object resource
-// specifies the acl property, when it defaults to full.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit the acl property.
-func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// SourceGeneration sets the optional parameter "sourceGeneration": If
-// present, selects a specific revision of the source object (as opposed
-// to the latest version, the default).
-func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall {
- c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do and Download
-// methods. Any pending HTTP request will be aborted if the provided
-// context is canceled.
-func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.object)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "sourceBucket": c.sourceBucket,
- "sourceObject": c.sourceObject,
- "destinationBucket": c.destinationBucket,
- "destinationObject": c.destinationObject,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Download fetches the API endpoint's "media" value, instead of the normal
-// API response value. If the returned error is nil, the Response is guaranteed to
-// have a 2xx status code. Callers must close the Response.Body as usual.
-func (c *ObjectsCopyCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("media")
- if err != nil {
- return nil, err
- }
- if err := googleapi.CheckMediaResponse(res); err != nil {
- res.Body.Close()
- return nil, err
- }
- return res, nil
-}
-
-// Do executes the "storage.objects.copy" call.
-// Exactly one of *Object or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Object.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Object{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Copies a source object to a destination object. Optionally overrides metadata.",
- // "httpMethod": "POST",
- // "id": "storage.objects.copy",
- // "parameterOrder": [
- // "sourceBucket",
- // "sourceObject",
- // "destinationBucket",
- // "destinationObject"
- // ],
- // "parameters": {
- // "destinationBucket": {
- // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "destinationObject": {
- // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "destinationPredefinedAcl": {
- // "description": "Apply a predefined set of access controls to the destination object.",
- // "enum": [
- // "authenticatedRead",
- // "bucketOwnerFullControl",
- // "bucketOwnerRead",
- // "private",
- // "projectPrivate",
- // "publicRead"
- // ],
- // "enumDescriptions": [
- // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- // "Object owner gets OWNER access, and project team owners get OWNER access.",
- // "Object owner gets OWNER access, and project team owners get READER access.",
- // "Object owner gets OWNER access.",
- // "Object owner gets OWNER access, and project team members get access according to their roles.",
- // "Object owner gets OWNER access, and allUsers get READER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationMatch": {
- // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifSourceGenerationMatch": {
- // "description": "Makes the operation conditional on whether the source object's generation matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifSourceGenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the source object's generation does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifSourceMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifSourceMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit the acl property."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "sourceBucket": {
- // "description": "Name of the bucket in which to find the source object.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "sourceGeneration": {
- // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "sourceObject": {
- // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}",
- // "request": {
- // "$ref": "Object"
- // },
- // "response": {
- // "$ref": "Object"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ],
- // "supportsMediaDownload": true,
- // "useMediaDownloadService": true
- // }
-
-}
-
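The copy call, guarded by a generation precondition on the destination and a predefined ACL; a sketch with placeholder buckets and objects. Passing an empty *storage.Object body requests no metadata overrides.

```go
// copyIfUnchanged copies src to dst only while the destination is still at
// the expected generation, and makes the new copy publicly readable.
func copyIfUnchanged(svc *storage.Service, expectedGen int64) (*storage.Object, error) {
	return svc.Objects.
		Copy("src-bucket", "reports/2016.csv", "dst-bucket", "reports/2016-copy.csv", &storage.Object{}).
		IfGenerationMatch(expectedGen).
		DestinationPredefinedAcl("publicRead").
		Do()
}
```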
-// method id "storage.objects.delete":
-
-type ObjectsDeleteCall struct {
- s *Service
- bucket string
- object string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Delete: Deletes an object and its metadata. Deletions are permanent
-// if versioning is not enabled for the bucket, or if the generation
-// parameter is used.
-func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall {
- c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// permanently deletes a specific revision of this object (as opposed to
-// the latest version, the default).
-func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
-// Makes the operation conditional on whether the object's current
-// generation matches the given value.
-func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall {
- c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
- return c
-}
-
-// IfGenerationNotMatch sets the optional parameter
-// "ifGenerationNotMatch": Makes the operation conditional on whether
-// the object's current generation does not match the given value.
-func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall {
- c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the operation conditional on whether
-// the object's current metageneration matches the given value.
-func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the operation conditional on
-// whether the object's current metageneration does not match the given
-// value.
-func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("DELETE", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.objects.delete" call.
-func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if err != nil {
- return err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return err
- }
- return nil
- // {
- // "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.",
- // "httpMethod": "DELETE",
- // "id": "storage.objects.delete",
- // "parameterOrder": [
- // "bucket",
- // "object"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which the object resides.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}",
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
-// method id "storage.objects.get":
-
-type ObjectsGetCall struct {
- s *Service
- bucket string
- object string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
-}
-
-// Get: Retrieves an object or its metadata.
-func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall {
- c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
-// Makes the operation conditional on whether the object's generation
-// matches the given value.
-func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall {
- c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
- return c
-}
-
-// IfGenerationNotMatch sets the optional parameter
-// "ifGenerationNotMatch": Makes the operation conditional on whether
-// the object's generation does not match the given value.
-func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall {
- c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the operation conditional on whether
-// the object's current metageneration matches the given value.
-func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the operation conditional on
-// whether the object's current metageneration does not match the given
-// value.
-func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit the acl property.
-func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do and Download
-// methods. Any pending HTTP request will be aborted if the provided
-// context is canceled.
-func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- req.Header.Set("If-None-Match", c.ifNoneMatch_)
- }
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Download fetches the API endpoint's "media" value, instead of the normal
-// API response value. If the returned error is nil, the Response is guaranteed to
-// have a 2xx status code. Callers must close the Response.Body as usual.
-func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("media")
- if err != nil {
- return nil, err
- }
- if err := googleapi.CheckMediaResponse(res); err != nil {
- res.Body.Close()
- return nil, err
- }
- return res, nil
-}
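For context, a minimal usage sketch (not part of the generated file, written as if inside this package): the Get call above combined with Download to stream an object's contents. The svc value and the bucket/object names are placeholder assumptions, and the usual io import is assumed.

// downloadObject is a hypothetical helper illustrating ObjectsGetCall.Download.
// svc is assumed to be a *Service built with New(httpClient); bucket and
// object are placeholders.
func downloadObject(svc *Service, bucket, object string, w io.Writer) error {
	res, err := svc.Objects.Get(bucket, object).Download()
	if err != nil {
		return err
	}
	// Per the Download doc above, the caller must close the response body.
	defer res.Body.Close()
	_, err = io.Copy(w, res.Body)
	return err
}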
-
-// Do executes the "storage.objects.get" call.
-// Exactly one of *Object or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Object.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Object{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Retrieves an object or its metadata.",
- // "httpMethod": "GET",
- // "id": "storage.objects.get",
- // "parameterOrder": [
- // "bucket",
- // "object"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which the object resides.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's generation matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's generation does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to noAcl.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit the acl property."
- // ],
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}",
- // "response": {
- // "$ref": "Object"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ],
- // "supportsMediaDownload": true,
- // "useMediaDownloadService": true
- // }
-
-}
-
-// method id "storage.objects.insert":
-
-type ObjectsInsertCall struct {
- s *Service
- bucket string
- object *Object
- urlParams_ gensupport.URLParams
- media_ io.Reader
- resumableBuffer_ *gensupport.ResumableBuffer
- mediaType_ string
- mediaSize_ int64 // mediaSize, if known. Used only for calls to progressUpdater_.
- progressUpdater_ googleapi.ProgressUpdater
- ctx_ context.Context
-}
-
-// Insert: Stores a new object and metadata.
-func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall {
- c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- return c
-}
-
-// ContentEncoding sets the optional parameter "contentEncoding": If
-// set, sets the contentEncoding property of the final object to this
-// value. Setting this parameter is equivalent to setting the
-// contentEncoding metadata property. This can be useful when uploading
-// an object with uploadType=media to indicate the encoding of the
-// content being uploaded.
-func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall {
- c.urlParams_.Set("contentEncoding", contentEncoding)
- return c
-}
-
-// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
-// Makes the operation conditional on whether the object's current
-// generation matches the given value.
-func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall {
- c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
- return c
-}
-
-// IfGenerationNotMatch sets the optional parameter
-// "ifGenerationNotMatch": Makes the operation conditional on whether
-// the object's current generation does not match the given value.
-func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall {
- c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the operation conditional on whether
-// the object's current metageneration matches the given value.
-func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the operation conditional on
-// whether the object's current metageneration does not match the given
-// value.
-func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// Name sets the optional parameter "name": Name of the object. Required
-// when the object metadata is not otherwise provided. Overrides the
-// object metadata's name value, if any. For information about how to
-// URL encode object names to be path safe, see Encoding URI Path Parts.
-func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall {
- c.urlParams_.Set("name", name)
- return c
-}
-
-// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
-// predefined set of access controls to this object.
-//
-// Possible values:
-// "authenticatedRead" - Object owner gets OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and
-// project team owners get OWNER access.
-// "bucketOwnerRead" - Object owner gets OWNER access, and project
-// team owners get READER access.
-// "private" - Object owner gets OWNER access.
-// "projectPrivate" - Object owner gets OWNER access, and project team
-// members get access according to their roles.
-// "publicRead" - Object owner gets OWNER access, and allUsers get
-// READER access.
-func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall {
- c.urlParams_.Set("predefinedAcl", predefinedAcl)
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl, unless the object resource
-// specifies the acl property, when it defaults to full.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit the acl property.
-func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// Media specifies the media to upload in one or more chunks. The chunk
-// size may be controlled by supplying a MediaOption generated by
-// googleapi.ChunkSize. The chunk size defaults to
-// googleapi.DefaultUploadChunkSize. The Content-Type header used in the
-// upload request will be determined by sniffing the contents of r,
-// unless a MediaOption generated by googleapi.ContentType is
-// supplied.
-// At most one of Media and ResumableMedia may be set.
-func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall {
- opts := googleapi.ProcessMediaOptions(options)
- chunkSize := opts.ChunkSize
- if !opts.ForceEmptyContentType {
- r, c.mediaType_ = gensupport.DetermineContentType(r, opts.ContentType)
- }
- c.media_, c.resumableBuffer_ = gensupport.PrepareUpload(r, chunkSize)
- return c
-}
-
-// ResumableMedia specifies the media to upload in chunks and can be
-// canceled with ctx.
-//
-// Deprecated: use Media instead.
-//
-// At most one of Media and ResumableMedia may be set. mediaType
-// identifies the MIME media type of the upload, such as "image/png". If
-// mediaType is "", it will be auto-detected. The provided ctx will
-// supersede any context previously provided to the Context method.
-func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall {
- c.ctx_ = ctx
- rdr := gensupport.ReaderAtToReader(r, size)
- rdr, c.mediaType_ = gensupport.DetermineContentType(rdr, mediaType)
- c.resumableBuffer_ = gensupport.NewResumableBuffer(rdr, googleapi.DefaultUploadChunkSize)
- c.media_ = nil
- c.mediaSize_ = size
- return c
-}
-
-// ProgressUpdater provides a callback function that will be called
-// after every chunk. It should be a low-latency function in order to
-// not slow down the upload operation. This should only be called when
-// using ResumableMedia (as opposed to Media).
-func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall {
- c.progressUpdater_ = pu
- return c
-}
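As a hedged sketch of how the upload pieces above fit together (again not part of the generated file): Insert supplies the object metadata, Media attaches the content, and Do performs the upload. The googleapi.ContentType option is used here to skip content sniffing, as the Media doc describes; svc and all names are placeholder assumptions.

// uploadObject is a hypothetical helper illustrating Insert + Media + Do.
// svc is assumed to be a *Service built with New(httpClient).
func uploadObject(ctx context.Context, svc *Service, bucket, name string, r io.Reader) (*Object, error) {
	obj := &Object{Name: name}
	return svc.Objects.Insert(bucket, obj).
		Media(r, googleapi.ContentType("application/octet-stream")).
		Context(ctx).
		Do()
}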
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-// This context will supersede any context previously provided to the
-// ResumableMedia method.
-func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.object)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o")
- if c.media_ != nil || c.resumableBuffer_ != nil {
- urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1)
- protocol := "multipart"
- if c.resumableBuffer_ != nil {
- protocol = "resumable"
- }
- c.urlParams_.Set("uploadType", protocol)
- }
- urls += "?" + c.urlParams_.Encode()
- if c.media_ != nil {
- var combined io.ReadCloser
- combined, ctype = gensupport.CombineBodyMedia(body, ctype, c.media_, c.mediaType_)
- defer combined.Close()
- body = combined
- }
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- if c.resumableBuffer_ != nil && c.mediaType_ != "" {
- req.Header.Set("X-Upload-Content-Type", c.mediaType_)
- }
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.objects.insert" call.
-// Exactly one of *Object or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Object.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := gensupport.Retry(c.ctx_, func() (*http.Response, error) {
- return c.doRequest("json")
- }, gensupport.DefaultBackoffStrategy())
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- if c.resumableBuffer_ != nil {
- loc := res.Header.Get("Location")
- rx := &gensupport.ResumableUpload{
- Client: c.s.client,
- UserAgent: c.s.userAgent(),
- URI: loc,
- Media: c.resumableBuffer_,
- MediaType: c.mediaType_,
- Callback: func(curr int64) {
- if c.progressUpdater_ != nil {
- c.progressUpdater_(curr, c.mediaSize_)
- }
- },
- }
- ctx := c.ctx_
- if ctx == nil {
- ctx = context.TODO()
- }
- res, err = rx.Upload(ctx)
- if err != nil {
- return nil, err
- }
- defer res.Body.Close()
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- }
- ret := &Object{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Stores a new object and metadata.",
- // "httpMethod": "POST",
- // "id": "storage.objects.insert",
- // "mediaUpload": {
- // "accept": [
- // "*/*"
- // ],
- // "protocols": {
- // "resumable": {
- // "multipart": true,
- // "path": "/resumable/upload/storage/v1/b/{bucket}/o"
- // },
- // "simple": {
- // "multipart": true,
- // "path": "/upload/storage/v1/b/{bucket}/o"
- // }
- // }
- // },
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "contentEncoding": {
- // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "name": {
- // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "query",
- // "type": "string"
- // },
- // "predefinedAcl": {
- // "description": "Apply a predefined set of access controls to this object.",
- // "enum": [
- // "authenticatedRead",
- // "bucketOwnerFullControl",
- // "bucketOwnerRead",
- // "private",
- // "projectPrivate",
- // "publicRead"
- // ],
- // "enumDescriptions": [
- // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- // "Object owner gets OWNER access, and project team owners get OWNER access.",
- // "Object owner gets OWNER access, and project team owners get READER access.",
- // "Object owner gets OWNER access.",
- // "Object owner gets OWNER access, and project team members get access according to their roles.",
- // "Object owner gets OWNER access, and allUsers get READER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit the acl property."
- // ],
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o",
- // "request": {
- // "$ref": "Object"
- // },
- // "response": {
- // "$ref": "Object"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ],
- // "supportsMediaDownload": true,
- // "supportsMediaUpload": true,
- // "useMediaDownloadService": true
- // }
-
-}
-
-// method id "storage.objects.list":
-
-type ObjectsListCall struct {
- s *Service
- bucket string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
-}
-
-// List: Retrieves a list of objects matching the criteria.
-func (r *ObjectsService) List(bucket string) *ObjectsListCall {
- c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- return c
-}
-
-// Delimiter sets the optional parameter "delimiter": Returns results in
-// a directory-like mode. items will contain only objects whose names,
-// aside from the prefix, do not contain delimiter. Objects whose names,
-// aside from the prefix, contain delimiter will have their name,
-// truncated after the delimiter, returned in prefixes. Duplicate
-// prefixes are omitted.
-func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall {
- c.urlParams_.Set("delimiter", delimiter)
- return c
-}
-
-// MaxResults sets the optional parameter "maxResults": Maximum number
-// of items plus prefixes to return. As duplicate prefixes are omitted,
-// fewer total results may be returned than requested. The default value
-// of this parameter is 1,000 items.
-func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall {
- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
- return c
-}
-
-// PageToken sets the optional parameter "pageToken": A
-// previously-returned page token representing part of the larger set of
-// results to view.
-func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall {
- c.urlParams_.Set("pageToken", pageToken)
- return c
-}
-
-// Prefix sets the optional parameter "prefix": Filter results to
-// objects whose names begin with this prefix.
-func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall {
- c.urlParams_.Set("prefix", prefix)
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit the acl property.
-func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// Versions sets the optional parameter "versions": If true, lists all
-// versions of an object as distinct results. The default is false. For
-// more information, see Object Versioning.
-func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall {
- c.urlParams_.Set("versions", fmt.Sprint(versions))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of If-None-Match.
-func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- req.Header.Set("If-None-Match", c.ifNoneMatch_)
- }
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.objects.list" call.
-// Exactly one of *Objects or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Objects.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Objects{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Retrieves a list of objects matching the criteria.",
- // "httpMethod": "GET",
- // "id": "storage.objects.list",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which to look for objects.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "delimiter": {
- // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
- // "location": "query",
- // "type": "string"
- // },
- // "maxResults": {
- // "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.",
- // "format": "uint32",
- // "location": "query",
- // "minimum": "0",
- // "type": "integer"
- // },
- // "pageToken": {
- // "description": "A previously-returned page token representing part of the larger set of results to view.",
- // "location": "query",
- // "type": "string"
- // },
- // "prefix": {
- // "description": "Filter results to objects whose names begin with this prefix.",
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to noAcl.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit the acl property."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "versions": {
- // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
- // "location": "query",
- // "type": "boolean"
- // }
- // },
- // "path": "b/{bucket}/o",
- // "response": {
- // "$ref": "Objects"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ],
- // "supportsSubscription": true
- // }
-
-}
-
-// Pages invokes f for each page of results.
-// A non-nil error returned from f will halt the iteration.
-// The provided context supersedes any context provided to the Context method.
-func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error {
- c.ctx_ = ctx
- defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
- for {
- x, err := c.Do()
- if err != nil {
- return err
- }
- if err := f(x); err != nil {
- return err
- }
- if x.NextPageToken == "" {
- return nil
- }
- c.PageToken(x.NextPageToken)
- }
-}
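A short sketch of the paging pattern above (not part of the generated file): Pages repeatedly invokes Do, feeding NextPageToken back through PageToken, so the callback only has to consume each page. The svc value is a placeholder, and the Items/Name fields are assumed from the Objects schema defined earlier in this file.

// listAllObjects is a hypothetical helper illustrating ObjectsListCall.Pages.
// svc is assumed to be a *Service built with New(httpClient).
func listAllObjects(ctx context.Context, svc *Service, bucket, prefix string) ([]string, error) {
	var names []string
	err := svc.Objects.List(bucket).Prefix(prefix).Pages(ctx, func(page *Objects) error {
		for _, obj := range page.Items {
			names = append(names, obj.Name)
		}
		return nil
	})
	return names, err
}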
-
-// method id "storage.objects.patch":
-
-type ObjectsPatchCall struct {
- s *Service
- bucket string
- object string
- object2 *Object
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Patch: Updates an object's metadata. This method supports patch
-// semantics.
-func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall {
- c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- c.object2 = object2
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
-// Makes the operation conditional on whether the object's current
-// generation matches the given value.
-func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall {
- c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
- return c
-}
-
-// IfGenerationNotMatch sets the optional parameter
-// "ifGenerationNotMatch": Makes the operation conditional on whether
-// the object's current generation does not match the given value.
-func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall {
- c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the operation conditional on whether
-// the object's current metageneration matches the given value.
-func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the operation conditional on
-// whether the object's current metageneration does not match the given
-// value.
-func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
-// predefined set of access controls to this object.
-//
-// Possible values:
-// "authenticatedRead" - Object owner gets OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and
-// project team owners get OWNER access.
-// "bucketOwnerRead" - Object owner gets OWNER access, and project
-// team owners get READER access.
-// "private" - Object owner gets OWNER access.
-// "projectPrivate" - Object owner gets OWNER access, and project team
-// members get access according to their roles.
-// "publicRead" - Object owner gets OWNER access, and allUsers get
-// READER access.
-func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall {
- c.urlParams_.Set("predefinedAcl", predefinedAcl)
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to full.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit the acl property.
-func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PATCH", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.objects.patch" call.
-// Exactly one of *Object or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Object.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Object{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates an object's metadata. This method supports patch semantics.",
- // "httpMethod": "PATCH",
- // "id": "storage.objects.patch",
- // "parameterOrder": [
- // "bucket",
- // "object"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which the object resides.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "predefinedAcl": {
- // "description": "Apply a predefined set of access controls to this object.",
- // "enum": [
- // "authenticatedRead",
- // "bucketOwnerFullControl",
- // "bucketOwnerRead",
- // "private",
- // "projectPrivate",
- // "publicRead"
- // ],
- // "enumDescriptions": [
- // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- // "Object owner gets OWNER access, and project team owners get OWNER access.",
- // "Object owner gets OWNER access, and project team owners get READER access.",
- // "Object owner gets OWNER access.",
- // "Object owner gets OWNER access, and project team members get access according to their roles.",
- // "Object owner gets OWNER access, and allUsers get READER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to full.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit the acl property."
- // ],
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}",
- // "request": {
- // "$ref": "Object"
- // },
- // "response": {
- // "$ref": "Object"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
-// method id "storage.objects.rewrite":
-
-type ObjectsRewriteCall struct {
- s *Service
- sourceBucket string
- sourceObject string
- destinationBucket string
- destinationObject string
- object *Object
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Rewrite: Rewrites a source object to a destination object. Optionally
-// overrides metadata.
-func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall {
- c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.sourceBucket = sourceBucket
- c.sourceObject = sourceObject
- c.destinationBucket = destinationBucket
- c.destinationObject = destinationObject
- c.object = object
- return c
-}
-
-// DestinationPredefinedAcl sets the optional parameter
-// "destinationPredefinedAcl": Apply a predefined set of access controls
-// to the destination object.
-//
-// Possible values:
-// "authenticatedRead" - Object owner gets OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and
-// project team owners get OWNER access.
-// "bucketOwnerRead" - Object owner gets OWNER access, and project
-// team owners get READER access.
-// "private" - Object owner gets OWNER access.
-// "projectPrivate" - Object owner gets OWNER access, and project team
-// members get access according to their roles.
-// "publicRead" - Object owner gets OWNER access, and allUsers get
-// READER access.
-func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall {
- c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl)
- return c
-}
-
-// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
-// Makes the operation conditional on whether the destination object's
-// current generation matches the given value.
-func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall {
- c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
- return c
-}
-
-// IfGenerationNotMatch sets the optional parameter
-// "ifGenerationNotMatch": Makes the operation conditional on whether
-// the destination object's current generation does not match the given
-// value.
-func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall {
- c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the operation conditional on whether
-// the destination object's current metageneration matches the given
-// value.
-func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the operation conditional on
-// whether the destination object's current metageneration does not
-// match the given value.
-func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// IfSourceGenerationMatch sets the optional parameter
-// "ifSourceGenerationMatch": Makes the operation conditional on whether
-// the source object's generation matches the given value.
-func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall {
- c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch))
- return c
-}
-
-// IfSourceGenerationNotMatch sets the optional parameter
-// "ifSourceGenerationNotMatch": Makes the operation conditional on
-// whether the source object's generation does not match the given
-// value.
-func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall {
- c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch))
- return c
-}
-
-// IfSourceMetagenerationMatch sets the optional parameter
-// "ifSourceMetagenerationMatch": Makes the operation conditional on
-// whether the source object's current metageneration matches the given
-// value.
-func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall {
- c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch))
- return c
-}
-
-// IfSourceMetagenerationNotMatch sets the optional parameter
-// "ifSourceMetagenerationNotMatch": Makes the operation conditional on
-// whether the source object's current metageneration does not match the
-// given value.
-func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall {
- c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch))
- return c
-}
-
-// MaxBytesRewrittenPerCall sets the optional parameter
-// "maxBytesRewrittenPerCall": The maximum number of bytes that will be
-// rewritten per rewrite request. Most callers shouldn't need to specify
-// this parameter - it is primarily in place to support testing. If
-// specified the value must be an integral multiple of 1 MiB (1048576).
-// Also, this only applies to requests where the source and destination
-// span locations and/or storage classes. Finally, this value must not
-// change across rewrite calls else you'll get an error that the
-// rewriteToken is invalid.
-func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall {
- c.urlParams_.Set("maxBytesRewrittenPerCall", fmt.Sprint(maxBytesRewrittenPerCall))
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl, unless the object resource
-// specifies the acl property, when it defaults to full.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit the acl property.
-func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// RewriteToken sets the optional parameter "rewriteToken": Include this
-// field (from the previous rewrite response) on each rewrite request
-// after the first one, until the rewrite response 'done' flag is true.
-// Calls that provide a rewriteToken can omit all other request fields,
-// but if included those fields must match the values provided in the
-// first rewrite request.
-func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall {
- c.urlParams_.Set("rewriteToken", rewriteToken)
- return c
-}
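A hedged sketch of the rewrite-token loop described above (not part of the generated file): call Do until the response reports completion, passing the returned token back on each subsequent request. It assumes the RewriteResponse fields Done, RewriteToken, and Resource defined earlier in this file; svc and the bucket/object names are placeholders.

// rewriteObject is a hypothetical helper illustrating the rewriteToken loop.
// svc is assumed to be a *Service built with New(httpClient).
func rewriteObject(svc *Service, srcBucket, srcObject, dstBucket, dstObject string) (*Object, error) {
	call := svc.Objects.Rewrite(srcBucket, srcObject, dstBucket, dstObject, &Object{})
	for {
		res, err := call.Do()
		if err != nil {
			return nil, err
		}
		if res.Done {
			return res.Resource, nil
		}
		// Feed the token from the previous response into the next request,
		// as the RewriteToken doc above requires.
		call.RewriteToken(res.RewriteToken)
	}
}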
-
-// SourceGeneration sets the optional parameter "sourceGeneration": If
-// present, selects a specific revision of the source object (as opposed
-// to the latest version, the default).
-func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall {
- c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.object)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "sourceBucket": c.sourceBucket,
- "sourceObject": c.sourceObject,
- "destinationBucket": c.destinationBucket,
- "destinationObject": c.destinationObject,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.objects.rewrite" call.
-// Exactly one of *RewriteResponse or error will be non-nil. Any non-2xx
-// status code is an error. Response headers are in either
-// *RewriteResponse.ServerResponse.Header or (if a response was returned
-// at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &RewriteResponse{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Rewrites a source object to a destination object. Optionally overrides metadata.",
- // "httpMethod": "POST",
- // "id": "storage.objects.rewrite",
- // "parameterOrder": [
- // "sourceBucket",
- // "sourceObject",
- // "destinationBucket",
- // "destinationObject"
- // ],
- // "parameters": {
- // "destinationBucket": {
- // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "destinationObject": {
- // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "destinationPredefinedAcl": {
- // "description": "Apply a predefined set of access controls to the destination object.",
- // "enum": [
- // "authenticatedRead",
- // "bucketOwnerFullControl",
- // "bucketOwnerRead",
- // "private",
- // "projectPrivate",
- // "publicRead"
- // ],
- // "enumDescriptions": [
- // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- // "Object owner gets OWNER access, and project team owners get OWNER access.",
- // "Object owner gets OWNER access, and project team owners get READER access.",
- // "Object owner gets OWNER access.",
- // "Object owner gets OWNER access, and project team members get access according to their roles.",
- // "Object owner gets OWNER access, and allUsers get READER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationMatch": {
- // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifSourceGenerationMatch": {
- // "description": "Makes the operation conditional on whether the source object's generation matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifSourceGenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the source object's generation does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifSourceMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifSourceMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "maxBytesRewrittenPerCall": {
- // "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit the acl property."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "rewriteToken": {
- // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.",
- // "location": "query",
- // "type": "string"
- // },
- // "sourceBucket": {
- // "description": "Name of the bucket in which to find the source object.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "sourceGeneration": {
- // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "sourceObject": {
- // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}",
- // "request": {
- // "$ref": "Object"
- // },
- // "response": {
- // "$ref": "RewriteResponse"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
-// method id "storage.objects.update":
-
-type ObjectsUpdateCall struct {
- s *Service
- bucket string
- object string
- object2 *Object
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// Update: Updates an object's metadata.
-func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall {
- c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.object = object
- c.object2 = object2
- return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall {
- c.urlParams_.Set("generation", fmt.Sprint(generation))
- return c
-}
-
-// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
-// Makes the operation conditional on whether the object's current
-// generation matches the given value.
-func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall {
- c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
- return c
-}
-
-// IfGenerationNotMatch sets the optional parameter
-// "ifGenerationNotMatch": Makes the operation conditional on whether
-// the object's current generation does not match the given value.
-func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall {
- c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the operation conditional on whether
-// the object's current metageneration matches the given value.
-func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the operation conditional on
-// whether the object's current metageneration does not match the given
-// value.
-func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
-// predefined set of access controls to this object.
-//
-// Possible values:
-// "authenticatedRead" - Object owner gets OWNER access, and
-// allAuthenticatedUsers get READER access.
-// "bucketOwnerFullControl" - Object owner gets OWNER access, and
-// project team owners get OWNER access.
-// "bucketOwnerRead" - Object owner gets OWNER access, and project
-// team owners get READER access.
-// "private" - Object owner gets OWNER access.
-// "projectPrivate" - Object owner gets OWNER access, and project team
-// members get access according to their roles.
-// "publicRead" - Object owner gets OWNER access, and allUsers get
-// READER access.
-func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall {
- c.urlParams_.Set("predefinedAcl", predefinedAcl)
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to full.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit the acl property.
-func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do and Download
-// methods. Any pending HTTP request will be aborted if the provided
-// context is canceled.
-func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("PUT", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "object": c.object,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Download fetches the API endpoint's "media" value, instead of the normal
-// API response value. If the returned error is nil, the Response is guaranteed to
-// have a 2xx status code. Callers must close the Response.Body as usual.
-func (c *ObjectsUpdateCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("media")
- if err != nil {
- return nil, err
- }
- if err := googleapi.CheckMediaResponse(res); err != nil {
- res.Body.Close()
- return nil, err
- }
- return res, nil
-}
-
-// Do executes the "storage.objects.update" call.
-// Exactly one of *Object or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Object.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Object{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates an object's metadata.",
- // "httpMethod": "PUT",
- // "id": "storage.objects.update",
- // "parameterOrder": [
- // "bucket",
- // "object"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which the object resides.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "generation": {
- // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifGenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "object": {
- // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "predefinedAcl": {
- // "description": "Apply a predefined set of access controls to this object.",
- // "enum": [
- // "authenticatedRead",
- // "bucketOwnerFullControl",
- // "bucketOwnerRead",
- // "private",
- // "projectPrivate",
- // "publicRead"
- // ],
- // "enumDescriptions": [
- // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
- // "Object owner gets OWNER access, and project team owners get OWNER access.",
- // "Object owner gets OWNER access, and project team owners get READER access.",
- // "Object owner gets OWNER access.",
- // "Object owner gets OWNER access, and project team members get access according to their roles.",
- // "Object owner gets OWNER access, and allUsers get READER access."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to full.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit the acl property."
- // ],
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/o/{object}",
- // "request": {
- // "$ref": "Object"
- // },
- // "response": {
- // "$ref": "Object"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ],
- // "supportsMediaDownload": true,
- // "useMediaDownloadService": true
- // }
-
-}
-
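For orientation while reading this generated client, here is a hedged sketch of how a caller typically drives the update builder defined above. The import path, the `Service`/`Objects` wiring, the bucket and object names, and the printed output are illustrative assumptions; only the chained methods (`IfMetagenerationMatch`, `Projection`, `Context`, `Do`) come from the code in this hunk.

```go
import (
	"fmt"

	"golang.org/x/net/context"
	storage "google.golang.org/api/storage/v1" // assumed import path for this generated package
)

// updateMetadata is a sketch only: svc must be an already-authenticated
// *storage.Service and obj carries the metadata to write.
func updateMetadata(ctx context.Context, svc *storage.Service, obj *storage.Object) error {
	updated, err := svc.Objects.Update("my-bucket", "notes.txt", obj).
		IfMetagenerationMatch(3). // only apply if the current metageneration is 3
		Projection("full").
		Context(ctx).
		Do()
	if err != nil {
		return err // googleapi.IsNotModified can distinguish a 304 here
	}
	fmt.Printf("updated object: %+v\n", updated)
	return nil
}
```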
-// method id "storage.objects.watchAll":
-
-type ObjectsWatchAllCall struct {
- s *Service
- bucket string
- channel *Channel
- urlParams_ gensupport.URLParams
- ctx_ context.Context
-}
-
-// WatchAll: Watch for changes on all objects in a bucket.
-func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall {
- c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.channel = channel
- return c
-}
-
-// Delimiter sets the optional parameter "delimiter": Returns results in
-// a directory-like mode. items will contain only objects whose names,
-// aside from the prefix, do not contain delimiter. Objects whose names,
-// aside from the prefix, contain delimiter will have their name,
-// truncated after the delimiter, returned in prefixes. Duplicate
-// prefixes are omitted.
-func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall {
- c.urlParams_.Set("delimiter", delimiter)
- return c
-}
-
-// MaxResults sets the optional parameter "maxResults": Maximum number
-// of items plus prefixes to return. As duplicate prefixes are omitted,
-// fewer total results may be returned than requested. The default value
-// of this parameter is 1,000 items.
-func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall {
- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
- return c
-}
-
-// PageToken sets the optional parameter "pageToken": A
-// previously-returned page token representing part of the larger set of
-// results to view.
-func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall {
- c.urlParams_.Set("pageToken", pageToken)
- return c
-}
-
-// Prefix sets the optional parameter "prefix": Filter results to
-// objects whose names begin with this prefix.
-func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall {
- c.urlParams_.Set("prefix", prefix)
- return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl.
-//
-// Possible values:
-// "full" - Include all properties.
-// "noAcl" - Omit the acl property.
-func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall {
- c.urlParams_.Set("projection", projection)
- return c
-}
-
-// Versions sets the optional parameter "versions": If true, lists all
-// versions of an object as distinct results. The default is false. For
-// more information, see Object Versioning.
-func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall {
- c.urlParams_.Set("versions", fmt.Sprint(versions))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall {
- c.ctx_ = ctx
- return c
-}
-
-func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- c.urlParams_.Set("alt", alt)
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch")
- urls += "?" + c.urlParams_.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- if c.ctx_ != nil {
- return ctxhttp.Do(c.ctx_, c.s.client, req)
- }
- return c.s.client.Do(req)
-}
-
-// Do executes the "storage.objects.watchAll" call.
-// Exactly one of *Channel or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Channel.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, &googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- }
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- ret := &Channel{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Watch for changes on all objects in a bucket.",
- // "httpMethod": "POST",
- // "id": "storage.objects.watchAll",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which to look for objects.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "delimiter": {
- // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
- // "location": "query",
- // "type": "string"
- // },
- // "maxResults": {
- // "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.",
- // "format": "uint32",
- // "location": "query",
- // "minimum": "0",
- // "type": "integer"
- // },
- // "pageToken": {
- // "description": "A previously-returned page token representing part of the larger set of results to view.",
- // "location": "query",
- // "type": "string"
- // },
- // "prefix": {
- // "description": "Filter results to objects whose names begin with this prefix.",
- // "location": "query",
- // "type": "string"
- // },
- // "projection": {
- // "description": "Set of properties to return. Defaults to noAcl.",
- // "enum": [
- // "full",
- // "noAcl"
- // ],
- // "enumDescriptions": [
- // "Include all properties.",
- // "Omit the acl property."
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "versions": {
- // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
- // "location": "query",
- // "type": "boolean"
- // }
- // },
- // "path": "b/{bucket}/o/watch",
- // "request": {
- // "$ref": "Channel",
- // "parameterName": "resource"
- // },
- // "response": {
- // "$ref": "Channel"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ],
- // "supportsSubscription": true
- // }
-
-}
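A similar hedged sketch for the watch call above, reusing the same assumed imports and `*storage.Service` as the earlier update sketch. The `Channel` fields used here (`Id`, `Type`, `Address`) and the `ResourceId` field on the response are assumptions about the Channel resource, which is defined elsewhere in this package; the option methods are the ones shown above.

```go
// watchBucket is a sketch: it registers a webhook channel for object changes
// under a prefix, including noncurrent versions as distinct results.
func watchBucket(ctx context.Context, svc *storage.Service) error {
	ch := &storage.Channel{
		Id:      "my-channel-id",
		Type:    "web_hook",
		Address: "https://example.com/gcs-notifications",
	}
	resp, err := svc.Objects.WatchAll("my-bucket", ch).
		Prefix("logs/").
		Versions(true).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("watch established, resource:", resp.ResourceId)
	return nil
}
```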
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/.travis.yml b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/.travis.yml
deleted file mode 100644
index 7715209771..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-language: go
-sudo: false
-
-go:
- - 1.4
-
-install:
- - go get -v -t -d google.golang.org/appengine/...
- - mkdir sdk
- - curl -o sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.24.zip"
- - unzip sdk.zip -d sdk
- - export APPENGINE_DEV_APPSERVER=$(pwd)/sdk/go_appengine/dev_appserver.py
-
-script:
- - go version
- - go test -v google.golang.org/appengine/...
- - go test -v -race google.golang.org/appengine/...
- - sdk/go_appengine/goapp test -v google.golang.org/appengine/...
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/LICENSE b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/LICENSE
deleted file mode 100644
index d645695673..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/README.md b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/README.md
deleted file mode 100644
index 1dbb3341f0..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/README.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# Go App Engine packages
-
-[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine)
-
-This repository supports the Go runtime on App Engine,
-including both classic App Engine and Managed VMs.
-It provides APIs for interacting with App Engine services.
-Its canonical import path is `google.golang.org/appengine`.
-
-See https://cloud.google.com/appengine/docs/go/
-for more information.
-
-File issue reports and feature requests on the [Google App Engine issue
-tracker](https://code.google.com/p/googleappengine/issues/entry?template=Go%20defect).
-
-## Directory structure
-The top level directory of this repository is the `appengine` package. It
-contains the
-basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API
-packages are in subdirectories (e.g. `datastore`).
-
-There is an `internal` subdirectory that contains service protocol buffers,
-plus packages required for connectivity to make API calls. App Engine apps
-should not directly import any package under `internal`.
-
-## Updating a Go App Engine app
-
-This section describes how to update a traditional Go App Engine app to use
-these packages.
-
-### 1. Update YAML files (Managed VMs only)
-
-The `app.yaml` file (and YAML files for modules) should have these new lines added:
-```
-vm: true
-```
-See https://cloud.google.com/appengine/docs/go/modules/#Go_Instance_scaling_and_class for details.
-
-### 2. Update import paths
-
-The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.
-You will need to update your code to use import paths starting with that; for instance,
-code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.
-You can do that manually, or by running this command to recursively update all Go source files in the current directory:
-(may require GNU sed)
-```
-sed -i '/"appengine/{s,"appengine,"google.golang.org/appengine,;s,appengine_,appengine/,}' \
- $(find . -name '*.go')
-```
-
-### 3. Update code using deprecated, removed or modified APIs
-
-Most App Engine services are available with exactly the same API.
-A few APIs were cleaned up, and some are not available yet.
-This list summarises the differences:
-
-* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`.
-* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log` (see the sketch after this list).
-* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead.
-* `appengine.Datacenter` now takes a `context.Context` argument.
-* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.
-* `delay.Call` now returns an error.
-* `search.FieldLoadSaver` now handles document metadata.
-* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the
- `context.Context` instead.
-* `aetest` no longer declares its own Context type, and uses the standard one instead.
-* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been
- deprecated and unused for a long time.
-* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.
- Use `appengine.ModuleHostname` and `appengine.ModuleName` instead.
-* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated.
- Use [Google Cloud Storage](https://godoc.org/google.golang.org/cloud/storage) instead.
-* `appengine/socket` is not required on Managed VMs. Use the standard `net` package instead.
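As a hedged illustration of the context, logging, and timeout changes listed above, a migrated Managed VMs handler might look like the following; the handler body, route, and messages are assumptions, not part of these packages.

```
package main

import (
	"net/http"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/appengine"
	"google.golang.org/appengine/log"
)

func handle(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)                          // now a golang.org/x/net/context.Context
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)  // replaces appengine.Timeout
	defer cancel()
	log.Infof(ctx, "handling %s", r.URL.Path) // replaces the old c.Infof
	w.Write([]byte("ok"))
}

func main() {
	http.HandleFunc("/", handle)
	appengine.Main() // Managed VMs entry point; classic App Engine apps omit main
}
```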
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/appengine.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/appengine.go
deleted file mode 100644
index be0b5f2bc1..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/appengine.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// Package appengine provides basic functionality for Google App Engine.
-//
-// For more information on how to write Go apps for Google App Engine, see:
-// https://cloud.google.com/appengine/docs/go/
-package appengine
-
-import (
- "net/http"
-
- "github.com/golang/protobuf/proto"
- "golang.org/x/net/context"
-
- "google.golang.org/appengine/internal"
-)
-
-// IsDevAppServer reports whether the App Engine app is running in the
-// development App Server.
-func IsDevAppServer() bool {
- return internal.IsDevAppServer()
-}
-
-// NewContext returns a context for an in-flight HTTP request.
-// This function is cheap.
-func NewContext(req *http.Request) context.Context {
- return WithContext(context.Background(), req)
-}
-
-// WithContext returns a copy of the parent context
-// and associates it with an in-flight HTTP request.
-// This function is cheap.
-func WithContext(parent context.Context, req *http.Request) context.Context {
- return internal.WithContext(parent, req)
-}
-
-// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call.
-
-// BlobKey is a key for a blobstore blob.
-//
-// Conceptually, this type belongs in the blobstore package, but it lives in
-// the appengine package to avoid a circular dependency: blobstore depends on
-// datastore, and datastore needs to refer to the BlobKey type.
-type BlobKey string
-
-// GeoPoint represents a location as latitude/longitude in degrees.
-type GeoPoint struct {
- Lat, Lng float64
-}
-
-// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
-func (g GeoPoint) Valid() bool {
- return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
-}
-
-// APICallFunc defines a function type for handling an API call.
-// See WithCallOverride.
-type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error
-
-// WithAPICallFunc returns a copy of the parent context
-// that will cause API calls to invoke f instead of their normal operation.
-//
-// This is intended for advanced users only.
-func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context {
- return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f))
-}
-
-// APICall performs an API call.
-//
-// This is not intended for general use; it is exported for use in conjunction
-// with WithAPICallFunc.
-func APICall(ctx context.Context, service, method string, in, out proto.Message) error {
- return internal.Call(ctx, service, method, in, out)
-}
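Because WithAPICallFunc is only described abstractly above, here is a hedged sketch of using it to intercept API calls in a test. The canonical protobuf import path, the test package name, and the stub behaviour are assumptions; only the `APICallFunc` signature comes from this file.

```go
package myapp_test

import (
	"testing"

	"github.com/golang/protobuf/proto" // assumed canonical import path
	"golang.org/x/net/context"

	"google.golang.org/appengine"
)

func TestWithStubbedAPICalls(t *testing.T) {
	ctx := context.Background()
	ctx = appengine.WithAPICallFunc(ctx, func(ctx context.Context, service, method string, in, out proto.Message) error {
		// Pretend every call succeeds without touching the service bridge; a
		// real stub would populate out with a canned response per service/method.
		t.Logf("intercepted %s.%s", service, method)
		return nil
	})
	_ = ctx // pass ctx into the code under test
}
```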
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/appengine_vm.go
deleted file mode 100644
index 2f7759067f..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/appengine_vm.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package appengine
-
-import (
- "golang.org/x/net/context"
-
- "google.golang.org/appengine/internal"
-)
-
-// The comment below must not be changed.
-// It is used by go-app-builder to recognise that this package has
-// the Main function to use in the synthetic main.
-// The gophers party all night; the rabbits provide the beats.
-
-// Main is the principal entry point for a Managed VMs app.
-// It installs a trivial health checker if one isn't already registered,
-// and starts listening on port 8080 (overridden by the $PORT environment
-// variable).
-//
-// See https://cloud.google.com/appengine/docs/managed-vms/custom-runtimes#health_check_requests
-// for details on how to do your own health checking.
-//
-// Main never returns.
-//
-// Main is designed so that the app's main package looks like this:
-//
-// package main
-//
-// import (
-// "google.golang.org/appengine"
-//
-// _ "myapp/package0"
-// _ "myapp/package1"
-// )
-//
-// func main() {
-// appengine.Main()
-// }
-//
-// The "myapp/packageX" packages are expected to register HTTP handlers
-// in their init functions.
-func Main() {
- internal.Main()
-}
-
-// BackgroundContext returns a context not associated with a request.
-// This should only be used when not servicing a request.
-// This only works on Managed VMs.
-func BackgroundContext() context.Context {
- return internal.BackgroundContext()
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/errors.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/errors.go
deleted file mode 100644
index 16d0772e2a..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/errors.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// This file provides error functions for common API failure modes.
-
-package appengine
-
-import (
- "fmt"
-
- "google.golang.org/appengine/internal"
-)
-
-// IsOverQuota reports whether err represents an API call failure
-// due to insufficient available quota.
-func IsOverQuota(err error) bool {
- callErr, ok := err.(*internal.CallError)
- return ok && callErr.Code == 4
-}
-
-// MultiError is returned by batch operations when there are errors with
-// particular elements. Errors will be in a one-to-one correspondence with
-// the input elements; successful elements will have a nil entry.
-type MultiError []error
-
-func (m MultiError) Error() string {
- s, n := "", 0
- for _, e := range m {
- if e != nil {
- if n == 0 {
- s = e.Error()
- }
- n++
- }
- }
- switch n {
- case 0:
- return "(0 errors)"
- case 1:
- return s
- case 2:
- return s + " (and 1 other error)"
- }
- return fmt.Sprintf("%s (and %d other errors)", s, n-1)
-}
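A short, hedged sketch of how callers usually unpack a MultiError from a batch call; the `datastore.GetMulti` call, the `Task` type, and the logging are assumptions used only for illustration, while the one-to-one correspondence of entries is documented above.

```go
import (
	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/datastore"
	"google.golang.org/appengine/log"
)

type Task struct{ Title string }

// loadTasks reports per-key failures individually when GetMulti returns a MultiError.
func loadTasks(ctx context.Context, keys []*datastore.Key) []Task {
	tasks := make([]Task, len(keys))
	err := datastore.GetMulti(ctx, keys, tasks)
	if me, ok := err.(appengine.MultiError); ok {
		for i, e := range me {
			if e != nil {
				log.Errorf(ctx, "key %v: %v", keys[i], e)
			}
		}
	} else if err != nil {
		log.Errorf(ctx, "GetMulti: %v", err) // a non-batch failure, e.g. an RPC error
	}
	return tasks
}
```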
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/identity.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/identity.go
deleted file mode 100644
index b8dcf8f361..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/identity.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package appengine
-
-import (
- "time"
-
- "golang.org/x/net/context"
-
- "google.golang.org/appengine/internal"
- pb "google.golang.org/appengine/internal/app_identity"
- modpb "google.golang.org/appengine/internal/modules"
-)
-
-// AppID returns the application ID for the current application.
-// The string will be a plain application ID (e.g. "appid"), with a
-// domain prefix for custom domain deployments (e.g. "example.com:appid").
-func AppID(c context.Context) string { return internal.AppID(c) }
-
-// DefaultVersionHostname returns the standard hostname of the default version
-// of the current application (e.g. "my-app.appspot.com"). This is suitable for
-// use in constructing URLs.
-func DefaultVersionHostname(c context.Context) string {
- return internal.DefaultVersionHostname(c)
-}
-
-// ModuleName returns the module name of the current instance.
-func ModuleName(c context.Context) string {
- return internal.ModuleName(c)
-}
-
-// ModuleHostname returns a hostname of a module instance.
-// If module is the empty string, it refers to the module of the current instance.
-// If version is empty, it refers to the version of the current instance if valid,
-// or the default version of the module of the current instance.
-// If instance is empty, ModuleHostname returns the load-balancing hostname.
-func ModuleHostname(c context.Context, module, version, instance string) (string, error) {
- req := &modpb.GetHostnameRequest{}
- if module != "" {
- req.Module = &module
- }
- if version != "" {
- req.Version = &version
- }
- if instance != "" {
- req.Instance = &instance
- }
- res := &modpb.GetHostnameResponse{}
- if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil {
- return "", err
- }
- return *res.Hostname, nil
-}
-
-// VersionID returns the version ID for the current application.
-// It will be of the form "X.Y", where X is specified in app.yaml,
-// and Y is a number generated when each version of the app is uploaded.
-// It does not include a module name.
-func VersionID(c context.Context) string { return internal.VersionID(c) }
-
-// InstanceID returns a mostly-unique identifier for this instance.
-func InstanceID() string { return internal.InstanceID() }
-
-// Datacenter returns an identifier for the datacenter that the instance is running in.
-func Datacenter(c context.Context) string { return internal.Datacenter(c) }
-
-// ServerSoftware returns the App Engine release version.
-// In production, it looks like "Google App Engine/X.Y.Z".
-// In the development appserver, it looks like "Development/X.Y".
-func ServerSoftware() string { return internal.ServerSoftware() }
-
-// RequestID returns a string that uniquely identifies the request.
-func RequestID(c context.Context) string { return internal.RequestID(c) }
-
-// AccessToken generates an OAuth2 access token for the specified scopes on
-// behalf of service account of this application. This token will expire after
-// the returned time.
-func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) {
- req := &pb.GetAccessTokenRequest{Scope: scopes}
- res := &pb.GetAccessTokenResponse{}
-
- err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res)
- if err != nil {
- return "", time.Time{}, err
- }
- return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil
-}
-
-// Certificate represents a public certificate for the app.
-type Certificate struct {
- KeyName string
- Data []byte // PEM-encoded X.509 certificate
-}
-
-// PublicCertificates retrieves the public certificates for the app.
-// They can be used to verify a signature returned by SignBytes.
-func PublicCertificates(c context.Context) ([]Certificate, error) {
- req := &pb.GetPublicCertificateForAppRequest{}
- res := &pb.GetPublicCertificateForAppResponse{}
- if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil {
- return nil, err
- }
- var cs []Certificate
- for _, pc := range res.PublicCertificateList {
- cs = append(cs, Certificate{
- KeyName: pc.GetKeyName(),
- Data: []byte(pc.GetX509CertificatePem()),
- })
- }
- return cs, nil
-}
-
-// ServiceAccount returns a string representing the service account name, in
-// the form of an email address (typically app_id@appspot.gserviceaccount.com).
-func ServiceAccount(c context.Context) (string, error) {
- req := &pb.GetServiceAccountNameRequest{}
- res := &pb.GetServiceAccountNameResponse{}
-
- err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res)
- if err != nil {
- return "", err
- }
- return res.GetServiceAccountName(), err
-}
-
-// SignBytes signs bytes using a private key unique to your application.
-func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) {
- req := &pb.SignForAppRequest{BytesToSign: bytes}
- res := &pb.SignForAppResponse{}
-
- if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil {
- return "", nil, err
- }
- return res.GetKeyName(), res.GetSignatureBytes(), nil
-}
-
-func init() {
- internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name)
- internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name)
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api.go
deleted file mode 100644
index aa139d4d51..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api.go
+++ /dev/null
@@ -1,640 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package internal
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "net/url"
- "os"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
-
- basepb "google.golang.org/appengine/internal/base"
- logpb "google.golang.org/appengine/internal/log"
- remotepb "google.golang.org/appengine/internal/remote_api"
-)
-
-const (
- apiPath = "/rpc_http"
-)
-
-var (
- // Incoming headers.
- ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
- dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
- traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
- curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
- userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
- remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
-
- // Outgoing headers.
- apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
- apiEndpointHeaderValue = []string{"app-engine-apis"}
- apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
- apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
- apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
- apiContentType = http.CanonicalHeaderKey("Content-Type")
- apiContentTypeValue = []string{"application/octet-stream"}
- logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
-
- apiHTTPClient = &http.Client{
- Transport: &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: limitDial,
- },
- }
-)
-
-func apiURL() *url.URL {
- host, port := "appengine.googleapis.internal", "10001"
- if h := os.Getenv("API_HOST"); h != "" {
- host = h
- }
- if p := os.Getenv("API_PORT"); p != "" {
- port = p
- }
- return &url.URL{
- Scheme: "http",
- Host: host + ":" + port,
- Path: apiPath,
- }
-}
-
-func handleHTTP(w http.ResponseWriter, r *http.Request) {
- c := &context{
- req: r,
- outHeader: w.Header(),
- apiURL: apiURL(),
- }
- stopFlushing := make(chan int)
-
- ctxs.Lock()
- ctxs.m[r] = c
- ctxs.Unlock()
- defer func() {
- ctxs.Lock()
- delete(ctxs.m, r)
- ctxs.Unlock()
- }()
-
- // Patch up RemoteAddr so it looks reasonable.
- if addr := r.Header.Get(userIPHeader); addr != "" {
- r.RemoteAddr = addr
- } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
- r.RemoteAddr = addr
- } else {
- // Should not normally reach here, but pick a sensible default anyway.
- r.RemoteAddr = "127.0.0.1"
- }
- // The address in the headers will most likely be of these forms:
- // 123.123.123.123
- // 2001:db8::1
- // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
- if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
- // Assume the remote address is only a host; add a default port.
- r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
- }
-
- // Start goroutine responsible for flushing app logs.
- // This is done after adding c to ctx.m (and stopped before removing it)
- // because flushing logs requires making an API call.
- go c.logFlusher(stopFlushing)
-
- executeRequestSafely(c, r)
- c.outHeader = nil // make sure header changes aren't respected any more
-
- stopFlushing <- 1 // any logging beyond this point will be dropped
-
- // Flush any pending logs asynchronously.
- c.pendingLogs.Lock()
- flushes := c.pendingLogs.flushes
- if len(c.pendingLogs.lines) > 0 {
- flushes++
- }
- c.pendingLogs.Unlock()
- go c.flushLog(false)
- w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
-
- // Avoid nil Write call if c.Write is never called.
- if c.outCode != 0 {
- w.WriteHeader(c.outCode)
- }
- if c.outBody != nil {
- w.Write(c.outBody)
- }
-}
-
-func executeRequestSafely(c *context, r *http.Request) {
- defer func() {
- if x := recover(); x != nil {
- logf(c, 4, "%s", renderPanic(x)) // 4 == critical
- c.outCode = 500
- }
- }()
-
- http.DefaultServeMux.ServeHTTP(c, r)
-}
-
-func renderPanic(x interface{}) string {
- buf := make([]byte, 16<<10) // 16 KB should be plenty
- buf = buf[:runtime.Stack(buf, false)]
-
- // Remove the first few stack frames:
- // this func
- // the recover closure in the caller
- // That will root the stack trace at the site of the panic.
- const (
- skipStart = "internal.renderPanic"
- skipFrames = 2
- )
- start := bytes.Index(buf, []byte(skipStart))
- p := start
- for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
- p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
- if p < 0 {
- break
- }
- }
- if p >= 0 {
- // buf[start:p+1] is the block to remove.
- // Copy buf[p+1:] over buf[start:] and shrink buf.
- copy(buf[start:], buf[p+1:])
- buf = buf[:len(buf)-(p+1-start)]
- }
-
- // Add panic heading.
- head := fmt.Sprintf("panic: %v\n\n", x)
- if len(head) > len(buf) {
- // Extremely unlikely to happen.
- return head
- }
- copy(buf[len(head):], buf)
- copy(buf, head)
-
- return string(buf)
-}
-
-var ctxs = struct {
- sync.Mutex
- m map[*http.Request]*context
- bg *context // background context, lazily initialized
- // dec is used by tests to decorate the netcontext.Context returned
- // for a given request. This allows tests to add overrides (such as
- // WithAppIDOverride) to the context. The map is nil outside tests.
- dec map[*http.Request]func(netcontext.Context) netcontext.Context
-}{
- m: make(map[*http.Request]*context),
-}
-
-// context represents the context of an in-flight HTTP request.
-// It implements the appengine.Context and http.ResponseWriter interfaces.
-type context struct {
- req *http.Request
-
- outCode int
- outHeader http.Header
- outBody []byte
-
- pendingLogs struct {
- sync.Mutex
- lines []*logpb.UserAppLogLine
- flushes int
- }
-
- apiURL *url.URL
-}
-
-var contextKey = "holds a *context"
-
-func fromContext(ctx netcontext.Context) *context {
- c, _ := ctx.Value(&contextKey).(*context)
- return c
-}
-
-func withContext(parent netcontext.Context, c *context) netcontext.Context {
- ctx := netcontext.WithValue(parent, &contextKey, c)
- if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
- ctx = withNamespace(ctx, ns)
- }
- return ctx
-}
-
-func toContext(c *context) netcontext.Context {
- return withContext(netcontext.Background(), c)
-}
-
-func IncomingHeaders(ctx netcontext.Context) http.Header {
- if c := fromContext(ctx); c != nil {
- return c.req.Header
- }
- return nil
-}
-
-func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
- ctxs.Lock()
- c := ctxs.m[req]
- d := ctxs.dec[req]
- ctxs.Unlock()
-
- if d != nil {
- parent = d(parent)
- }
-
- if c == nil {
- // Someone passed in an http.Request that is not in-flight.
- // We panic here rather than panicking at a later point
- // so that stack traces will be more sensible.
- log.Panic("appengine: NewContext passed an unknown http.Request")
- }
- return withContext(parent, c)
-}
-
-func BackgroundContext() netcontext.Context {
- ctxs.Lock()
- defer ctxs.Unlock()
-
- if ctxs.bg != nil {
- return toContext(ctxs.bg)
- }
-
- // Compute background security ticket.
- appID := partitionlessAppID()
- escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
- majVersion := VersionID(nil)
- if i := strings.Index(majVersion, "."); i > 0 {
- majVersion = majVersion[:i]
- }
- ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
-
- ctxs.bg = &context{
- req: &http.Request{
- Header: http.Header{
- ticketHeader: []string{ticket},
- },
- },
- apiURL: apiURL(),
- }
-
- // TODO(dsymonds): Wire up the shutdown handler to do a final flush.
- go ctxs.bg.logFlusher(make(chan int))
-
- return toContext(ctxs.bg)
-}
-
-// RegisterTestRequest registers the HTTP request req for testing, such that
-// any API calls are sent to the provided URL. It returns a closure to delete
-// the registration.
-// It should only be used by aetest package.
-func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() {
- c := &context{
- req: req,
- apiURL: apiURL,
- }
- ctxs.Lock()
- defer ctxs.Unlock()
- if _, ok := ctxs.m[req]; ok {
- log.Panic("req already associated with context")
- }
- if _, ok := ctxs.dec[req]; ok {
- log.Panic("req already associated with context")
- }
- if ctxs.dec == nil {
- ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context)
- }
- ctxs.m[req] = c
- ctxs.dec[req] = decorate
-
- return func() {
- ctxs.Lock()
- delete(ctxs.m, req)
- delete(ctxs.dec, req)
- ctxs.Unlock()
- }
-}
-
-var errTimeout = &CallError{
- Detail: "Deadline exceeded",
- Code: int32(remotepb.RpcError_CANCELLED),
- Timeout: true,
-}
-
-func (c *context) Header() http.Header { return c.outHeader }
-
-// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
-// codes do not permit a response body (nor response entity headers such as
-// Content-Length, Content-Type, etc).
-func bodyAllowedForStatus(status int) bool {
- switch {
- case status >= 100 && status <= 199:
- return false
- case status == 204:
- return false
- case status == 304:
- return false
- }
- return true
-}
-
-func (c *context) Write(b []byte) (int, error) {
- if c.outCode == 0 {
- c.WriteHeader(http.StatusOK)
- }
- if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
- return 0, http.ErrBodyNotAllowed
- }
- c.outBody = append(c.outBody, b...)
- return len(b), nil
-}
-
-func (c *context) WriteHeader(code int) {
- if c.outCode != 0 {
- logf(c, 3, "WriteHeader called multiple times on request.") // error level
- return
- }
- c.outCode = code
-}
-
-func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
- hreq := &http.Request{
- Method: "POST",
- URL: c.apiURL,
- Header: http.Header{
- apiEndpointHeader: apiEndpointHeaderValue,
- apiMethodHeader: apiMethodHeaderValue,
- apiContentType: apiContentTypeValue,
- apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
- },
- Body: ioutil.NopCloser(bytes.NewReader(body)),
- ContentLength: int64(len(body)),
- Host: c.apiURL.Host,
- }
- if info := c.req.Header.Get(dapperHeader); info != "" {
- hreq.Header.Set(dapperHeader, info)
- }
- if info := c.req.Header.Get(traceHeader); info != "" {
- hreq.Header.Set(traceHeader, info)
- }
-
- tr := apiHTTPClient.Transport.(*http.Transport)
-
- var timedOut int32 // atomic; set to 1 if timed out
- t := time.AfterFunc(timeout, func() {
- atomic.StoreInt32(&timedOut, 1)
- tr.CancelRequest(hreq)
- })
- defer t.Stop()
- defer func() {
- // Check if timeout was exceeded.
- if atomic.LoadInt32(&timedOut) != 0 {
- err = errTimeout
- }
- }()
-
- hresp, err := apiHTTPClient.Do(hreq)
- if err != nil {
- return nil, &CallError{
- Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- defer hresp.Body.Close()
- hrespBody, err := ioutil.ReadAll(hresp.Body)
- if hresp.StatusCode != 200 {
- return nil, &CallError{
- Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- if err != nil {
- return nil, &CallError{
- Detail: fmt.Sprintf("service bridge response bad: %v", err),
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- return hrespBody, nil
-}
-
-func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
- if f, ctx, ok := callOverrideFromContext(ctx); ok {
- return f(ctx, service, method, in, out)
- }
-
- // Handle already-done contexts quickly.
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- c := fromContext(ctx)
- if c == nil {
- // Give a good error message rather than a panic lower down.
- return errors.New("not an App Engine context")
- }
-
- // Apply transaction modifications if we're in a transaction.
- if t := transactionFromContext(ctx); t != nil {
- if t.finished {
- return errors.New("transaction context has expired")
- }
- applyTransaction(in, &t.transaction)
- }
-
- // Default RPC timeout is 60s.
- timeout := 60 * time.Second
- if deadline, ok := ctx.Deadline(); ok {
- timeout = deadline.Sub(time.Now())
- }
-
- data, err := proto.Marshal(in)
- if err != nil {
- return err
- }
-
- ticket := c.req.Header.Get(ticketHeader)
- req := &remotepb.Request{
- ServiceName: &service,
- Method: &method,
- Request: data,
- RequestId: &ticket,
- }
- hreqBody, err := proto.Marshal(req)
- if err != nil {
- return err
- }
-
- hrespBody, err := c.post(hreqBody, timeout)
- if err != nil {
- return err
- }
-
- res := &remotepb.Response{}
- if err := proto.Unmarshal(hrespBody, res); err != nil {
- return err
- }
- if res.RpcError != nil {
- ce := &CallError{
- Detail: res.RpcError.GetDetail(),
- Code: *res.RpcError.Code,
- }
- switch remotepb.RpcError_ErrorCode(ce.Code) {
- case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
- ce.Timeout = true
- }
- return ce
- }
- if res.ApplicationError != nil {
- return &APIError{
- Service: *req.ServiceName,
- Detail: res.ApplicationError.GetDetail(),
- Code: *res.ApplicationError.Code,
- }
- }
- if res.Exception != nil || res.JavaException != nil {
- // This shouldn't happen, but let's be defensive.
- return &CallError{
- Detail: "service bridge returned exception",
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- return proto.Unmarshal(res.Response, out)
-}
-
-func (c *context) Request() *http.Request {
- return c.req
-}
-
-func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
- // Truncate long log lines.
- // TODO(dsymonds): Check if this is still necessary.
- const lim = 8 << 10
- if len(*ll.Message) > lim {
- suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
- ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
- }
-
- c.pendingLogs.Lock()
- c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
- c.pendingLogs.Unlock()
-}
-
-var logLevelName = map[int64]string{
- 0: "DEBUG",
- 1: "INFO",
- 2: "WARNING",
- 3: "ERROR",
- 4: "CRITICAL",
-}
-
-func logf(c *context, level int64, format string, args ...interface{}) {
- s := fmt.Sprintf(format, args...)
- s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
- c.addLogLine(&logpb.UserAppLogLine{
- TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
- Level: &level,
- Message: &s,
- })
- log.Print(logLevelName[level] + ": " + s)
-}
-
-// flushLog attempts to flush any pending logs to the appserver.
-// It should not be called concurrently.
-func (c *context) flushLog(force bool) (flushed bool) {
- c.pendingLogs.Lock()
- // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
- n, rem := 0, 30<<20
- for ; n < len(c.pendingLogs.lines); n++ {
- ll := c.pendingLogs.lines[n]
- // Each log line will require about 3 bytes of overhead.
- nb := proto.Size(ll) + 3
- if nb > rem {
- break
- }
- rem -= nb
- }
- lines := c.pendingLogs.lines[:n]
- c.pendingLogs.lines = c.pendingLogs.lines[n:]
- c.pendingLogs.Unlock()
-
- if len(lines) == 0 && !force {
- // Nothing to flush.
- return false
- }
-
- rescueLogs := false
- defer func() {
- if rescueLogs {
- c.pendingLogs.Lock()
- c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
- c.pendingLogs.Unlock()
- }
- }()
-
- buf, err := proto.Marshal(&logpb.UserAppLogGroup{
- LogLine: lines,
- })
- if err != nil {
- log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
- rescueLogs = true
- return false
- }
-
- req := &logpb.FlushRequest{
- Logs: buf,
- }
- res := &basepb.VoidProto{}
- c.pendingLogs.Lock()
- c.pendingLogs.flushes++
- c.pendingLogs.Unlock()
- if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
- log.Printf("internal.flushLog: Flush RPC: %v", err)
- rescueLogs = true
- return false
- }
- return true
-}
-
-const (
- // Log flushing parameters.
- flushInterval = 1 * time.Second
- forceFlushInterval = 60 * time.Second
-)
-
-func (c *context) logFlusher(stop <-chan int) {
- lastFlush := time.Now()
- tick := time.NewTicker(flushInterval)
- for {
- select {
- case <-stop:
- // Request finished.
- tick.Stop()
- return
- case <-tick.C:
- force := time.Now().Sub(lastFlush) > forceFlushInterval
- if c.flushLog(force) {
- lastFlush = time.Now()
- }
- }
- }
-}
-
-func ContextForTesting(req *http.Request) netcontext.Context {
- return toContext(&context{req: req})
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api_classic.go
deleted file mode 100644
index 1c072e9dbb..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api_classic.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package internal
-
-import (
- "errors"
- "net/http"
- "time"
-
- "appengine"
- "appengine_internal"
- basepb "appengine_internal/base"
-
- "github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
-)
-
-var contextKey = "holds an appengine.Context"
-
-func fromContext(ctx netcontext.Context) appengine.Context {
- c, _ := ctx.Value(&contextKey).(appengine.Context)
- return c
-}
-
-// This is only for classic App Engine adapters.
-func ClassicContextFromContext(ctx netcontext.Context) appengine.Context {
- return fromContext(ctx)
-}
-
-func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
- ctx := netcontext.WithValue(parent, &contextKey, c)
-
- s := &basepb.StringProto{}
- c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
- if ns := s.GetValue(); ns != "" {
- ctx = NamespacedContext(ctx, ns)
- }
-
- return ctx
-}
-
-func IncomingHeaders(ctx netcontext.Context) http.Header {
- if c := fromContext(ctx); c != nil {
- if req, ok := c.Request().(*http.Request); ok {
- return req.Header
- }
- }
- return nil
-}
-
-func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
- c := appengine.NewContext(req)
- return withContext(parent, c)
-}
-
-func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
- if f, ctx, ok := callOverrideFromContext(ctx); ok {
- return f(ctx, service, method, in, out)
- }
-
- // Handle already-done contexts quickly.
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- c := fromContext(ctx)
- if c == nil {
- // Give a good error message rather than a panic lower down.
- return errors.New("not an App Engine context")
- }
-
- // Apply transaction modifications if we're in a transaction.
- if t := transactionFromContext(ctx); t != nil {
- if t.finished {
- return errors.New("transaction context has expired")
- }
- applyTransaction(in, &t.transaction)
- }
-
- var opts *appengine_internal.CallOptions
- if d, ok := ctx.Deadline(); ok {
- opts = &appengine_internal.CallOptions{
- Timeout: d.Sub(time.Now()),
- }
- }
-
- err := c.Call(service, method, in, out, opts)
- switch v := err.(type) {
- case *appengine_internal.APIError:
- return &APIError{
- Service: v.Service,
- Detail: v.Detail,
- Code: v.Code,
- }
- case *appengine_internal.CallError:
- return &CallError{
- Detail: v.Detail,
- Code: v.Code,
- Timeout: v.Timeout,
- }
- }
- return err
-}
-
-func handleHTTP(w http.ResponseWriter, r *http.Request) {
- panic("handleHTTP called; this should be impossible")
-}
-
-func logf(c appengine.Context, level int64, format string, args ...interface{}) {
- var fn func(format string, args ...interface{})
- switch level {
- case 0:
- fn = c.Debugf
- case 1:
- fn = c.Infof
- case 2:
- fn = c.Warningf
- case 3:
- fn = c.Errorf
- case 4:
- fn = c.Criticalf
- default:
- // This shouldn't happen.
- fn = c.Criticalf
- }
- fn(format, args...)
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api_common.go
deleted file mode 100644
index ec5383e660..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api_common.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-import (
- "github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
-)
-
-type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error
-
-var callOverrideKey = "holds []CallOverrideFunc"
-
-func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
- // We avoid appending to any existing call override
- // so we don't risk overwriting a popped stack below.
- var cofs []CallOverrideFunc
- if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
- cofs = append(cofs, uf...)
- }
- cofs = append(cofs, f)
- return netcontext.WithValue(ctx, &callOverrideKey, cofs)
-}
-
-func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
- cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
- if len(cofs) == 0 {
- return nil, nil, false
- }
- // We found a list of overrides; grab the last, and reconstitute a
- // context that will hide it.
- f := cofs[len(cofs)-1]
- ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
- return f, ctx, true
-}
-
-type logOverrideFunc func(level int64, format string, args ...interface{})
-
-var logOverrideKey = "holds a logOverrideFunc"
-
-func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
- return netcontext.WithValue(ctx, &logOverrideKey, f)
-}
-
-var appIDOverrideKey = "holds a string, being the full app ID"
-
-func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
- return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
-}
-
-var namespaceKey = "holds the namespace string"
-
-func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
- return netcontext.WithValue(ctx, &namespaceKey, ns)
-}
-
-func NamespaceFromContext(ctx netcontext.Context) string {
- // If there's no namespace, return the empty string.
- ns, _ := ctx.Value(&namespaceKey).(string)
- return ns
-}
-
-// FullyQualifiedAppID returns the fully-qualified application ID.
-// This may contain a partition prefix (e.g. "s~" for High Replication apps),
-// or a domain prefix (e.g. "example.com:").
-func FullyQualifiedAppID(ctx netcontext.Context) string {
- if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
- return id
- }
- return fullyQualifiedAppID(ctx)
-}
-
-func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
- if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
- f(level, format, args...)
- return
- }
- logf(fromContext(ctx), level, format, args...)
-}
-
-// NamespacedContext wraps a Context to support namespaces.
-func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
- n := &namespacedContext{
- namespace: namespace,
- }
- return withNamespace(WithCallOverride(ctx, n.call), namespace)
-}
-
-type namespacedContext struct {
- namespace string
-}
-
-func (n *namespacedContext) call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
- // Apply any namespace mods.
- if mod, ok := NamespaceMods[service]; ok {
- mod(in, n.namespace)
- }
- return Call(ctx, service, method, in, out)
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_id.go
deleted file mode 100644
index 11df8c07b5..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_id.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-import (
- "strings"
-)
-
-func parseFullAppID(appid string) (partition, domain, displayID string) {
- if i := strings.Index(appid, "~"); i != -1 {
- partition, appid = appid[:i], appid[i+1:]
- }
- if i := strings.Index(appid, ":"); i != -1 {
- domain, appid = appid[:i], appid[i+1:]
- }
- return partition, domain, appid
-}
-
-// appID returns "appid" or "domain.com:appid".
-func appID(fullAppID string) string {
- _, dom, dis := parseFullAppID(fullAppID)
- if dom != "" {
- return dom + ":" + dis
- }
- return dis
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
deleted file mode 100644
index 87d9701b8d..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
+++ /dev/null
@@ -1,296 +0,0 @@
-// Code generated by protoc-gen-go.
-// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto
-// DO NOT EDIT!
-
-/*
-Package app_identity is a generated protocol buffer package.
-
-It is generated from these files:
- google.golang.org/appengine/internal/app_identity/app_identity_service.proto
-
-It has these top-level messages:
- AppIdentityServiceError
- SignForAppRequest
- SignForAppResponse
- GetPublicCertificateForAppRequest
- PublicCertificate
- GetPublicCertificateForAppResponse
- GetServiceAccountNameRequest
- GetServiceAccountNameResponse
- GetAccessTokenRequest
- GetAccessTokenResponse
- GetDefaultGcsBucketNameRequest
- GetDefaultGcsBucketNameResponse
-*/
-package app_identity
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-type AppIdentityServiceError_ErrorCode int32
-
-const (
- AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0
- AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9
- AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000
- AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001
- AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002
- AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003
- AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005
- AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006
-)
-
-var AppIdentityServiceError_ErrorCode_name = map[int32]string{
- 0: "SUCCESS",
- 9: "UNKNOWN_SCOPE",
- 1000: "BLOB_TOO_LARGE",
- 1001: "DEADLINE_EXCEEDED",
- 1002: "NOT_A_VALID_APP",
- 1003: "UNKNOWN_ERROR",
- 1005: "NOT_ALLOWED",
- 1006: "NOT_IMPLEMENTED",
-}
-var AppIdentityServiceError_ErrorCode_value = map[string]int32{
- "SUCCESS": 0,
- "UNKNOWN_SCOPE": 9,
- "BLOB_TOO_LARGE": 1000,
- "DEADLINE_EXCEEDED": 1001,
- "NOT_A_VALID_APP": 1002,
- "UNKNOWN_ERROR": 1003,
- "NOT_ALLOWED": 1005,
- "NOT_IMPLEMENTED": 1006,
-}
-
-func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode {
- p := new(AppIdentityServiceError_ErrorCode)
- *p = x
- return p
-}
-func (x AppIdentityServiceError_ErrorCode) String() string {
- return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x))
-}
-func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode")
- if err != nil {
- return err
- }
- *x = AppIdentityServiceError_ErrorCode(value)
- return nil
-}
-
-type AppIdentityServiceError struct {
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} }
-func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) }
-func (*AppIdentityServiceError) ProtoMessage() {}
-
-type SignForAppRequest struct {
- BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} }
-func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) }
-func (*SignForAppRequest) ProtoMessage() {}
-
-func (m *SignForAppRequest) GetBytesToSign() []byte {
- if m != nil {
- return m.BytesToSign
- }
- return nil
-}
-
-type SignForAppResponse struct {
- KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
- SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} }
-func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) }
-func (*SignForAppResponse) ProtoMessage() {}
-
-func (m *SignForAppResponse) GetKeyName() string {
- if m != nil && m.KeyName != nil {
- return *m.KeyName
- }
- return ""
-}
-
-func (m *SignForAppResponse) GetSignatureBytes() []byte {
- if m != nil {
- return m.SignatureBytes
- }
- return nil
-}
-
-type GetPublicCertificateForAppRequest struct {
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} }
-func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) }
-func (*GetPublicCertificateForAppRequest) ProtoMessage() {}
-
-type PublicCertificate struct {
- KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
- X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *PublicCertificate) Reset() { *m = PublicCertificate{} }
-func (m *PublicCertificate) String() string { return proto.CompactTextString(m) }
-func (*PublicCertificate) ProtoMessage() {}
-
-func (m *PublicCertificate) GetKeyName() string {
- if m != nil && m.KeyName != nil {
- return *m.KeyName
- }
- return ""
-}
-
-func (m *PublicCertificate) GetX509CertificatePem() string {
- if m != nil && m.X509CertificatePem != nil {
- return *m.X509CertificatePem
- }
- return ""
-}
-
-type GetPublicCertificateForAppResponse struct {
- PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"`
- MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} }
-func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) }
-func (*GetPublicCertificateForAppResponse) ProtoMessage() {}
-
-func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate {
- if m != nil {
- return m.PublicCertificateList
- }
- return nil
-}
-
-func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 {
- if m != nil && m.MaxClientCacheTimeInSecond != nil {
- return *m.MaxClientCacheTimeInSecond
- }
- return 0
-}
-
-type GetServiceAccountNameRequest struct {
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} }
-func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) }
-func (*GetServiceAccountNameRequest) ProtoMessage() {}
-
-type GetServiceAccountNameResponse struct {
- ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} }
-func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) }
-func (*GetServiceAccountNameResponse) ProtoMessage() {}
-
-func (m *GetServiceAccountNameResponse) GetServiceAccountName() string {
- if m != nil && m.ServiceAccountName != nil {
- return *m.ServiceAccountName
- }
- return ""
-}
-
-type GetAccessTokenRequest struct {
- Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"`
- ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"`
- ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} }
-func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) }
-func (*GetAccessTokenRequest) ProtoMessage() {}
-
-func (m *GetAccessTokenRequest) GetScope() []string {
- if m != nil {
- return m.Scope
- }
- return nil
-}
-
-func (m *GetAccessTokenRequest) GetServiceAccountId() int64 {
- if m != nil && m.ServiceAccountId != nil {
- return *m.ServiceAccountId
- }
- return 0
-}
-
-func (m *GetAccessTokenRequest) GetServiceAccountName() string {
- if m != nil && m.ServiceAccountName != nil {
- return *m.ServiceAccountName
- }
- return ""
-}
-
-type GetAccessTokenResponse struct {
- AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"`
- ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} }
-func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) }
-func (*GetAccessTokenResponse) ProtoMessage() {}
-
-func (m *GetAccessTokenResponse) GetAccessToken() string {
- if m != nil && m.AccessToken != nil {
- return *m.AccessToken
- }
- return ""
-}
-
-func (m *GetAccessTokenResponse) GetExpirationTime() int64 {
- if m != nil && m.ExpirationTime != nil {
- return *m.ExpirationTime
- }
- return 0
-}
-
-type GetDefaultGcsBucketNameRequest struct {
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} }
-func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) }
-func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {}
-
-type GetDefaultGcsBucketNameResponse struct {
- DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} }
-func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) }
-func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {}
-
-func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string {
- if m != nil && m.DefaultGcsBucketName != nil {
- return *m.DefaultGcsBucketName
- }
- return ""
-}
-
-func init() {
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
deleted file mode 100644
index 19610ca5b7..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
+++ /dev/null
@@ -1,64 +0,0 @@
-syntax = "proto2";
-option go_package = "app_identity";
-
-package appengine;
-
-message AppIdentityServiceError {
- enum ErrorCode {
- SUCCESS = 0;
- UNKNOWN_SCOPE = 9;
- BLOB_TOO_LARGE = 1000;
- DEADLINE_EXCEEDED = 1001;
- NOT_A_VALID_APP = 1002;
- UNKNOWN_ERROR = 1003;
- NOT_ALLOWED = 1005;
- NOT_IMPLEMENTED = 1006;
- }
-}
-
-message SignForAppRequest {
- optional bytes bytes_to_sign = 1;
-}
-
-message SignForAppResponse {
- optional string key_name = 1;
- optional bytes signature_bytes = 2;
-}
-
-message GetPublicCertificateForAppRequest {
-}
-
-message PublicCertificate {
- optional string key_name = 1;
- optional string x509_certificate_pem = 2;
-}
-
-message GetPublicCertificateForAppResponse {
- repeated PublicCertificate public_certificate_list = 1;
- optional int64 max_client_cache_time_in_second = 2;
-}
-
-message GetServiceAccountNameRequest {
-}
-
-message GetServiceAccountNameResponse {
- optional string service_account_name = 1;
-}
-
-message GetAccessTokenRequest {
- repeated string scope = 1;
- optional int64 service_account_id = 2;
- optional string service_account_name = 3;
-}
-
-message GetAccessTokenResponse {
- optional string access_token = 1;
- optional int64 expiration_time = 2;
-}
-
-message GetDefaultGcsBucketNameRequest {
-}
-
-message GetDefaultGcsBucketNameResponse {
- optional string default_gcs_bucket_name = 1;
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
deleted file mode 100644
index 36a195650a..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Code generated by protoc-gen-go.
-// source: google.golang.org/appengine/internal/base/api_base.proto
-// DO NOT EDIT!
-
-/*
-Package base is a generated protocol buffer package.
-
-It is generated from these files:
- google.golang.org/appengine/internal/base/api_base.proto
-
-It has these top-level messages:
- StringProto
- Integer32Proto
- Integer64Proto
- BoolProto
- DoubleProto
- BytesProto
- VoidProto
-*/
-package base
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-type StringProto struct {
- Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *StringProto) Reset() { *m = StringProto{} }
-func (m *StringProto) String() string { return proto.CompactTextString(m) }
-func (*StringProto) ProtoMessage() {}
-
-func (m *StringProto) GetValue() string {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return ""
-}
-
-type Integer32Proto struct {
- Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Integer32Proto) Reset() { *m = Integer32Proto{} }
-func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
-func (*Integer32Proto) ProtoMessage() {}
-
-func (m *Integer32Proto) GetValue() int32 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type Integer64Proto struct {
- Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Integer64Proto) Reset() { *m = Integer64Proto{} }
-func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
-func (*Integer64Proto) ProtoMessage() {}
-
-func (m *Integer64Proto) GetValue() int64 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type BoolProto struct {
- Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *BoolProto) Reset() { *m = BoolProto{} }
-func (m *BoolProto) String() string { return proto.CompactTextString(m) }
-func (*BoolProto) ProtoMessage() {}
-
-func (m *BoolProto) GetValue() bool {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return false
-}
-
-type DoubleProto struct {
- Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *DoubleProto) Reset() { *m = DoubleProto{} }
-func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
-func (*DoubleProto) ProtoMessage() {}
-
-func (m *DoubleProto) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type BytesProto struct {
- Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *BytesProto) Reset() { *m = BytesProto{} }
-func (m *BytesProto) String() string { return proto.CompactTextString(m) }
-func (*BytesProto) ProtoMessage() {}
-
-func (m *BytesProto) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-type VoidProto struct {
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *VoidProto) Reset() { *m = VoidProto{} }
-func (m *VoidProto) String() string { return proto.CompactTextString(m) }
-func (*VoidProto) ProtoMessage() {}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/base/api_base.proto
deleted file mode 100644
index 56cd7a3cad..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/base/api_base.proto
+++ /dev/null
@@ -1,33 +0,0 @@
-// Built-in base types for API calls. Primarily useful as return types.
-
-syntax = "proto2";
-option go_package = "base";
-
-package appengine.base;
-
-message StringProto {
- required string value = 1;
-}
-
-message Integer32Proto {
- required int32 value = 1;
-}
-
-message Integer64Proto {
- required int64 value = 1;
-}
-
-message BoolProto {
- required bool value = 1;
-}
-
-message DoubleProto {
- required double value = 1;
-}
-
-message BytesProto {
- required bytes value = 1 [ctype=CORD];
-}
-
-message VoidProto {
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
deleted file mode 100644
index 8613cb7311..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
+++ /dev/null
@@ -1,2778 +0,0 @@
-// Code generated by protoc-gen-go.
-// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
-// DO NOT EDIT!
-
-/*
-Package datastore is a generated protocol buffer package.
-
-It is generated from these files:
- google.golang.org/appengine/internal/datastore/datastore_v3.proto
-
-It has these top-level messages:
- Action
- PropertyValue
- Property
- Path
- Reference
- User
- EntityProto
- CompositeProperty
- Index
- CompositeIndex
- IndexPostfix
- IndexPosition
- Snapshot
- InternalHeader
- Transaction
- Query
- CompiledQuery
- CompiledCursor
- Cursor
- Error
- Cost
- GetRequest
- GetResponse
- PutRequest
- PutResponse
- TouchRequest
- TouchResponse
- DeleteRequest
- DeleteResponse
- NextRequest
- QueryResult
- AllocateIdsRequest
- AllocateIdsResponse
- CompositeIndices
- AddActionsRequest
- AddActionsResponse
- BeginTransactionRequest
- CommitResponse
-*/
-package datastore
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-type Property_Meaning int32
-
-const (
- Property_NO_MEANING Property_Meaning = 0
- Property_BLOB Property_Meaning = 14
- Property_TEXT Property_Meaning = 15
- Property_BYTESTRING Property_Meaning = 16
- Property_ATOM_CATEGORY Property_Meaning = 1
- Property_ATOM_LINK Property_Meaning = 2
- Property_ATOM_TITLE Property_Meaning = 3
- Property_ATOM_CONTENT Property_Meaning = 4
- Property_ATOM_SUMMARY Property_Meaning = 5
- Property_ATOM_AUTHOR Property_Meaning = 6
- Property_GD_WHEN Property_Meaning = 7
- Property_GD_EMAIL Property_Meaning = 8
- Property_GEORSS_POINT Property_Meaning = 9
- Property_GD_IM Property_Meaning = 10
- Property_GD_PHONENUMBER Property_Meaning = 11
- Property_GD_POSTALADDRESS Property_Meaning = 12
- Property_GD_RATING Property_Meaning = 13
- Property_BLOBKEY Property_Meaning = 17
- Property_ENTITY_PROTO Property_Meaning = 19
- Property_INDEX_VALUE Property_Meaning = 18
-)
-
-var Property_Meaning_name = map[int32]string{
- 0: "NO_MEANING",
- 14: "BLOB",
- 15: "TEXT",
- 16: "BYTESTRING",
- 1: "ATOM_CATEGORY",
- 2: "ATOM_LINK",
- 3: "ATOM_TITLE",
- 4: "ATOM_CONTENT",
- 5: "ATOM_SUMMARY",
- 6: "ATOM_AUTHOR",
- 7: "GD_WHEN",
- 8: "GD_EMAIL",
- 9: "GEORSS_POINT",
- 10: "GD_IM",
- 11: "GD_PHONENUMBER",
- 12: "GD_POSTALADDRESS",
- 13: "GD_RATING",
- 17: "BLOBKEY",
- 19: "ENTITY_PROTO",
- 18: "INDEX_VALUE",
-}
-var Property_Meaning_value = map[string]int32{
- "NO_MEANING": 0,
- "BLOB": 14,
- "TEXT": 15,
- "BYTESTRING": 16,
- "ATOM_CATEGORY": 1,
- "ATOM_LINK": 2,
- "ATOM_TITLE": 3,
- "ATOM_CONTENT": 4,
- "ATOM_SUMMARY": 5,
- "ATOM_AUTHOR": 6,
- "GD_WHEN": 7,
- "GD_EMAIL": 8,
- "GEORSS_POINT": 9,
- "GD_IM": 10,
- "GD_PHONENUMBER": 11,
- "GD_POSTALADDRESS": 12,
- "GD_RATING": 13,
- "BLOBKEY": 17,
- "ENTITY_PROTO": 19,
- "INDEX_VALUE": 18,
-}
-
-func (x Property_Meaning) Enum() *Property_Meaning {
- p := new(Property_Meaning)
- *p = x
- return p
-}
-func (x Property_Meaning) String() string {
- return proto.EnumName(Property_Meaning_name, int32(x))
-}
-func (x *Property_Meaning) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning")
- if err != nil {
- return err
- }
- *x = Property_Meaning(value)
- return nil
-}
-
-type Property_FtsTokenizationOption int32
-
-const (
- Property_HTML Property_FtsTokenizationOption = 1
- Property_ATOM Property_FtsTokenizationOption = 2
-)
-
-var Property_FtsTokenizationOption_name = map[int32]string{
- 1: "HTML",
- 2: "ATOM",
-}
-var Property_FtsTokenizationOption_value = map[string]int32{
- "HTML": 1,
- "ATOM": 2,
-}
-
-func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption {
- p := new(Property_FtsTokenizationOption)
- *p = x
- return p
-}
-func (x Property_FtsTokenizationOption) String() string {
- return proto.EnumName(Property_FtsTokenizationOption_name, int32(x))
-}
-func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption")
- if err != nil {
- return err
- }
- *x = Property_FtsTokenizationOption(value)
- return nil
-}
-
-type EntityProto_Kind int32
-
-const (
- EntityProto_GD_CONTACT EntityProto_Kind = 1
- EntityProto_GD_EVENT EntityProto_Kind = 2
- EntityProto_GD_MESSAGE EntityProto_Kind = 3
-)
-
-var EntityProto_Kind_name = map[int32]string{
- 1: "GD_CONTACT",
- 2: "GD_EVENT",
- 3: "GD_MESSAGE",
-}
-var EntityProto_Kind_value = map[string]int32{
- "GD_CONTACT": 1,
- "GD_EVENT": 2,
- "GD_MESSAGE": 3,
-}
-
-func (x EntityProto_Kind) Enum() *EntityProto_Kind {
- p := new(EntityProto_Kind)
- *p = x
- return p
-}
-func (x EntityProto_Kind) String() string {
- return proto.EnumName(EntityProto_Kind_name, int32(x))
-}
-func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind")
- if err != nil {
- return err
- }
- *x = EntityProto_Kind(value)
- return nil
-}
-
-type Index_Property_Direction int32
-
-const (
- Index_Property_ASCENDING Index_Property_Direction = 1
- Index_Property_DESCENDING Index_Property_Direction = 2
-)
-
-var Index_Property_Direction_name = map[int32]string{
- 1: "ASCENDING",
- 2: "DESCENDING",
-}
-var Index_Property_Direction_value = map[string]int32{
- "ASCENDING": 1,
- "DESCENDING": 2,
-}
-
-func (x Index_Property_Direction) Enum() *Index_Property_Direction {
- p := new(Index_Property_Direction)
- *p = x
- return p
-}
-func (x Index_Property_Direction) String() string {
- return proto.EnumName(Index_Property_Direction_name, int32(x))
-}
-func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction")
- if err != nil {
- return err
- }
- *x = Index_Property_Direction(value)
- return nil
-}
-
-type CompositeIndex_State int32
-
-const (
- CompositeIndex_WRITE_ONLY CompositeIndex_State = 1
- CompositeIndex_READ_WRITE CompositeIndex_State = 2
- CompositeIndex_DELETED CompositeIndex_State = 3
- CompositeIndex_ERROR CompositeIndex_State = 4
-)
-
-var CompositeIndex_State_name = map[int32]string{
- 1: "WRITE_ONLY",
- 2: "READ_WRITE",
- 3: "DELETED",
- 4: "ERROR",
-}
-var CompositeIndex_State_value = map[string]int32{
- "WRITE_ONLY": 1,
- "READ_WRITE": 2,
- "DELETED": 3,
- "ERROR": 4,
-}
-
-func (x CompositeIndex_State) Enum() *CompositeIndex_State {
- p := new(CompositeIndex_State)
- *p = x
- return p
-}
-func (x CompositeIndex_State) String() string {
- return proto.EnumName(CompositeIndex_State_name, int32(x))
-}
-func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State")
- if err != nil {
- return err
- }
- *x = CompositeIndex_State(value)
- return nil
-}
-
-type Snapshot_Status int32
-
-const (
- Snapshot_INACTIVE Snapshot_Status = 0
- Snapshot_ACTIVE Snapshot_Status = 1
-)
-
-var Snapshot_Status_name = map[int32]string{
- 0: "INACTIVE",
- 1: "ACTIVE",
-}
-var Snapshot_Status_value = map[string]int32{
- "INACTIVE": 0,
- "ACTIVE": 1,
-}
-
-func (x Snapshot_Status) Enum() *Snapshot_Status {
- p := new(Snapshot_Status)
- *p = x
- return p
-}
-func (x Snapshot_Status) String() string {
- return proto.EnumName(Snapshot_Status_name, int32(x))
-}
-func (x *Snapshot_Status) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status")
- if err != nil {
- return err
- }
- *x = Snapshot_Status(value)
- return nil
-}
-
-type Query_Hint int32
-
-const (
- Query_ORDER_FIRST Query_Hint = 1
- Query_ANCESTOR_FIRST Query_Hint = 2
- Query_FILTER_FIRST Query_Hint = 3
-)
-
-var Query_Hint_name = map[int32]string{
- 1: "ORDER_FIRST",
- 2: "ANCESTOR_FIRST",
- 3: "FILTER_FIRST",
-}
-var Query_Hint_value = map[string]int32{
- "ORDER_FIRST": 1,
- "ANCESTOR_FIRST": 2,
- "FILTER_FIRST": 3,
-}
-
-func (x Query_Hint) Enum() *Query_Hint {
- p := new(Query_Hint)
- *p = x
- return p
-}
-func (x Query_Hint) String() string {
- return proto.EnumName(Query_Hint_name, int32(x))
-}
-func (x *Query_Hint) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint")
- if err != nil {
- return err
- }
- *x = Query_Hint(value)
- return nil
-}
-
-type Query_Filter_Operator int32
-
-const (
- Query_Filter_LESS_THAN Query_Filter_Operator = 1
- Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2
- Query_Filter_GREATER_THAN Query_Filter_Operator = 3
- Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4
- Query_Filter_EQUAL Query_Filter_Operator = 5
- Query_Filter_IN Query_Filter_Operator = 6
- Query_Filter_EXISTS Query_Filter_Operator = 7
-)
-
-var Query_Filter_Operator_name = map[int32]string{
- 1: "LESS_THAN",
- 2: "LESS_THAN_OR_EQUAL",
- 3: "GREATER_THAN",
- 4: "GREATER_THAN_OR_EQUAL",
- 5: "EQUAL",
- 6: "IN",
- 7: "EXISTS",
-}
-var Query_Filter_Operator_value = map[string]int32{
- "LESS_THAN": 1,
- "LESS_THAN_OR_EQUAL": 2,
- "GREATER_THAN": 3,
- "GREATER_THAN_OR_EQUAL": 4,
- "EQUAL": 5,
- "IN": 6,
- "EXISTS": 7,
-}
-
-func (x Query_Filter_Operator) Enum() *Query_Filter_Operator {
- p := new(Query_Filter_Operator)
- *p = x
- return p
-}
-func (x Query_Filter_Operator) String() string {
- return proto.EnumName(Query_Filter_Operator_name, int32(x))
-}
-func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator")
- if err != nil {
- return err
- }
- *x = Query_Filter_Operator(value)
- return nil
-}
-
-type Query_Order_Direction int32
-
-const (
- Query_Order_ASCENDING Query_Order_Direction = 1
- Query_Order_DESCENDING Query_Order_Direction = 2
-)
-
-var Query_Order_Direction_name = map[int32]string{
- 1: "ASCENDING",
- 2: "DESCENDING",
-}
-var Query_Order_Direction_value = map[string]int32{
- "ASCENDING": 1,
- "DESCENDING": 2,
-}
-
-func (x Query_Order_Direction) Enum() *Query_Order_Direction {
- p := new(Query_Order_Direction)
- *p = x
- return p
-}
-func (x Query_Order_Direction) String() string {
- return proto.EnumName(Query_Order_Direction_name, int32(x))
-}
-func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction")
- if err != nil {
- return err
- }
- *x = Query_Order_Direction(value)
- return nil
-}
-
-type Error_ErrorCode int32
-
-const (
- Error_BAD_REQUEST Error_ErrorCode = 1
- Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2
- Error_INTERNAL_ERROR Error_ErrorCode = 3
- Error_NEED_INDEX Error_ErrorCode = 4
- Error_TIMEOUT Error_ErrorCode = 5
- Error_PERMISSION_DENIED Error_ErrorCode = 6
- Error_BIGTABLE_ERROR Error_ErrorCode = 7
- Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8
- Error_CAPABILITY_DISABLED Error_ErrorCode = 9
- Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10
- Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11
-)
-
-var Error_ErrorCode_name = map[int32]string{
- 1: "BAD_REQUEST",
- 2: "CONCURRENT_TRANSACTION",
- 3: "INTERNAL_ERROR",
- 4: "NEED_INDEX",
- 5: "TIMEOUT",
- 6: "PERMISSION_DENIED",
- 7: "BIGTABLE_ERROR",
- 8: "COMMITTED_BUT_STILL_APPLYING",
- 9: "CAPABILITY_DISABLED",
- 10: "TRY_ALTERNATE_BACKEND",
- 11: "SAFE_TIME_TOO_OLD",
-}
-var Error_ErrorCode_value = map[string]int32{
- "BAD_REQUEST": 1,
- "CONCURRENT_TRANSACTION": 2,
- "INTERNAL_ERROR": 3,
- "NEED_INDEX": 4,
- "TIMEOUT": 5,
- "PERMISSION_DENIED": 6,
- "BIGTABLE_ERROR": 7,
- "COMMITTED_BUT_STILL_APPLYING": 8,
- "CAPABILITY_DISABLED": 9,
- "TRY_ALTERNATE_BACKEND": 10,
- "SAFE_TIME_TOO_OLD": 11,
-}
-
-func (x Error_ErrorCode) Enum() *Error_ErrorCode {
- p := new(Error_ErrorCode)
- *p = x
- return p
-}
-func (x Error_ErrorCode) String() string {
- return proto.EnumName(Error_ErrorCode_name, int32(x))
-}
-func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode")
- if err != nil {
- return err
- }
- *x = Error_ErrorCode(value)
- return nil
-}
-
-type PutRequest_AutoIdPolicy int32
-
-const (
- PutRequest_CURRENT PutRequest_AutoIdPolicy = 0
- PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1
-)
-
-var PutRequest_AutoIdPolicy_name = map[int32]string{
- 0: "CURRENT",
- 1: "SEQUENTIAL",
-}
-var PutRequest_AutoIdPolicy_value = map[string]int32{
- "CURRENT": 0,
- "SEQUENTIAL": 1,
-}
-
-func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy {
- p := new(PutRequest_AutoIdPolicy)
- *p = x
- return p
-}
-func (x PutRequest_AutoIdPolicy) String() string {
- return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x))
-}
-func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy")
- if err != nil {
- return err
- }
- *x = PutRequest_AutoIdPolicy(value)
- return nil
-}
-
-type Action struct {
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Action) Reset() { *m = Action{} }
-func (m *Action) String() string { return proto.CompactTextString(m) }
-func (*Action) ProtoMessage() {}
-
-type PropertyValue struct {
- Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
- BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
- StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
- DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
- Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"`
- Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"`
- Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *PropertyValue) Reset() { *m = PropertyValue{} }
-func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue) ProtoMessage() {}
-
-func (m *PropertyValue) GetInt64Value() int64 {
- if m != nil && m.Int64Value != nil {
- return *m.Int64Value
- }
- return 0
-}
-
-func (m *PropertyValue) GetBooleanValue() bool {
- if m != nil && m.BooleanValue != nil {
- return *m.BooleanValue
- }
- return false
-}
-
-func (m *PropertyValue) GetStringValue() string {
- if m != nil && m.StringValue != nil {
- return *m.StringValue
- }
- return ""
-}
-
-func (m *PropertyValue) GetDoubleValue() float64 {
- if m != nil && m.DoubleValue != nil {
- return *m.DoubleValue
- }
- return 0
-}
-
-func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue {
- if m != nil {
- return m.Pointvalue
- }
- return nil
-}
-
-func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue {
- if m != nil {
- return m.Uservalue
- }
- return nil
-}
-
-func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {
- if m != nil {
- return m.Referencevalue
- }
- return nil
-}
-
-type PropertyValue_PointValue struct {
- X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
- Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} }
-func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_PointValue) ProtoMessage() {}
-
-func (m *PropertyValue_PointValue) GetX() float64 {
- if m != nil && m.X != nil {
- return *m.X
- }
- return 0
-}
-
-func (m *PropertyValue_PointValue) GetY() float64 {
- if m != nil && m.Y != nil {
- return *m.Y
- }
- return 0
-}
-
-type PropertyValue_UserValue struct {
- Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
- AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"`
- Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
- FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"`
- FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} }
-func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_UserValue) ProtoMessage() {}
-
-func (m *PropertyValue_UserValue) GetEmail() string {
- if m != nil && m.Email != nil {
- return *m.Email
- }
- return ""
-}
-
-func (m *PropertyValue_UserValue) GetAuthDomain() string {
- if m != nil && m.AuthDomain != nil {
- return *m.AuthDomain
- }
- return ""
-}
-
-func (m *PropertyValue_UserValue) GetNickname() string {
- if m != nil && m.Nickname != nil {
- return *m.Nickname
- }
- return ""
-}
-
-func (m *PropertyValue_UserValue) GetFederatedIdentity() string {
- if m != nil && m.FederatedIdentity != nil {
- return *m.FederatedIdentity
- }
- return ""
-}
-
-func (m *PropertyValue_UserValue) GetFederatedProvider() string {
- if m != nil && m.FederatedProvider != nil {
- return *m.FederatedProvider
- }
- return ""
-}
-
-type PropertyValue_ReferenceValue struct {
- App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
- NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"`
- Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} }
-func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_ReferenceValue) ProtoMessage() {}
-
-func (m *PropertyValue_ReferenceValue) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *PropertyValue_ReferenceValue) GetNameSpace() string {
- if m != nil && m.NameSpace != nil {
- return *m.NameSpace
- }
- return ""
-}
-
-func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement {
- if m != nil {
- return m.Pathelement
- }
- return nil
-}
-
-type PropertyValue_ReferenceValue_PathElement struct {
- Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
- Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
- Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
- *m = PropertyValue_ReferenceValue_PathElement{}
-}
-func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {}
-
-func (m *PropertyValue_ReferenceValue_PathElement) GetType() string {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return ""
-}
-
-func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 {
- if m != nil && m.Id != nil {
- return *m.Id
- }
- return 0
-}
-
-func (m *PropertyValue_ReferenceValue_PathElement) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-type Property struct {
- Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"`
- MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"`
- Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
- Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"`
- Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"`
- Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"`
- FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"`
- Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Property) Reset() { *m = Property{} }
-func (m *Property) String() string { return proto.CompactTextString(m) }
-func (*Property) ProtoMessage() {}
-
-const Default_Property_Meaning Property_Meaning = Property_NO_MEANING
-const Default_Property_Searchable bool = false
-const Default_Property_Locale string = "en"
-
-func (m *Property) GetMeaning() Property_Meaning {
- if m != nil && m.Meaning != nil {
- return *m.Meaning
- }
- return Default_Property_Meaning
-}
-
-func (m *Property) GetMeaningUri() string {
- if m != nil && m.MeaningUri != nil {
- return *m.MeaningUri
- }
- return ""
-}
-
-func (m *Property) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *Property) GetValue() *PropertyValue {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *Property) GetMultiple() bool {
- if m != nil && m.Multiple != nil {
- return *m.Multiple
- }
- return false
-}
-
-func (m *Property) GetSearchable() bool {
- if m != nil && m.Searchable != nil {
- return *m.Searchable
- }
- return Default_Property_Searchable
-}
-
-func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption {
- if m != nil && m.FtsTokenizationOption != nil {
- return *m.FtsTokenizationOption
- }
- return Property_HTML
-}
-
-func (m *Property) GetLocale() string {
- if m != nil && m.Locale != nil {
- return *m.Locale
- }
- return Default_Property_Locale
-}
-
-type Path struct {
- Element []*Path_Element `protobuf:"group,1,rep,name=Element" json:"element,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Path) Reset() { *m = Path{} }
-func (m *Path) String() string { return proto.CompactTextString(m) }
-func (*Path) ProtoMessage() {}
-
-func (m *Path) GetElement() []*Path_Element {
- if m != nil {
- return m.Element
- }
- return nil
-}
-
-type Path_Element struct {
- Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
- Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
- Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Path_Element) Reset() { *m = Path_Element{} }
-func (m *Path_Element) String() string { return proto.CompactTextString(m) }
-func (*Path_Element) ProtoMessage() {}
-
-func (m *Path_Element) GetType() string {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return ""
-}
-
-func (m *Path_Element) GetId() int64 {
- if m != nil && m.Id != nil {
- return *m.Id
- }
- return 0
-}
-
-func (m *Path_Element) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-type Reference struct {
- App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
- NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"`
- Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Reference) Reset() { *m = Reference{} }
-func (m *Reference) String() string { return proto.CompactTextString(m) }
-func (*Reference) ProtoMessage() {}
-
-func (m *Reference) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *Reference) GetNameSpace() string {
- if m != nil && m.NameSpace != nil {
- return *m.NameSpace
- }
- return ""
-}
-
-func (m *Reference) GetPath() *Path {
- if m != nil {
- return m.Path
- }
- return nil
-}
-
-type User struct {
- Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
- AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"`
- Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
- FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"`
- FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *User) Reset() { *m = User{} }
-func (m *User) String() string { return proto.CompactTextString(m) }
-func (*User) ProtoMessage() {}
-
-func (m *User) GetEmail() string {
- if m != nil && m.Email != nil {
- return *m.Email
- }
- return ""
-}
-
-func (m *User) GetAuthDomain() string {
- if m != nil && m.AuthDomain != nil {
- return *m.AuthDomain
- }
- return ""
-}
-
-func (m *User) GetNickname() string {
- if m != nil && m.Nickname != nil {
- return *m.Nickname
- }
- return ""
-}
-
-func (m *User) GetFederatedIdentity() string {
- if m != nil && m.FederatedIdentity != nil {
- return *m.FederatedIdentity
- }
- return ""
-}
-
-func (m *User) GetFederatedProvider() string {
- if m != nil && m.FederatedProvider != nil {
- return *m.FederatedProvider
- }
- return ""
-}
-
-type EntityProto struct {
- Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
- EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"`
- Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
- Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
- KindUri *string `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"`
- Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
- RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"`
- Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *EntityProto) Reset() { *m = EntityProto{} }
-func (m *EntityProto) String() string { return proto.CompactTextString(m) }
-func (*EntityProto) ProtoMessage() {}
-
-func (m *EntityProto) GetKey() *Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *EntityProto) GetEntityGroup() *Path {
- if m != nil {
- return m.EntityGroup
- }
- return nil
-}
-
-func (m *EntityProto) GetOwner() *User {
- if m != nil {
- return m.Owner
- }
- return nil
-}
-
-func (m *EntityProto) GetKind() EntityProto_Kind {
- if m != nil && m.Kind != nil {
- return *m.Kind
- }
- return EntityProto_GD_CONTACT
-}
-
-func (m *EntityProto) GetKindUri() string {
- if m != nil && m.KindUri != nil {
- return *m.KindUri
- }
- return ""
-}
-
-func (m *EntityProto) GetProperty() []*Property {
- if m != nil {
- return m.Property
- }
- return nil
-}
-
-func (m *EntityProto) GetRawProperty() []*Property {
- if m != nil {
- return m.RawProperty
- }
- return nil
-}
-
-func (m *EntityProto) GetRank() int32 {
- if m != nil && m.Rank != nil {
- return *m.Rank
- }
- return 0
-}
-
-type CompositeProperty struct {
- IndexId *int64 `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"`
- Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CompositeProperty) Reset() { *m = CompositeProperty{} }
-func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
-func (*CompositeProperty) ProtoMessage() {}
-
-func (m *CompositeProperty) GetIndexId() int64 {
- if m != nil && m.IndexId != nil {
- return *m.IndexId
- }
- return 0
-}
-
-func (m *CompositeProperty) GetValue() []string {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-type Index struct {
- EntityType *string `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"`
- Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
- Property []*Index_Property `protobuf:"group,2,rep,name=Property" json:"property,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Index) Reset() { *m = Index{} }
-func (m *Index) String() string { return proto.CompactTextString(m) }
-func (*Index) ProtoMessage() {}
-
-func (m *Index) GetEntityType() string {
- if m != nil && m.EntityType != nil {
- return *m.EntityType
- }
- return ""
-}
-
-func (m *Index) GetAncestor() bool {
- if m != nil && m.Ancestor != nil {
- return *m.Ancestor
- }
- return false
-}
-
-func (m *Index) GetProperty() []*Index_Property {
- if m != nil {
- return m.Property
- }
- return nil
-}
-
-type Index_Property struct {
- Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
- Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Index_Property) Reset() { *m = Index_Property{} }
-func (m *Index_Property) String() string { return proto.CompactTextString(m) }
-func (*Index_Property) ProtoMessage() {}
-
-const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING
-
-func (m *Index_Property) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *Index_Property) GetDirection() Index_Property_Direction {
- if m != nil && m.Direction != nil {
- return *m.Direction
- }
- return Default_Index_Property_Direction
-}
-
-type CompositeIndex struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
- Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
- Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
- State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
- OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CompositeIndex) Reset() { *m = CompositeIndex{} }
-func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
-func (*CompositeIndex) ProtoMessage() {}
-
-const Default_CompositeIndex_OnlyUseIfRequired bool = false
-
-func (m *CompositeIndex) GetAppId() string {
- if m != nil && m.AppId != nil {
- return *m.AppId
- }
- return ""
-}
-
-func (m *CompositeIndex) GetId() int64 {
- if m != nil && m.Id != nil {
- return *m.Id
- }
- return 0
-}
-
-func (m *CompositeIndex) GetDefinition() *Index {
- if m != nil {
- return m.Definition
- }
- return nil
-}
-
-func (m *CompositeIndex) GetState() CompositeIndex_State {
- if m != nil && m.State != nil {
- return *m.State
- }
- return CompositeIndex_WRITE_ONLY
-}
-
-func (m *CompositeIndex) GetOnlyUseIfRequired() bool {
- if m != nil && m.OnlyUseIfRequired != nil {
- return *m.OnlyUseIfRequired
- }
- return Default_CompositeIndex_OnlyUseIfRequired
-}
-
-type IndexPostfix struct {
- IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"`
- Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
- Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *IndexPostfix) Reset() { *m = IndexPostfix{} }
-func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
-func (*IndexPostfix) ProtoMessage() {}
-
-const Default_IndexPostfix_Before bool = true
-
-func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue {
- if m != nil {
- return m.IndexValue
- }
- return nil
-}
-
-func (m *IndexPostfix) GetKey() *Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *IndexPostfix) GetBefore() bool {
- if m != nil && m.Before != nil {
- return *m.Before
- }
- return Default_IndexPostfix_Before
-}
-
-type IndexPostfix_IndexValue struct {
- PropertyName *string `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"`
- Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} }
-func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
-func (*IndexPostfix_IndexValue) ProtoMessage() {}
-
-func (m *IndexPostfix_IndexValue) GetPropertyName() string {
- if m != nil && m.PropertyName != nil {
- return *m.PropertyName
- }
- return ""
-}
-
-func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-type IndexPosition struct {
- Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
- Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *IndexPosition) Reset() { *m = IndexPosition{} }
-func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
-func (*IndexPosition) ProtoMessage() {}
-
-const Default_IndexPosition_Before bool = true
-
-func (m *IndexPosition) GetKey() string {
- if m != nil && m.Key != nil {
- return *m.Key
- }
- return ""
-}
-
-func (m *IndexPosition) GetBefore() bool {
- if m != nil && m.Before != nil {
- return *m.Before
- }
- return Default_IndexPosition_Before
-}
-
-type Snapshot struct {
- Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Snapshot) Reset() { *m = Snapshot{} }
-func (m *Snapshot) String() string { return proto.CompactTextString(m) }
-func (*Snapshot) ProtoMessage() {}
-
-func (m *Snapshot) GetTs() int64 {
- if m != nil && m.Ts != nil {
- return *m.Ts
- }
- return 0
-}
-
-type InternalHeader struct {
- Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *InternalHeader) Reset() { *m = InternalHeader{} }
-func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
-func (*InternalHeader) ProtoMessage() {}
-
-func (m *InternalHeader) GetQos() string {
- if m != nil && m.Qos != nil {
- return *m.Qos
- }
- return ""
-}
-
-type Transaction struct {
- Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
- Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
- App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
- MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Transaction) Reset() { *m = Transaction{} }
-func (m *Transaction) String() string { return proto.CompactTextString(m) }
-func (*Transaction) ProtoMessage() {}
-
-const Default_Transaction_MarkChanges bool = false
-
-func (m *Transaction) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *Transaction) GetHandle() uint64 {
- if m != nil && m.Handle != nil {
- return *m.Handle
- }
- return 0
-}
-
-func (m *Transaction) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *Transaction) GetMarkChanges() bool {
- if m != nil && m.MarkChanges != nil {
- return *m.MarkChanges
- }
- return Default_Transaction_MarkChanges
-}
-
-type Query struct {
- Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
- App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
- NameSpace *string `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"`
- Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
- Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
- Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter" json:"filter,omitempty"`
- SearchQuery *string `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"`
- Order []*Query_Order `protobuf:"group,9,rep,name=Order" json:"order,omitempty"`
- Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
- Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
- Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
- Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
- CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"`
- EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"`
- CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"`
- RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"`
- KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"`
- Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
- Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
- FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"`
- Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
- PropertyName []string `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"`
- GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"`
- Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
- MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"`
- SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"`
- PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Query) Reset() { *m = Query{} }
-func (m *Query) String() string { return proto.CompactTextString(m) }
-func (*Query) ProtoMessage() {}
-
-const Default_Query_Offset int32 = 0
-const Default_Query_RequirePerfectPlan bool = false
-const Default_Query_KeysOnly bool = false
-const Default_Query_Compile bool = false
-const Default_Query_PersistOffset bool = false
-
-func (m *Query) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *Query) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *Query) GetNameSpace() string {
- if m != nil && m.NameSpace != nil {
- return *m.NameSpace
- }
- return ""
-}
-
-func (m *Query) GetKind() string {
- if m != nil && m.Kind != nil {
- return *m.Kind
- }
- return ""
-}
-
-func (m *Query) GetAncestor() *Reference {
- if m != nil {
- return m.Ancestor
- }
- return nil
-}
-
-func (m *Query) GetFilter() []*Query_Filter {
- if m != nil {
- return m.Filter
- }
- return nil
-}
-
-func (m *Query) GetSearchQuery() string {
- if m != nil && m.SearchQuery != nil {
- return *m.SearchQuery
- }
- return ""
-}
-
-func (m *Query) GetOrder() []*Query_Order {
- if m != nil {
- return m.Order
- }
- return nil
-}
-
-func (m *Query) GetHint() Query_Hint {
- if m != nil && m.Hint != nil {
- return *m.Hint
- }
- return Query_ORDER_FIRST
-}
-
-func (m *Query) GetCount() int32 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-func (m *Query) GetOffset() int32 {
- if m != nil && m.Offset != nil {
- return *m.Offset
- }
- return Default_Query_Offset
-}
-
-func (m *Query) GetLimit() int32 {
- if m != nil && m.Limit != nil {
- return *m.Limit
- }
- return 0
-}
-
-func (m *Query) GetCompiledCursor() *CompiledCursor {
- if m != nil {
- return m.CompiledCursor
- }
- return nil
-}
-
-func (m *Query) GetEndCompiledCursor() *CompiledCursor {
- if m != nil {
- return m.EndCompiledCursor
- }
- return nil
-}
-
-func (m *Query) GetCompositeIndex() []*CompositeIndex {
- if m != nil {
- return m.CompositeIndex
- }
- return nil
-}
-
-func (m *Query) GetRequirePerfectPlan() bool {
- if m != nil && m.RequirePerfectPlan != nil {
- return *m.RequirePerfectPlan
- }
- return Default_Query_RequirePerfectPlan
-}
-
-func (m *Query) GetKeysOnly() bool {
- if m != nil && m.KeysOnly != nil {
- return *m.KeysOnly
- }
- return Default_Query_KeysOnly
-}
-
-func (m *Query) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *Query) GetCompile() bool {
- if m != nil && m.Compile != nil {
- return *m.Compile
- }
- return Default_Query_Compile
-}
-
-func (m *Query) GetFailoverMs() int64 {
- if m != nil && m.FailoverMs != nil {
- return *m.FailoverMs
- }
- return 0
-}
-
-func (m *Query) GetStrong() bool {
- if m != nil && m.Strong != nil {
- return *m.Strong
- }
- return false
-}
-
-func (m *Query) GetPropertyName() []string {
- if m != nil {
- return m.PropertyName
- }
- return nil
-}
-
-func (m *Query) GetGroupByPropertyName() []string {
- if m != nil {
- return m.GroupByPropertyName
- }
- return nil
-}
-
-func (m *Query) GetDistinct() bool {
- if m != nil && m.Distinct != nil {
- return *m.Distinct
- }
- return false
-}
-
-func (m *Query) GetMinSafeTimeSeconds() int64 {
- if m != nil && m.MinSafeTimeSeconds != nil {
- return *m.MinSafeTimeSeconds
- }
- return 0
-}
-
-func (m *Query) GetSafeReplicaName() []string {
- if m != nil {
- return m.SafeReplicaName
- }
- return nil
-}
-
-func (m *Query) GetPersistOffset() bool {
- if m != nil && m.PersistOffset != nil {
- return *m.PersistOffset
- }
- return Default_Query_PersistOffset
-}
-
-type Query_Filter struct {
- Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
- Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Query_Filter) Reset() { *m = Query_Filter{} }
-func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
-func (*Query_Filter) ProtoMessage() {}
-
-func (m *Query_Filter) GetOp() Query_Filter_Operator {
- if m != nil && m.Op != nil {
- return *m.Op
- }
- return Query_Filter_LESS_THAN
-}
-
-func (m *Query_Filter) GetProperty() []*Property {
- if m != nil {
- return m.Property
- }
- return nil
-}
-
-type Query_Order struct {
- Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
- Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Query_Order) Reset() { *m = Query_Order{} }
-func (m *Query_Order) String() string { return proto.CompactTextString(m) }
-func (*Query_Order) ProtoMessage() {}
-
-const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING
-
-func (m *Query_Order) GetProperty() string {
- if m != nil && m.Property != nil {
- return *m.Property
- }
- return ""
-}
-
-func (m *Query_Order) GetDirection() Query_Order_Direction {
- if m != nil && m.Direction != nil {
- return *m.Direction
- }
- return Default_Query_Order_Direction
-}
-
-type CompiledQuery struct {
- Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"`
- Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"`
- IndexDef *Index `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"`
- Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
- Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
- KeysOnly *bool `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"`
- PropertyName []string `protobuf:"bytes,24,rep,name=property_name" json:"property_name,omitempty"`
- DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"`
- Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CompiledQuery) Reset() { *m = CompiledQuery{} }
-func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery) ProtoMessage() {}
-
-const Default_CompiledQuery_Offset int32 = 0
-
-func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan {
- if m != nil {
- return m.Primaryscan
- }
- return nil
-}
-
-func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan {
- if m != nil {
- return m.Mergejoinscan
- }
- return nil
-}
-
-func (m *CompiledQuery) GetIndexDef() *Index {
- if m != nil {
- return m.IndexDef
- }
- return nil
-}
-
-func (m *CompiledQuery) GetOffset() int32 {
- if m != nil && m.Offset != nil {
- return *m.Offset
- }
- return Default_CompiledQuery_Offset
-}
-
-func (m *CompiledQuery) GetLimit() int32 {
- if m != nil && m.Limit != nil {
- return *m.Limit
- }
- return 0
-}
-
-func (m *CompiledQuery) GetKeysOnly() bool {
- if m != nil && m.KeysOnly != nil {
- return *m.KeysOnly
- }
- return false
-}
-
-func (m *CompiledQuery) GetPropertyName() []string {
- if m != nil {
- return m.PropertyName
- }
- return nil
-}
-
-func (m *CompiledQuery) GetDistinctInfixSize() int32 {
- if m != nil && m.DistinctInfixSize != nil {
- return *m.DistinctInfixSize
- }
- return 0
-}
-
-func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter {
- if m != nil {
- return m.Entityfilter
- }
- return nil
-}
-
-type CompiledQuery_PrimaryScan struct {
- IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"`
- StartKey *string `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"`
- StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"`
- EndKey *string `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"`
- EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"`
- StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"`
- EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"`
- EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} }
-func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery_PrimaryScan) ProtoMessage() {}
-
-func (m *CompiledQuery_PrimaryScan) GetIndexName() string {
- if m != nil && m.IndexName != nil {
- return *m.IndexName
- }
- return ""
-}
-
-func (m *CompiledQuery_PrimaryScan) GetStartKey() string {
- if m != nil && m.StartKey != nil {
- return *m.StartKey
- }
- return ""
-}
-
-func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool {
- if m != nil && m.StartInclusive != nil {
- return *m.StartInclusive
- }
- return false
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndKey() string {
- if m != nil && m.EndKey != nil {
- return *m.EndKey
- }
- return ""
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool {
- if m != nil && m.EndInclusive != nil {
- return *m.EndInclusive
- }
- return false
-}
-
-func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string {
- if m != nil {
- return m.StartPostfixValue
- }
- return nil
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string {
- if m != nil {
- return m.EndPostfixValue
- }
- return nil
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {
- if m != nil && m.EndUnappliedLogTimestampUs != nil {
- return *m.EndUnappliedLogTimestampUs
- }
- return 0
-}
-
-type CompiledQuery_MergeJoinScan struct {
- IndexName *string `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"`
- PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"`
- ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,def=0" json:"value_prefix,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} }
-func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery_MergeJoinScan) ProtoMessage() {}
-
-const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false
-
-func (m *CompiledQuery_MergeJoinScan) GetIndexName() string {
- if m != nil && m.IndexName != nil {
- return *m.IndexName
- }
- return ""
-}
-
-func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string {
- if m != nil {
- return m.PrefixValue
- }
- return nil
-}
-
-func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {
- if m != nil && m.ValuePrefix != nil {
- return *m.ValuePrefix
- }
- return Default_CompiledQuery_MergeJoinScan_ValuePrefix
-}
-
-type CompiledQuery_EntityFilter struct {
- Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
- Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
- Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} }
-func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery_EntityFilter) ProtoMessage() {}
-
-const Default_CompiledQuery_EntityFilter_Distinct bool = false
-
-func (m *CompiledQuery_EntityFilter) GetDistinct() bool {
- if m != nil && m.Distinct != nil {
- return *m.Distinct
- }
- return Default_CompiledQuery_EntityFilter_Distinct
-}
-
-func (m *CompiledQuery_EntityFilter) GetKind() string {
- if m != nil && m.Kind != nil {
- return *m.Kind
- }
- return ""
-}
-
-func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {
- if m != nil {
- return m.Ancestor
- }
- return nil
-}
-
-type CompiledCursor struct {
- Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position" json:"position,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CompiledCursor) Reset() { *m = CompiledCursor{} }
-func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
-func (*CompiledCursor) ProtoMessage() {}
-
-func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
- if m != nil {
- return m.Position
- }
- return nil
-}
-
-type CompiledCursor_Position struct {
- StartKey *string `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"`
- Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"`
- Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
- StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} }
-func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
-func (*CompiledCursor_Position) ProtoMessage() {}
-
-const Default_CompiledCursor_Position_StartInclusive bool = true
-
-func (m *CompiledCursor_Position) GetStartKey() string {
- if m != nil && m.StartKey != nil {
- return *m.StartKey
- }
- return ""
-}
-
-func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue {
- if m != nil {
- return m.Indexvalue
- }
- return nil
-}
-
-func (m *CompiledCursor_Position) GetKey() *Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *CompiledCursor_Position) GetStartInclusive() bool {
- if m != nil && m.StartInclusive != nil {
- return *m.StartInclusive
- }
- return Default_CompiledCursor_Position_StartInclusive
-}
-
-type CompiledCursor_Position_IndexValue struct {
- Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
- Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} }
-func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }
-func (*CompiledCursor_Position_IndexValue) ProtoMessage() {}
-
-func (m *CompiledCursor_Position_IndexValue) GetProperty() string {
- if m != nil && m.Property != nil {
- return *m.Property
- }
- return ""
-}
-
-func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-type Cursor struct {
- Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
- App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Cursor) Reset() { *m = Cursor{} }
-func (m *Cursor) String() string { return proto.CompactTextString(m) }
-func (*Cursor) ProtoMessage() {}
-
-func (m *Cursor) GetCursor() uint64 {
- if m != nil && m.Cursor != nil {
- return *m.Cursor
- }
- return 0
-}
-
-func (m *Cursor) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-type Error struct {
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Error) Reset() { *m = Error{} }
-func (m *Error) String() string { return proto.CompactTextString(m) }
-func (*Error) ProtoMessage() {}
-
-type Cost struct {
- IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes" json:"index_writes,omitempty"`
- IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"`
- EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"`
- EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"`
- Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"`
- ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta" json:"approximate_storage_delta,omitempty"`
- IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Cost) Reset() { *m = Cost{} }
-func (m *Cost) String() string { return proto.CompactTextString(m) }
-func (*Cost) ProtoMessage() {}
-
-func (m *Cost) GetIndexWrites() int32 {
- if m != nil && m.IndexWrites != nil {
- return *m.IndexWrites
- }
- return 0
-}
-
-func (m *Cost) GetIndexWriteBytes() int32 {
- if m != nil && m.IndexWriteBytes != nil {
- return *m.IndexWriteBytes
- }
- return 0
-}
-
-func (m *Cost) GetEntityWrites() int32 {
- if m != nil && m.EntityWrites != nil {
- return *m.EntityWrites
- }
- return 0
-}
-
-func (m *Cost) GetEntityWriteBytes() int32 {
- if m != nil && m.EntityWriteBytes != nil {
- return *m.EntityWriteBytes
- }
- return 0
-}
-
-func (m *Cost) GetCommitcost() *Cost_CommitCost {
- if m != nil {
- return m.Commitcost
- }
- return nil
-}
-
-func (m *Cost) GetApproximateStorageDelta() int32 {
- if m != nil && m.ApproximateStorageDelta != nil {
- return *m.ApproximateStorageDelta
- }
- return 0
-}
-
-func (m *Cost) GetIdSequenceUpdates() int32 {
- if m != nil && m.IdSequenceUpdates != nil {
- return *m.IdSequenceUpdates
- }
- return 0
-}
-
-type Cost_CommitCost struct {
- RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"`
- RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} }
-func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
-func (*Cost_CommitCost) ProtoMessage() {}
-
-func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {
- if m != nil && m.RequestedEntityPuts != nil {
- return *m.RequestedEntityPuts
- }
- return 0
-}
-
-func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {
- if m != nil && m.RequestedEntityDeletes != nil {
- return *m.RequestedEntityDeletes
- }
- return 0
-}
-
-type GetRequest struct {
- Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
- Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
- Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
- FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"`
- Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
- AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetRequest) Reset() { *m = GetRequest{} }
-func (m *GetRequest) String() string { return proto.CompactTextString(m) }
-func (*GetRequest) ProtoMessage() {}
-
-const Default_GetRequest_AllowDeferred bool = false
-
-func (m *GetRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *GetRequest) GetKey() []*Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *GetRequest) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *GetRequest) GetFailoverMs() int64 {
- if m != nil && m.FailoverMs != nil {
- return *m.FailoverMs
- }
- return 0
-}
-
-func (m *GetRequest) GetStrong() bool {
- if m != nil && m.Strong != nil {
- return *m.Strong
- }
- return false
-}
-
-func (m *GetRequest) GetAllowDeferred() bool {
- if m != nil && m.AllowDeferred != nil {
- return *m.AllowDeferred
- }
- return Default_GetRequest_AllowDeferred
-}
-
-type GetResponse struct {
- Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity" json:"entity,omitempty"`
- Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
- InOrder *bool `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetResponse) Reset() { *m = GetResponse{} }
-func (m *GetResponse) String() string { return proto.CompactTextString(m) }
-func (*GetResponse) ProtoMessage() {}
-
-const Default_GetResponse_InOrder bool = true
-
-func (m *GetResponse) GetEntity() []*GetResponse_Entity {
- if m != nil {
- return m.Entity
- }
- return nil
-}
-
-func (m *GetResponse) GetDeferred() []*Reference {
- if m != nil {
- return m.Deferred
- }
- return nil
-}
-
-func (m *GetResponse) GetInOrder() bool {
- if m != nil && m.InOrder != nil {
- return *m.InOrder
- }
- return Default_GetResponse_InOrder
-}
-
-type GetResponse_Entity struct {
- Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
- Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
- Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} }
-func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
-func (*GetResponse_Entity) ProtoMessage() {}
-
-func (m *GetResponse_Entity) GetEntity() *EntityProto {
- if m != nil {
- return m.Entity
- }
- return nil
-}
-
-func (m *GetResponse_Entity) GetKey() *Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *GetResponse_Entity) GetVersion() int64 {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return 0
-}
-
-type PutRequest struct {
- Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
- Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
- Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
- CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"`
- Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
- Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
- MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
- Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
- AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *PutRequest) Reset() { *m = PutRequest{} }
-func (m *PutRequest) String() string { return proto.CompactTextString(m) }
-func (*PutRequest) ProtoMessage() {}
-
-const Default_PutRequest_Trusted bool = false
-const Default_PutRequest_Force bool = false
-const Default_PutRequest_MarkChanges bool = false
-const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT
-
-func (m *PutRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *PutRequest) GetEntity() []*EntityProto {
- if m != nil {
- return m.Entity
- }
- return nil
-}
-
-func (m *PutRequest) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *PutRequest) GetCompositeIndex() []*CompositeIndex {
- if m != nil {
- return m.CompositeIndex
- }
- return nil
-}
-
-func (m *PutRequest) GetTrusted() bool {
- if m != nil && m.Trusted != nil {
- return *m.Trusted
- }
- return Default_PutRequest_Trusted
-}
-
-func (m *PutRequest) GetForce() bool {
- if m != nil && m.Force != nil {
- return *m.Force
- }
- return Default_PutRequest_Force
-}
-
-func (m *PutRequest) GetMarkChanges() bool {
- if m != nil && m.MarkChanges != nil {
- return *m.MarkChanges
- }
- return Default_PutRequest_MarkChanges
-}
-
-func (m *PutRequest) GetSnapshot() []*Snapshot {
- if m != nil {
- return m.Snapshot
- }
- return nil
-}
-
-func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {
- if m != nil && m.AutoIdPolicy != nil {
- return *m.AutoIdPolicy
- }
- return Default_PutRequest_AutoIdPolicy
-}
-
-type PutResponse struct {
- Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
- Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
- Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *PutResponse) Reset() { *m = PutResponse{} }
-func (m *PutResponse) String() string { return proto.CompactTextString(m) }
-func (*PutResponse) ProtoMessage() {}
-
-func (m *PutResponse) GetKey() []*Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *PutResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-func (m *PutResponse) GetVersion() []int64 {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-type TouchRequest struct {
- Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
- Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
- CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"`
- Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
- Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *TouchRequest) Reset() { *m = TouchRequest{} }
-func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
-func (*TouchRequest) ProtoMessage() {}
-
-const Default_TouchRequest_Force bool = false
-
-func (m *TouchRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *TouchRequest) GetKey() []*Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex {
- if m != nil {
- return m.CompositeIndex
- }
- return nil
-}
-
-func (m *TouchRequest) GetForce() bool {
- if m != nil && m.Force != nil {
- return *m.Force
- }
- return Default_TouchRequest_Force
-}
-
-func (m *TouchRequest) GetSnapshot() []*Snapshot {
- if m != nil {
- return m.Snapshot
- }
- return nil
-}
-
-type TouchResponse struct {
- Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *TouchResponse) Reset() { *m = TouchResponse{} }
-func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
-func (*TouchResponse) ProtoMessage() {}
-
-func (m *TouchResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-type DeleteRequest struct {
- Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
- Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
- Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
- Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
- Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
- MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
- Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
-func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteRequest) ProtoMessage() {}
-
-const Default_DeleteRequest_Trusted bool = false
-const Default_DeleteRequest_Force bool = false
-const Default_DeleteRequest_MarkChanges bool = false
-
-func (m *DeleteRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *DeleteRequest) GetKey() []*Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *DeleteRequest) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *DeleteRequest) GetTrusted() bool {
- if m != nil && m.Trusted != nil {
- return *m.Trusted
- }
- return Default_DeleteRequest_Trusted
-}
-
-func (m *DeleteRequest) GetForce() bool {
- if m != nil && m.Force != nil {
- return *m.Force
- }
- return Default_DeleteRequest_Force
-}
-
-func (m *DeleteRequest) GetMarkChanges() bool {
- if m != nil && m.MarkChanges != nil {
- return *m.MarkChanges
- }
- return Default_DeleteRequest_MarkChanges
-}
-
-func (m *DeleteRequest) GetSnapshot() []*Snapshot {
- if m != nil {
- return m.Snapshot
- }
- return nil
-}
-
-type DeleteResponse struct {
- Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
- Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
-func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteResponse) ProtoMessage() {}
-
-func (m *DeleteResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-func (m *DeleteResponse) GetVersion() []int64 {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-type NextRequest struct {
- Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
- Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
- Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
- Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
- Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *NextRequest) Reset() { *m = NextRequest{} }
-func (m *NextRequest) String() string { return proto.CompactTextString(m) }
-func (*NextRequest) ProtoMessage() {}
-
-const Default_NextRequest_Offset int32 = 0
-const Default_NextRequest_Compile bool = false
-
-func (m *NextRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *NextRequest) GetCursor() *Cursor {
- if m != nil {
- return m.Cursor
- }
- return nil
-}
-
-func (m *NextRequest) GetCount() int32 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-func (m *NextRequest) GetOffset() int32 {
- if m != nil && m.Offset != nil {
- return *m.Offset
- }
- return Default_NextRequest_Offset
-}
-
-func (m *NextRequest) GetCompile() bool {
- if m != nil && m.Compile != nil {
- return *m.Compile
- }
- return Default_NextRequest_Compile
-}
-
-type QueryResult struct {
- Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
- Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
- SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"`
- MoreResults *bool `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"`
- KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"`
- IndexOnly *bool `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"`
- SmallOps *bool `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"`
- CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"`
- CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"`
- Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
- Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *QueryResult) Reset() { *m = QueryResult{} }
-func (m *QueryResult) String() string { return proto.CompactTextString(m) }
-func (*QueryResult) ProtoMessage() {}
-
-func (m *QueryResult) GetCursor() *Cursor {
- if m != nil {
- return m.Cursor
- }
- return nil
-}
-
-func (m *QueryResult) GetResult() []*EntityProto {
- if m != nil {
- return m.Result
- }
- return nil
-}
-
-func (m *QueryResult) GetSkippedResults() int32 {
- if m != nil && m.SkippedResults != nil {
- return *m.SkippedResults
- }
- return 0
-}
-
-func (m *QueryResult) GetMoreResults() bool {
- if m != nil && m.MoreResults != nil {
- return *m.MoreResults
- }
- return false
-}
-
-func (m *QueryResult) GetKeysOnly() bool {
- if m != nil && m.KeysOnly != nil {
- return *m.KeysOnly
- }
- return false
-}
-
-func (m *QueryResult) GetIndexOnly() bool {
- if m != nil && m.IndexOnly != nil {
- return *m.IndexOnly
- }
- return false
-}
-
-func (m *QueryResult) GetSmallOps() bool {
- if m != nil && m.SmallOps != nil {
- return *m.SmallOps
- }
- return false
-}
-
-func (m *QueryResult) GetCompiledQuery() *CompiledQuery {
- if m != nil {
- return m.CompiledQuery
- }
- return nil
-}
-
-func (m *QueryResult) GetCompiledCursor() *CompiledCursor {
- if m != nil {
- return m.CompiledCursor
- }
- return nil
-}
-
-func (m *QueryResult) GetIndex() []*CompositeIndex {
- if m != nil {
- return m.Index
- }
- return nil
-}
-
-func (m *QueryResult) GetVersion() []int64 {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-type AllocateIdsRequest struct {
- Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
- ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key" json:"model_key,omitempty"`
- Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
- Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
- Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} }
-func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
-func (*AllocateIdsRequest) ProtoMessage() {}
-
-func (m *AllocateIdsRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AllocateIdsRequest) GetModelKey() *Reference {
- if m != nil {
- return m.ModelKey
- }
- return nil
-}
-
-func (m *AllocateIdsRequest) GetSize() int64 {
- if m != nil && m.Size != nil {
- return *m.Size
- }
- return 0
-}
-
-func (m *AllocateIdsRequest) GetMax() int64 {
- if m != nil && m.Max != nil {
- return *m.Max
- }
- return 0
-}
-
-func (m *AllocateIdsRequest) GetReserve() []*Reference {
- if m != nil {
- return m.Reserve
- }
- return nil
-}
-
-type AllocateIdsResponse struct {
- Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
- End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
- Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} }
-func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
-func (*AllocateIdsResponse) ProtoMessage() {}
-
-func (m *AllocateIdsResponse) GetStart() int64 {
- if m != nil && m.Start != nil {
- return *m.Start
- }
- return 0
-}
-
-func (m *AllocateIdsResponse) GetEnd() int64 {
- if m != nil && m.End != nil {
- return *m.End
- }
- return 0
-}
-
-func (m *AllocateIdsResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-type CompositeIndices struct {
- Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CompositeIndices) Reset() { *m = CompositeIndices{} }
-func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
-func (*CompositeIndices) ProtoMessage() {}
-
-func (m *CompositeIndices) GetIndex() []*CompositeIndex {
- if m != nil {
- return m.Index
- }
- return nil
-}
-
-type AddActionsRequest struct {
- Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
- Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
- Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} }
-func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
-func (*AddActionsRequest) ProtoMessage() {}
-
-func (m *AddActionsRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AddActionsRequest) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *AddActionsRequest) GetAction() []*Action {
- if m != nil {
- return m.Action
- }
- return nil
-}
-
-type AddActionsResponse struct {
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} }
-func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
-func (*AddActionsResponse) ProtoMessage() {}
-
-type BeginTransactionRequest struct {
- Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
- App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
- AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} }
-func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
-func (*BeginTransactionRequest) ProtoMessage() {}
-
-const Default_BeginTransactionRequest_AllowMultipleEg bool = false
-
-func (m *BeginTransactionRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *BeginTransactionRequest) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *BeginTransactionRequest) GetAllowMultipleEg() bool {
- if m != nil && m.AllowMultipleEg != nil {
- return *m.AllowMultipleEg
- }
- return Default_BeginTransactionRequest_AllowMultipleEg
-}
-
-type CommitResponse struct {
- Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
- Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CommitResponse) Reset() { *m = CommitResponse{} }
-func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
-func (*CommitResponse) ProtoMessage() {}
-
-func (m *CommitResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-func (m *CommitResponse) GetVersion() []*CommitResponse_Version {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-type CommitResponse_Version struct {
- RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"`
- Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} }
-func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
-func (*CommitResponse_Version) ProtoMessage() {}
-
-func (m *CommitResponse_Version) GetRootEntityKey() *Reference {
- if m != nil {
- return m.RootEntityKey
- }
- return nil
-}
-
-func (m *CommitResponse_Version) GetVersion() int64 {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return 0
-}
-
-func init() {
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
deleted file mode 100644
index e76f126ff7..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
+++ /dev/null
@@ -1,541 +0,0 @@
-syntax = "proto2";
-option go_package = "datastore";
-
-package appengine;
-
-message Action{}
-
-message PropertyValue {
- optional int64 int64Value = 1;
- optional bool booleanValue = 2;
- optional string stringValue = 3;
- optional double doubleValue = 4;
-
- optional group PointValue = 5 {
- required double x = 6;
- required double y = 7;
- }
-
- optional group UserValue = 8 {
- required string email = 9;
- required string auth_domain = 10;
- optional string nickname = 11;
- optional string federated_identity = 21;
- optional string federated_provider = 22;
- }
-
- optional group ReferenceValue = 12 {
- required string app = 13;
- optional string name_space = 20;
- repeated group PathElement = 14 {
- required string type = 15;
- optional int64 id = 16;
- optional string name = 17;
- }
- }
-}
-
-message Property {
- enum Meaning {
- NO_MEANING = 0;
- BLOB = 14;
- TEXT = 15;
- BYTESTRING = 16;
-
- ATOM_CATEGORY = 1;
- ATOM_LINK = 2;
- ATOM_TITLE = 3;
- ATOM_CONTENT = 4;
- ATOM_SUMMARY = 5;
- ATOM_AUTHOR = 6;
-
- GD_WHEN = 7;
- GD_EMAIL = 8;
- GEORSS_POINT = 9;
- GD_IM = 10;
-
- GD_PHONENUMBER = 11;
- GD_POSTALADDRESS = 12;
-
- GD_RATING = 13;
-
- BLOBKEY = 17;
- ENTITY_PROTO = 19;
-
- INDEX_VALUE = 18;
- };
-
- optional Meaning meaning = 1 [default = NO_MEANING];
- optional string meaning_uri = 2;
-
- required string name = 3;
-
- required PropertyValue value = 5;
-
- required bool multiple = 4;
-
- optional bool searchable = 6 [default=false];
-
- enum FtsTokenizationOption {
- HTML = 1;
- ATOM = 2;
- }
-
- optional FtsTokenizationOption fts_tokenization_option = 8;
-
- optional string locale = 9 [default = "en"];
-}
-
-message Path {
- repeated group Element = 1 {
- required string type = 2;
- optional int64 id = 3;
- optional string name = 4;
- }
-}
-
-message Reference {
- required string app = 13;
- optional string name_space = 20;
- required Path path = 14;
-}
-
-message User {
- required string email = 1;
- required string auth_domain = 2;
- optional string nickname = 3;
- optional string federated_identity = 6;
- optional string federated_provider = 7;
-}
-
-message EntityProto {
- required Reference key = 13;
- required Path entity_group = 16;
- optional User owner = 17;
-
- enum Kind {
- GD_CONTACT = 1;
- GD_EVENT = 2;
- GD_MESSAGE = 3;
- }
- optional Kind kind = 4;
- optional string kind_uri = 5;
-
- repeated Property property = 14;
- repeated Property raw_property = 15;
-
- optional int32 rank = 18;
-}
-
-message CompositeProperty {
- required int64 index_id = 1;
- repeated string value = 2;
-}
-
-message Index {
- required string entity_type = 1;
- required bool ancestor = 5;
- repeated group Property = 2 {
- required string name = 3;
- enum Direction {
- ASCENDING = 1;
- DESCENDING = 2;
- }
- optional Direction direction = 4 [default = ASCENDING];
- }
-}
-
-message CompositeIndex {
- required string app_id = 1;
- required int64 id = 2;
- required Index definition = 3;
-
- enum State {
- WRITE_ONLY = 1;
- READ_WRITE = 2;
- DELETED = 3;
- ERROR = 4;
- }
- required State state = 4;
-
- optional bool only_use_if_required = 6 [default = false];
-}
-
-message IndexPostfix {
- message IndexValue {
- required string property_name = 1;
- required PropertyValue value = 2;
- }
-
- repeated IndexValue index_value = 1;
-
- optional Reference key = 2;
-
- optional bool before = 3 [default=true];
-}
-
-message IndexPosition {
- optional string key = 1;
-
- optional bool before = 2 [default=true];
-}
-
-message Snapshot {
- enum Status {
- INACTIVE = 0;
- ACTIVE = 1;
- }
-
- required int64 ts = 1;
-}
-
-message InternalHeader {
- optional string qos = 1;
-}
-
-message Transaction {
- optional InternalHeader header = 4;
- required fixed64 handle = 1;
- required string app = 2;
- optional bool mark_changes = 3 [default = false];
-}
-
-message Query {
- optional InternalHeader header = 39;
-
- required string app = 1;
- optional string name_space = 29;
-
- optional string kind = 3;
- optional Reference ancestor = 17;
-
- repeated group Filter = 4 {
- enum Operator {
- LESS_THAN = 1;
- LESS_THAN_OR_EQUAL = 2;
- GREATER_THAN = 3;
- GREATER_THAN_OR_EQUAL = 4;
- EQUAL = 5;
- IN = 6;
- EXISTS = 7;
- }
-
- required Operator op = 6;
- repeated Property property = 14;
- }
-
- optional string search_query = 8;
-
- repeated group Order = 9 {
- enum Direction {
- ASCENDING = 1;
- DESCENDING = 2;
- }
-
- required string property = 10;
- optional Direction direction = 11 [default = ASCENDING];
- }
-
- enum Hint {
- ORDER_FIRST = 1;
- ANCESTOR_FIRST = 2;
- FILTER_FIRST = 3;
- }
- optional Hint hint = 18;
-
- optional int32 count = 23;
-
- optional int32 offset = 12 [default = 0];
-
- optional int32 limit = 16;
-
- optional CompiledCursor compiled_cursor = 30;
- optional CompiledCursor end_compiled_cursor = 31;
-
- repeated CompositeIndex composite_index = 19;
-
- optional bool require_perfect_plan = 20 [default = false];
-
- optional bool keys_only = 21 [default = false];
-
- optional Transaction transaction = 22;
-
- optional bool compile = 25 [default = false];
-
- optional int64 failover_ms = 26;
-
- optional bool strong = 32;
-
- repeated string property_name = 33;
-
- repeated string group_by_property_name = 34;
-
- optional bool distinct = 24;
-
- optional int64 min_safe_time_seconds = 35;
-
- repeated string safe_replica_name = 36;
-
- optional bool persist_offset = 37 [default=false];
-}
-
-message CompiledQuery {
- required group PrimaryScan = 1 {
- optional string index_name = 2;
-
- optional string start_key = 3;
- optional bool start_inclusive = 4;
- optional string end_key = 5;
- optional bool end_inclusive = 6;
-
- repeated string start_postfix_value = 22;
- repeated string end_postfix_value = 23;
-
- optional int64 end_unapplied_log_timestamp_us = 19;
- }
-
- repeated group MergeJoinScan = 7 {
- required string index_name = 8;
-
- repeated string prefix_value = 9;
-
- optional bool value_prefix = 20 [default=false];
- }
-
- optional Index index_def = 21;
-
- optional int32 offset = 10 [default = 0];
-
- optional int32 limit = 11;
-
- required bool keys_only = 12;
-
- repeated string property_name = 24;
-
- optional int32 distinct_infix_size = 25;
-
- optional group EntityFilter = 13 {
- optional bool distinct = 14 [default=false];
-
- optional string kind = 17;
- optional Reference ancestor = 18;
- }
-}
-
-message CompiledCursor {
- optional group Position = 2 {
- optional string start_key = 27;
-
- repeated group IndexValue = 29 {
- optional string property = 30;
- required PropertyValue value = 31;
- }
-
- optional Reference key = 32;
-
- optional bool start_inclusive = 28 [default=true];
- }
-}
-
-message Cursor {
- required fixed64 cursor = 1;
-
- optional string app = 2;
-}
-
-message Error {
- enum ErrorCode {
- BAD_REQUEST = 1;
- CONCURRENT_TRANSACTION = 2;
- INTERNAL_ERROR = 3;
- NEED_INDEX = 4;
- TIMEOUT = 5;
- PERMISSION_DENIED = 6;
- BIGTABLE_ERROR = 7;
- COMMITTED_BUT_STILL_APPLYING = 8;
- CAPABILITY_DISABLED = 9;
- TRY_ALTERNATE_BACKEND = 10;
- SAFE_TIME_TOO_OLD = 11;
- }
-}
-
-message Cost {
- optional int32 index_writes = 1;
- optional int32 index_write_bytes = 2;
- optional int32 entity_writes = 3;
- optional int32 entity_write_bytes = 4;
- optional group CommitCost = 5 {
- optional int32 requested_entity_puts = 6;
- optional int32 requested_entity_deletes = 7;
- };
- optional int32 approximate_storage_delta = 8;
- optional int32 id_sequence_updates = 9;
-}
-
-message GetRequest {
- optional InternalHeader header = 6;
-
- repeated Reference key = 1;
- optional Transaction transaction = 2;
-
- optional int64 failover_ms = 3;
-
- optional bool strong = 4;
-
- optional bool allow_deferred = 5 [default=false];
-}
-
-message GetResponse {
- repeated group Entity = 1 {
- optional EntityProto entity = 2;
- optional Reference key = 4;
-
- optional int64 version = 3;
- }
-
- repeated Reference deferred = 5;
-
- optional bool in_order = 6 [default=true];
-}
-
-message PutRequest {
- optional InternalHeader header = 11;
-
- repeated EntityProto entity = 1;
- optional Transaction transaction = 2;
- repeated CompositeIndex composite_index = 3;
-
- optional bool trusted = 4 [default = false];
-
- optional bool force = 7 [default = false];
-
- optional bool mark_changes = 8 [default = false];
- repeated Snapshot snapshot = 9;
-
- enum AutoIdPolicy {
- CURRENT = 0;
- SEQUENTIAL = 1;
- }
- optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
-}
-
-message PutResponse {
- repeated Reference key = 1;
- optional Cost cost = 2;
- repeated int64 version = 3;
-}
-
-message TouchRequest {
- optional InternalHeader header = 10;
-
- repeated Reference key = 1;
- repeated CompositeIndex composite_index = 2;
- optional bool force = 3 [default = false];
- repeated Snapshot snapshot = 9;
-}
-
-message TouchResponse {
- optional Cost cost = 1;
-}
-
-message DeleteRequest {
- optional InternalHeader header = 10;
-
- repeated Reference key = 6;
- optional Transaction transaction = 5;
-
- optional bool trusted = 4 [default = false];
-
- optional bool force = 7 [default = false];
-
- optional bool mark_changes = 8 [default = false];
- repeated Snapshot snapshot = 9;
-}
-
-message DeleteResponse {
- optional Cost cost = 1;
- repeated int64 version = 3;
-}
-
-message NextRequest {
- optional InternalHeader header = 5;
-
- required Cursor cursor = 1;
- optional int32 count = 2;
-
- optional int32 offset = 4 [default = 0];
-
- optional bool compile = 3 [default = false];
-}
-
-message QueryResult {
- optional Cursor cursor = 1;
-
- repeated EntityProto result = 2;
-
- optional int32 skipped_results = 7;
-
- required bool more_results = 3;
-
- optional bool keys_only = 4;
-
- optional bool index_only = 9;
-
- optional bool small_ops = 10;
-
- optional CompiledQuery compiled_query = 5;
-
- optional CompiledCursor compiled_cursor = 6;
-
- repeated CompositeIndex index = 8;
-
- repeated int64 version = 11;
-}
-
-message AllocateIdsRequest {
- optional InternalHeader header = 4;
-
- optional Reference model_key = 1;
-
- optional int64 size = 2;
-
- optional int64 max = 3;
-
- repeated Reference reserve = 5;
-}
-
-message AllocateIdsResponse {
- required int64 start = 1;
- required int64 end = 2;
- optional Cost cost = 3;
-}
-
-message CompositeIndices {
- repeated CompositeIndex index = 1;
-}
-
-message AddActionsRequest {
- optional InternalHeader header = 3;
-
- required Transaction transaction = 1;
- repeated Action action = 2;
-}
-
-message AddActionsResponse {
-}
-
-message BeginTransactionRequest {
- optional InternalHeader header = 3;
-
- required string app = 1;
- optional bool allow_multiple_eg = 2 [default = false];
-}
-
-message CommitResponse {
- optional Cost cost = 1;
-
- repeated group Version = 3 {
- required Reference root_entity_key = 4;
- required int64 version = 5;
- }
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity.go
deleted file mode 100644
index d538701ab3..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-import netcontext "golang.org/x/net/context"
-
-// These functions are implementations of the wrapper functions
-// in ../appengine/identity.go. See that file for commentary.
-
-func AppID(c netcontext.Context) string {
- return appID(FullyQualifiedAppID(c))
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity_classic.go
deleted file mode 100644
index e6b9227c56..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity_classic.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package internal
-
-import (
- "appengine"
-
- netcontext "golang.org/x/net/context"
-)
-
-func DefaultVersionHostname(ctx netcontext.Context) string {
- return appengine.DefaultVersionHostname(fromContext(ctx))
-}
-
-func RequestID(ctx netcontext.Context) string { return appengine.RequestID(fromContext(ctx)) }
-func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
-func ServerSoftware() string { return appengine.ServerSoftware() }
-func ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) }
-func VersionID(ctx netcontext.Context) string { return appengine.VersionID(fromContext(ctx)) }
-func InstanceID() string { return appengine.InstanceID() }
-func IsDevAppServer() bool { return appengine.IsDevAppServer() }
-
-func fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() }
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity_vm.go
deleted file mode 100644
index ebe68b785b..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity_vm.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package internal
-
-import (
- "net/http"
- "os"
-
- netcontext "golang.org/x/net/context"
-)
-
-// These functions are implementations of the wrapper functions
-// in ../appengine/identity.go. See that file for commentary.
-
-const (
- hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
- hRequestLogId = "X-AppEngine-Request-Log-Id"
- hDatacenter = "X-AppEngine-Datacenter"
-)
-
-func ctxHeaders(ctx netcontext.Context) http.Header {
- return fromContext(ctx).Request().Header
-}
-
-func DefaultVersionHostname(ctx netcontext.Context) string {
- return ctxHeaders(ctx).Get(hDefaultVersionHostname)
-}
-
-func RequestID(ctx netcontext.Context) string {
- return ctxHeaders(ctx).Get(hRequestLogId)
-}
-
-func Datacenter(ctx netcontext.Context) string {
- return ctxHeaders(ctx).Get(hDatacenter)
-}
-
-func ServerSoftware() string {
- // TODO(dsymonds): Remove fallback when we've verified this.
- if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
- return s
- }
- return "Google App Engine/1.x.x"
-}
-
-// TODO(dsymonds): Remove the metadata fetches.
-
-func ModuleName(_ netcontext.Context) string {
- if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
- return s
- }
- return string(mustGetMetadata("instance/attributes/gae_backend_name"))
-}
-
-func VersionID(_ netcontext.Context) string {
- if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
- return s1 + "." + s2
- }
- return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
-}
-
-func InstanceID() string {
- if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
- return s
- }
- return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
-}
-
-func partitionlessAppID() string {
- // gae_project has everything except the partition prefix.
- appID := os.Getenv("GAE_LONG_APP_ID")
- if appID == "" {
- appID = string(mustGetMetadata("instance/attributes/gae_project"))
- }
- return appID
-}
-
-func fullyQualifiedAppID(_ netcontext.Context) string {
- appID := partitionlessAppID()
-
- part := os.Getenv("GAE_PARTITION")
- if part == "" {
- part = string(mustGetMetadata("instance/attributes/gae_partition"))
- }
-
- if part != "" {
- appID = part + "~" + appID
- }
- return appID
-}
-
-func IsDevAppServer() bool {
- return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/internal.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/internal.go
deleted file mode 100644
index 66e8d76866..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/internal.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// Package internal provides support for package appengine.
-//
-// Programs should not use this package directly. Its API is not stable.
-// Use packages appengine and appengine/* instead.
-package internal
-
-import (
- "fmt"
- "io"
- "log"
- "net/http"
- "net/url"
- "os"
-
- "github.com/golang/protobuf/proto"
-
- remotepb "google.golang.org/appengine/internal/remote_api"
-)
-
-// errorCodeMaps is a map of service name to the error code map for the service.
-var errorCodeMaps = make(map[string]map[int32]string)
-
-// RegisterErrorCodeMap is called from API implementations to register their
-// error code map. This should only be called from init functions.
-func RegisterErrorCodeMap(service string, m map[int32]string) {
- errorCodeMaps[service] = m
-}
-
-type timeoutCodeKey struct {
- service string
- code int32
-}
-
-// timeoutCodes is the set of service+code pairs that represent timeouts.
-var timeoutCodes = make(map[timeoutCodeKey]bool)
-
-func RegisterTimeoutErrorCode(service string, code int32) {
- timeoutCodes[timeoutCodeKey{service, code}] = true
-}
-
-// APIError is the type returned by appengine.Context's Call method
-// when an API call fails in an API-specific way. This may be, for instance,
-// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
-type APIError struct {
- Service string
- Detail string
- Code int32 // API-specific error code
-}
-
-func (e *APIError) Error() string {
- if e.Code == 0 {
- if e.Detail == "" {
- return "APIError "
- }
- return e.Detail
- }
- s := fmt.Sprintf("API error %d", e.Code)
- if m, ok := errorCodeMaps[e.Service]; ok {
- s += " (" + e.Service + ": " + m[e.Code] + ")"
- } else {
- // Shouldn't happen, but provide a bit more detail if it does.
- s = e.Service + " " + s
- }
- if e.Detail != "" {
- s += ": " + e.Detail
- }
- return s
-}
-
-func (e *APIError) IsTimeout() bool {
- return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
-}
-
-// CallError is the type returned by appengine.Context's Call method when an
-// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
-type CallError struct {
- Detail string
- Code int32
- // TODO: Remove this if we get a distinguishable error code.
- Timeout bool
-}
-
-func (e *CallError) Error() string {
- var msg string
- switch remotepb.RpcError_ErrorCode(e.Code) {
- case remotepb.RpcError_UNKNOWN:
- return e.Detail
- case remotepb.RpcError_OVER_QUOTA:
- msg = "Over quota"
- case remotepb.RpcError_CAPABILITY_DISABLED:
- msg = "Capability disabled"
- case remotepb.RpcError_CANCELLED:
- msg = "Canceled"
- default:
- msg = fmt.Sprintf("Call error %d", e.Code)
- }
- s := msg + ": " + e.Detail
- if e.Timeout {
- s += " (timeout)"
- }
- return s
-}
-
-func (e *CallError) IsTimeout() bool {
- return e.Timeout
-}
-
-func Main() {
- installHealthChecker(http.DefaultServeMux)
-
- port := "8080"
- if s := os.Getenv("PORT"); s != "" {
- port = s
- }
-
- if err := http.ListenAndServe(":"+port, http.HandlerFunc(handleHTTP)); err != nil {
- log.Fatalf("http.ListenAndServe: %v", err)
- }
-}
-
-func installHealthChecker(mux *http.ServeMux) {
- // If no health check handler has been installed by this point, add a trivial one.
- const healthPath = "/_ah/health"
- hreq := &http.Request{
- Method: "GET",
- URL: &url.URL{
- Path: healthPath,
- },
- }
- if _, pat := mux.Handler(hreq); pat != healthPath {
- mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, "ok")
- })
- }
-}
-
-// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
-// The function should be prepared to be called on the same message more than once; it should only modify the
-// RPC request the first time.
-var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
deleted file mode 100644
index 20c595be30..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
+++ /dev/null
@@ -1,899 +0,0 @@
-// Code generated by protoc-gen-go.
-// source: google.golang.org/appengine/internal/log/log_service.proto
-// DO NOT EDIT!
-
-/*
-Package log is a generated protocol buffer package.
-
-It is generated from these files:
- google.golang.org/appengine/internal/log/log_service.proto
-
-It has these top-level messages:
- LogServiceError
- UserAppLogLine
- UserAppLogGroup
- FlushRequest
- SetStatusRequest
- LogOffset
- LogLine
- RequestLog
- LogModuleVersion
- LogReadRequest
- LogReadResponse
- LogUsageRecord
- LogUsageRequest
- LogUsageResponse
-*/
-package log
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-type LogServiceError_ErrorCode int32
-
-const (
- LogServiceError_OK LogServiceError_ErrorCode = 0
- LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1
- LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2
-)
-
-var LogServiceError_ErrorCode_name = map[int32]string{
- 0: "OK",
- 1: "INVALID_REQUEST",
- 2: "STORAGE_ERROR",
-}
-var LogServiceError_ErrorCode_value = map[string]int32{
- "OK": 0,
- "INVALID_REQUEST": 1,
- "STORAGE_ERROR": 2,
-}
-
-func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode {
- p := new(LogServiceError_ErrorCode)
- *p = x
- return p
-}
-func (x LogServiceError_ErrorCode) String() string {
- return proto.EnumName(LogServiceError_ErrorCode_name, int32(x))
-}
-func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode")
- if err != nil {
- return err
- }
- *x = LogServiceError_ErrorCode(value)
- return nil
-}
-
-type LogServiceError struct {
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *LogServiceError) Reset() { *m = LogServiceError{} }
-func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
-func (*LogServiceError) ProtoMessage() {}
-
-type UserAppLogLine struct {
- TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"`
- Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
- Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} }
-func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
-func (*UserAppLogLine) ProtoMessage() {}
-
-func (m *UserAppLogLine) GetTimestampUsec() int64 {
- if m != nil && m.TimestampUsec != nil {
- return *m.TimestampUsec
- }
- return 0
-}
-
-func (m *UserAppLogLine) GetLevel() int64 {
- if m != nil && m.Level != nil {
- return *m.Level
- }
- return 0
-}
-
-func (m *UserAppLogLine) GetMessage() string {
- if m != nil && m.Message != nil {
- return *m.Message
- }
- return ""
-}
-
-type UserAppLogGroup struct {
- LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} }
-func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
-func (*UserAppLogGroup) ProtoMessage() {}
-
-func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
- if m != nil {
- return m.LogLine
- }
- return nil
-}
-
-type FlushRequest struct {
- Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *FlushRequest) Reset() { *m = FlushRequest{} }
-func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
-func (*FlushRequest) ProtoMessage() {}
-
-func (m *FlushRequest) GetLogs() []byte {
- if m != nil {
- return m.Logs
- }
- return nil
-}
-
-type SetStatusRequest struct {
- Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} }
-func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
-func (*SetStatusRequest) ProtoMessage() {}
-
-func (m *SetStatusRequest) GetStatus() string {
- if m != nil && m.Status != nil {
- return *m.Status
- }
- return ""
-}
-
-type LogOffset struct {
- RequestId []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *LogOffset) Reset() { *m = LogOffset{} }
-func (m *LogOffset) String() string { return proto.CompactTextString(m) }
-func (*LogOffset) ProtoMessage() {}
-
-func (m *LogOffset) GetRequestId() []byte {
- if m != nil {
- return m.RequestId
- }
- return nil
-}
-
-type LogLine struct {
- Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
- Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
- LogMessage *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *LogLine) Reset() { *m = LogLine{} }
-func (m *LogLine) String() string { return proto.CompactTextString(m) }
-func (*LogLine) ProtoMessage() {}
-
-func (m *LogLine) GetTime() int64 {
- if m != nil && m.Time != nil {
- return *m.Time
- }
- return 0
-}
-
-func (m *LogLine) GetLevel() int32 {
- if m != nil && m.Level != nil {
- return *m.Level
- }
- return 0
-}
-
-func (m *LogLine) GetLogMessage() string {
- if m != nil && m.LogMessage != nil {
- return *m.LogMessage
- }
- return ""
-}
-
-type RequestLog struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
- ModuleId *string `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"`
- VersionId *string `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"`
- RequestId []byte `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"`
- Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"`
- Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"`
- Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"`
- StartTime *int64 `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"`
- EndTime *int64 `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"`
- Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"`
- Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"`
- Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"`
- Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"`
- HttpVersion *string `protobuf:"bytes,12,req,name=http_version" json:"http_version,omitempty"`
- Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"`
- ResponseSize *int64 `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"`
- Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"`
- UserAgent *string `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"`
- UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"`
- Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"`
- ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"`
- Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"`
- Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"`
- TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"`
- TaskName *string `protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"`
- WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"`
- PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"`
- ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,def=-1" json:"replica_index,omitempty"`
- Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"`
- CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"`
- Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"`
- LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"`
- AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"`
- ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"`
- WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time" json:"was_throttled_for_time,omitempty"`
- WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"`
- ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"`
- ServerName []byte `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RequestLog) Reset() { *m = RequestLog{} }
-func (m *RequestLog) String() string { return proto.CompactTextString(m) }
-func (*RequestLog) ProtoMessage() {}
-
-const Default_RequestLog_ModuleId string = "default"
-const Default_RequestLog_ReplicaIndex int32 = -1
-const Default_RequestLog_Finished bool = true
-
-func (m *RequestLog) GetAppId() string {
- if m != nil && m.AppId != nil {
- return *m.AppId
- }
- return ""
-}
-
-func (m *RequestLog) GetModuleId() string {
- if m != nil && m.ModuleId != nil {
- return *m.ModuleId
- }
- return Default_RequestLog_ModuleId
-}
-
-func (m *RequestLog) GetVersionId() string {
- if m != nil && m.VersionId != nil {
- return *m.VersionId
- }
- return ""
-}
-
-func (m *RequestLog) GetRequestId() []byte {
- if m != nil {
- return m.RequestId
- }
- return nil
-}
-
-func (m *RequestLog) GetOffset() *LogOffset {
- if m != nil {
- return m.Offset
- }
- return nil
-}
-
-func (m *RequestLog) GetIp() string {
- if m != nil && m.Ip != nil {
- return *m.Ip
- }
- return ""
-}
-
-func (m *RequestLog) GetNickname() string {
- if m != nil && m.Nickname != nil {
- return *m.Nickname
- }
- return ""
-}
-
-func (m *RequestLog) GetStartTime() int64 {
- if m != nil && m.StartTime != nil {
- return *m.StartTime
- }
- return 0
-}
-
-func (m *RequestLog) GetEndTime() int64 {
- if m != nil && m.EndTime != nil {
- return *m.EndTime
- }
- return 0
-}
-
-func (m *RequestLog) GetLatency() int64 {
- if m != nil && m.Latency != nil {
- return *m.Latency
- }
- return 0
-}
-
-func (m *RequestLog) GetMcycles() int64 {
- if m != nil && m.Mcycles != nil {
- return *m.Mcycles
- }
- return 0
-}
-
-func (m *RequestLog) GetMethod() string {
- if m != nil && m.Method != nil {
- return *m.Method
- }
- return ""
-}
-
-func (m *RequestLog) GetResource() string {
- if m != nil && m.Resource != nil {
- return *m.Resource
- }
- return ""
-}
-
-func (m *RequestLog) GetHttpVersion() string {
- if m != nil && m.HttpVersion != nil {
- return *m.HttpVersion
- }
- return ""
-}
-
-func (m *RequestLog) GetStatus() int32 {
- if m != nil && m.Status != nil {
- return *m.Status
- }
- return 0
-}
-
-func (m *RequestLog) GetResponseSize() int64 {
- if m != nil && m.ResponseSize != nil {
- return *m.ResponseSize
- }
- return 0
-}
-
-func (m *RequestLog) GetReferrer() string {
- if m != nil && m.Referrer != nil {
- return *m.Referrer
- }
- return ""
-}
-
-func (m *RequestLog) GetUserAgent() string {
- if m != nil && m.UserAgent != nil {
- return *m.UserAgent
- }
- return ""
-}
-
-func (m *RequestLog) GetUrlMapEntry() string {
- if m != nil && m.UrlMapEntry != nil {
- return *m.UrlMapEntry
- }
- return ""
-}
-
-func (m *RequestLog) GetCombined() string {
- if m != nil && m.Combined != nil {
- return *m.Combined
- }
- return ""
-}
-
-func (m *RequestLog) GetApiMcycles() int64 {
- if m != nil && m.ApiMcycles != nil {
- return *m.ApiMcycles
- }
- return 0
-}
-
-func (m *RequestLog) GetHost() string {
- if m != nil && m.Host != nil {
- return *m.Host
- }
- return ""
-}
-
-func (m *RequestLog) GetCost() float64 {
- if m != nil && m.Cost != nil {
- return *m.Cost
- }
- return 0
-}
-
-func (m *RequestLog) GetTaskQueueName() string {
- if m != nil && m.TaskQueueName != nil {
- return *m.TaskQueueName
- }
- return ""
-}
-
-func (m *RequestLog) GetTaskName() string {
- if m != nil && m.TaskName != nil {
- return *m.TaskName
- }
- return ""
-}
-
-func (m *RequestLog) GetWasLoadingRequest() bool {
- if m != nil && m.WasLoadingRequest != nil {
- return *m.WasLoadingRequest
- }
- return false
-}
-
-func (m *RequestLog) GetPendingTime() int64 {
- if m != nil && m.PendingTime != nil {
- return *m.PendingTime
- }
- return 0
-}
-
-func (m *RequestLog) GetReplicaIndex() int32 {
- if m != nil && m.ReplicaIndex != nil {
- return *m.ReplicaIndex
- }
- return Default_RequestLog_ReplicaIndex
-}
-
-func (m *RequestLog) GetFinished() bool {
- if m != nil && m.Finished != nil {
- return *m.Finished
- }
- return Default_RequestLog_Finished
-}
-
-func (m *RequestLog) GetCloneKey() []byte {
- if m != nil {
- return m.CloneKey
- }
- return nil
-}
-
-func (m *RequestLog) GetLine() []*LogLine {
- if m != nil {
- return m.Line
- }
- return nil
-}
-
-func (m *RequestLog) GetLinesIncomplete() bool {
- if m != nil && m.LinesIncomplete != nil {
- return *m.LinesIncomplete
- }
- return false
-}
-
-func (m *RequestLog) GetAppEngineRelease() []byte {
- if m != nil {
- return m.AppEngineRelease
- }
- return nil
-}
-
-func (m *RequestLog) GetExitReason() int32 {
- if m != nil && m.ExitReason != nil {
- return *m.ExitReason
- }
- return 0
-}
-
-func (m *RequestLog) GetWasThrottledForTime() bool {
- if m != nil && m.WasThrottledForTime != nil {
- return *m.WasThrottledForTime
- }
- return false
-}
-
-func (m *RequestLog) GetWasThrottledForRequests() bool {
- if m != nil && m.WasThrottledForRequests != nil {
- return *m.WasThrottledForRequests
- }
- return false
-}
-
-func (m *RequestLog) GetThrottledTime() int64 {
- if m != nil && m.ThrottledTime != nil {
- return *m.ThrottledTime
- }
- return 0
-}
-
-func (m *RequestLog) GetServerName() []byte {
- if m != nil {
- return m.ServerName
- }
- return nil
-}
-
-type LogModuleVersion struct {
- ModuleId *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"`
- VersionId *string `protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} }
-func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
-func (*LogModuleVersion) ProtoMessage() {}
-
-const Default_LogModuleVersion_ModuleId string = "default"
-
-func (m *LogModuleVersion) GetModuleId() string {
- if m != nil && m.ModuleId != nil {
- return *m.ModuleId
- }
- return Default_LogModuleVersion_ModuleId
-}
-
-func (m *LogModuleVersion) GetVersionId() string {
- if m != nil && m.VersionId != nil {
- return *m.VersionId
- }
- return ""
-}
-
-type LogReadRequest struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
- VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"`
- ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"`
- StartTime *int64 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"`
- EndTime *int64 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"`
- Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
- RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"`
- MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"`
- IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"`
- Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
- CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"`
- HostRegex *string `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"`
- ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"`
- IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"`
- AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"`
- IncludeHost *bool `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"`
- IncludeAll *bool `protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"`
- CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"`
- NumShards *int32 `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *LogReadRequest) Reset() { *m = LogReadRequest{} }
-func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
-func (*LogReadRequest) ProtoMessage() {}
-
-func (m *LogReadRequest) GetAppId() string {
- if m != nil && m.AppId != nil {
- return *m.AppId
- }
- return ""
-}
-
-func (m *LogReadRequest) GetVersionId() []string {
- if m != nil {
- return m.VersionId
- }
- return nil
-}
-
-func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion {
- if m != nil {
- return m.ModuleVersion
- }
- return nil
-}
-
-func (m *LogReadRequest) GetStartTime() int64 {
- if m != nil && m.StartTime != nil {
- return *m.StartTime
- }
- return 0
-}
-
-func (m *LogReadRequest) GetEndTime() int64 {
- if m != nil && m.EndTime != nil {
- return *m.EndTime
- }
- return 0
-}
-
-func (m *LogReadRequest) GetOffset() *LogOffset {
- if m != nil {
- return m.Offset
- }
- return nil
-}
-
-func (m *LogReadRequest) GetRequestId() [][]byte {
- if m != nil {
- return m.RequestId
- }
- return nil
-}
-
-func (m *LogReadRequest) GetMinimumLogLevel() int32 {
- if m != nil && m.MinimumLogLevel != nil {
- return *m.MinimumLogLevel
- }
- return 0
-}
-
-func (m *LogReadRequest) GetIncludeIncomplete() bool {
- if m != nil && m.IncludeIncomplete != nil {
- return *m.IncludeIncomplete
- }
- return false
-}
-
-func (m *LogReadRequest) GetCount() int64 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-func (m *LogReadRequest) GetCombinedLogRegex() string {
- if m != nil && m.CombinedLogRegex != nil {
- return *m.CombinedLogRegex
- }
- return ""
-}
-
-func (m *LogReadRequest) GetHostRegex() string {
- if m != nil && m.HostRegex != nil {
- return *m.HostRegex
- }
- return ""
-}
-
-func (m *LogReadRequest) GetReplicaIndex() int32 {
- if m != nil && m.ReplicaIndex != nil {
- return *m.ReplicaIndex
- }
- return 0
-}
-
-func (m *LogReadRequest) GetIncludeAppLogs() bool {
- if m != nil && m.IncludeAppLogs != nil {
- return *m.IncludeAppLogs
- }
- return false
-}
-
-func (m *LogReadRequest) GetAppLogsPerRequest() int32 {
- if m != nil && m.AppLogsPerRequest != nil {
- return *m.AppLogsPerRequest
- }
- return 0
-}
-
-func (m *LogReadRequest) GetIncludeHost() bool {
- if m != nil && m.IncludeHost != nil {
- return *m.IncludeHost
- }
- return false
-}
-
-func (m *LogReadRequest) GetIncludeAll() bool {
- if m != nil && m.IncludeAll != nil {
- return *m.IncludeAll
- }
- return false
-}
-
-func (m *LogReadRequest) GetCacheIterator() bool {
- if m != nil && m.CacheIterator != nil {
- return *m.CacheIterator
- }
- return false
-}
-
-func (m *LogReadRequest) GetNumShards() int32 {
- if m != nil && m.NumShards != nil {
- return *m.NumShards
- }
- return 0
-}
-
-type LogReadResponse struct {
- Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
- Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
- LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *LogReadResponse) Reset() { *m = LogReadResponse{} }
-func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
-func (*LogReadResponse) ProtoMessage() {}
-
-func (m *LogReadResponse) GetLog() []*RequestLog {
- if m != nil {
- return m.Log
- }
- return nil
-}
-
-func (m *LogReadResponse) GetOffset() *LogOffset {
- if m != nil {
- return m.Offset
- }
- return nil
-}
-
-func (m *LogReadResponse) GetLastEndTime() int64 {
- if m != nil && m.LastEndTime != nil {
- return *m.LastEndTime
- }
- return 0
-}
-
-type LogUsageRecord struct {
- VersionId *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"`
- StartTime *int32 `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"`
- EndTime *int32 `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"`
- Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
- TotalSize *int64 `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"`
- Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} }
-func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
-func (*LogUsageRecord) ProtoMessage() {}
-
-func (m *LogUsageRecord) GetVersionId() string {
- if m != nil && m.VersionId != nil {
- return *m.VersionId
- }
- return ""
-}
-
-func (m *LogUsageRecord) GetStartTime() int32 {
- if m != nil && m.StartTime != nil {
- return *m.StartTime
- }
- return 0
-}
-
-func (m *LogUsageRecord) GetEndTime() int32 {
- if m != nil && m.EndTime != nil {
- return *m.EndTime
- }
- return 0
-}
-
-func (m *LogUsageRecord) GetCount() int64 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-func (m *LogUsageRecord) GetTotalSize() int64 {
- if m != nil && m.TotalSize != nil {
- return *m.TotalSize
- }
- return 0
-}
-
-func (m *LogUsageRecord) GetRecords() int32 {
- if m != nil && m.Records != nil {
- return *m.Records
- }
- return 0
-}
-
-type LogUsageRequest struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
- VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"`
- StartTime *int32 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"`
- EndTime *int32 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"`
- ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"`
- CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"`
- UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"`
- VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} }
-func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
-func (*LogUsageRequest) ProtoMessage() {}
-
-const Default_LogUsageRequest_ResolutionHours uint32 = 1
-
-func (m *LogUsageRequest) GetAppId() string {
- if m != nil && m.AppId != nil {
- return *m.AppId
- }
- return ""
-}
-
-func (m *LogUsageRequest) GetVersionId() []string {
- if m != nil {
- return m.VersionId
- }
- return nil
-}
-
-func (m *LogUsageRequest) GetStartTime() int32 {
- if m != nil && m.StartTime != nil {
- return *m.StartTime
- }
- return 0
-}
-
-func (m *LogUsageRequest) GetEndTime() int32 {
- if m != nil && m.EndTime != nil {
- return *m.EndTime
- }
- return 0
-}
-
-func (m *LogUsageRequest) GetResolutionHours() uint32 {
- if m != nil && m.ResolutionHours != nil {
- return *m.ResolutionHours
- }
- return Default_LogUsageRequest_ResolutionHours
-}
-
-func (m *LogUsageRequest) GetCombineVersions() bool {
- if m != nil && m.CombineVersions != nil {
- return *m.CombineVersions
- }
- return false
-}
-
-func (m *LogUsageRequest) GetUsageVersion() int32 {
- if m != nil && m.UsageVersion != nil {
- return *m.UsageVersion
- }
- return 0
-}
-
-func (m *LogUsageRequest) GetVersionsOnly() bool {
- if m != nil && m.VersionsOnly != nil {
- return *m.VersionsOnly
- }
- return false
-}
-
-type LogUsageResponse struct {
- Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
- Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} }
-func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
-func (*LogUsageResponse) ProtoMessage() {}
-
-func (m *LogUsageResponse) GetUsage() []*LogUsageRecord {
- if m != nil {
- return m.Usage
- }
- return nil
-}
-
-func (m *LogUsageResponse) GetSummary() *LogUsageRecord {
- if m != nil {
- return m.Summary
- }
- return nil
-}
-
-func init() {
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/log/log_service.proto
deleted file mode 100644
index 8981dc4757..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/log/log_service.proto
+++ /dev/null
@@ -1,150 +0,0 @@
-syntax = "proto2";
-option go_package = "log";
-
-package appengine;
-
-message LogServiceError {
- enum ErrorCode {
- OK = 0;
- INVALID_REQUEST = 1;
- STORAGE_ERROR = 2;
- }
-}
-
-message UserAppLogLine {
- required int64 timestamp_usec = 1;
- required int64 level = 2;
- required string message = 3;
-}
-
-message UserAppLogGroup {
- repeated UserAppLogLine log_line = 2;
-}
-
-message FlushRequest {
- optional bytes logs = 1;
-}
-
-message SetStatusRequest {
- required string status = 1;
-}
-
-
-message LogOffset {
- optional bytes request_id = 1;
-}
-
-message LogLine {
- required int64 time = 1;
- required int32 level = 2;
- required string log_message = 3;
-}
-
-message RequestLog {
- required string app_id = 1;
- optional string module_id = 37 [default="default"];
- required string version_id = 2;
- required bytes request_id = 3;
- optional LogOffset offset = 35;
- required string ip = 4;
- optional string nickname = 5;
- required int64 start_time = 6;
- required int64 end_time = 7;
- required int64 latency = 8;
- required int64 mcycles = 9;
- required string method = 10;
- required string resource = 11;
- required string http_version = 12;
- required int32 status = 13;
- required int64 response_size = 14;
- optional string referrer = 15;
- optional string user_agent = 16;
- required string url_map_entry = 17;
- required string combined = 18;
- optional int64 api_mcycles = 19;
- optional string host = 20;
- optional double cost = 21;
-
- optional string task_queue_name = 22;
- optional string task_name = 23;
-
- optional bool was_loading_request = 24;
- optional int64 pending_time = 25;
- optional int32 replica_index = 26 [default = -1];
- optional bool finished = 27 [default = true];
- optional bytes clone_key = 28;
-
- repeated LogLine line = 29;
-
- optional bool lines_incomplete = 36;
- optional bytes app_engine_release = 38;
-
- optional int32 exit_reason = 30;
- optional bool was_throttled_for_time = 31;
- optional bool was_throttled_for_requests = 32;
- optional int64 throttled_time = 33;
-
- optional bytes server_name = 34;
-}
-
-message LogModuleVersion {
- optional string module_id = 1 [default="default"];
- optional string version_id = 2;
-}
-
-message LogReadRequest {
- required string app_id = 1;
- repeated string version_id = 2;
- repeated LogModuleVersion module_version = 19;
-
- optional int64 start_time = 3;
- optional int64 end_time = 4;
- optional LogOffset offset = 5;
- repeated bytes request_id = 6;
-
- optional int32 minimum_log_level = 7;
- optional bool include_incomplete = 8;
- optional int64 count = 9;
-
- optional string combined_log_regex = 14;
- optional string host_regex = 15;
- optional int32 replica_index = 16;
-
- optional bool include_app_logs = 10;
- optional int32 app_logs_per_request = 17;
- optional bool include_host = 11;
- optional bool include_all = 12;
- optional bool cache_iterator = 13;
- optional int32 num_shards = 18;
-}
-
-message LogReadResponse {
- repeated RequestLog log = 1;
- optional LogOffset offset = 2;
- optional int64 last_end_time = 3;
-}
-
-message LogUsageRecord {
- optional string version_id = 1;
- optional int32 start_time = 2;
- optional int32 end_time = 3;
- optional int64 count = 4;
- optional int64 total_size = 5;
- optional int32 records = 6;
-}
-
-message LogUsageRequest {
- required string app_id = 1;
- repeated string version_id = 2;
- optional int32 start_time = 3;
- optional int32 end_time = 4;
- optional uint32 resolution_hours = 5 [default = 1];
- optional bool combine_versions = 6;
- optional int32 usage_version = 7;
- optional bool versions_only = 8;
-}
-
-message LogUsageResponse {
- repeated LogUsageRecord usage = 1;
- optional LogUsageRecord summary = 2;
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/metadata.go
deleted file mode 100644
index 9cc1f71d10..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/metadata.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-// This file has code for accessing metadata.
-//
-// References:
-// https://cloud.google.com/compute/docs/metadata
-
-import (
- "fmt"
- "io/ioutil"
- "log"
- "net/http"
- "net/url"
-)
-
-const (
- metadataHost = "metadata"
- metadataPath = "/computeMetadata/v1/"
-)
-
-var (
- metadataRequestHeaders = http.Header{
- "Metadata-Flavor": []string{"Google"},
- }
-)
-
-// TODO(dsymonds): Do we need to support default values, like Python?
-func mustGetMetadata(key string) []byte {
- b, err := getMetadata(key)
- if err != nil {
- log.Fatalf("Metadata fetch failed: %v", err)
- }
- return b
-}
-
-func getMetadata(key string) ([]byte, error) {
- // TODO(dsymonds): May need to use url.Parse to support keys with query args.
- req := &http.Request{
- Method: "GET",
- URL: &url.URL{
- Scheme: "http",
- Host: metadataHost,
- Path: metadataPath + key,
- },
- Header: metadataRequestHeaders,
- Host: metadataHost,
- }
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
- if resp.StatusCode != 200 {
- return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
- }
- return ioutil.ReadAll(resp.Body)
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
deleted file mode 100644
index a0145ed317..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
+++ /dev/null
@@ -1,375 +0,0 @@
-// Code generated by protoc-gen-go.
-// source: google.golang.org/appengine/internal/modules/modules_service.proto
-// DO NOT EDIT!
-
-/*
-Package modules is a generated protocol buffer package.
-
-It is generated from these files:
- google.golang.org/appengine/internal/modules/modules_service.proto
-
-It has these top-level messages:
- ModulesServiceError
- GetModulesRequest
- GetModulesResponse
- GetVersionsRequest
- GetVersionsResponse
- GetDefaultVersionRequest
- GetDefaultVersionResponse
- GetNumInstancesRequest
- GetNumInstancesResponse
- SetNumInstancesRequest
- SetNumInstancesResponse
- StartModuleRequest
- StartModuleResponse
- StopModuleRequest
- StopModuleResponse
- GetHostnameRequest
- GetHostnameResponse
-*/
-package modules
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-type ModulesServiceError_ErrorCode int32
-
-const (
- ModulesServiceError_OK ModulesServiceError_ErrorCode = 0
- ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1
- ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2
- ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3
- ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4
- ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5
-)
-
-var ModulesServiceError_ErrorCode_name = map[int32]string{
- 0: "OK",
- 1: "INVALID_MODULE",
- 2: "INVALID_VERSION",
- 3: "INVALID_INSTANCES",
- 4: "TRANSIENT_ERROR",
- 5: "UNEXPECTED_STATE",
-}
-var ModulesServiceError_ErrorCode_value = map[string]int32{
- "OK": 0,
- "INVALID_MODULE": 1,
- "INVALID_VERSION": 2,
- "INVALID_INSTANCES": 3,
- "TRANSIENT_ERROR": 4,
- "UNEXPECTED_STATE": 5,
-}
-
-func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode {
- p := new(ModulesServiceError_ErrorCode)
- *p = x
- return p
-}
-func (x ModulesServiceError_ErrorCode) String() string {
- return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x))
-}
-func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode")
- if err != nil {
- return err
- }
- *x = ModulesServiceError_ErrorCode(value)
- return nil
-}
-
-type ModulesServiceError struct {
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} }
-func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) }
-func (*ModulesServiceError) ProtoMessage() {}
-
-type GetModulesRequest struct {
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} }
-func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) }
-func (*GetModulesRequest) ProtoMessage() {}
-
-type GetModulesResponse struct {
- Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} }
-func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) }
-func (*GetModulesResponse) ProtoMessage() {}
-
-func (m *GetModulesResponse) GetModule() []string {
- if m != nil {
- return m.Module
- }
- return nil
-}
-
-type GetVersionsRequest struct {
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} }
-func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) }
-func (*GetVersionsRequest) ProtoMessage() {}
-
-func (m *GetVersionsRequest) GetModule() string {
- if m != nil && m.Module != nil {
- return *m.Module
- }
- return ""
-}
-
-type GetVersionsResponse struct {
- Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} }
-func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) }
-func (*GetVersionsResponse) ProtoMessage() {}
-
-func (m *GetVersionsResponse) GetVersion() []string {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-type GetDefaultVersionRequest struct {
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} }
-func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) }
-func (*GetDefaultVersionRequest) ProtoMessage() {}
-
-func (m *GetDefaultVersionRequest) GetModule() string {
- if m != nil && m.Module != nil {
- return *m.Module
- }
- return ""
-}
-
-type GetDefaultVersionResponse struct {
- Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} }
-func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) }
-func (*GetDefaultVersionResponse) ProtoMessage() {}
-
-func (m *GetDefaultVersionResponse) GetVersion() string {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return ""
-}
-
-type GetNumInstancesRequest struct {
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
- Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} }
-func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
-func (*GetNumInstancesRequest) ProtoMessage() {}
-
-func (m *GetNumInstancesRequest) GetModule() string {
- if m != nil && m.Module != nil {
- return *m.Module
- }
- return ""
-}
-
-func (m *GetNumInstancesRequest) GetVersion() string {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return ""
-}
-
-type GetNumInstancesResponse struct {
- Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} }
-func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
-func (*GetNumInstancesResponse) ProtoMessage() {}
-
-func (m *GetNumInstancesResponse) GetInstances() int64 {
- if m != nil && m.Instances != nil {
- return *m.Instances
- }
- return 0
-}
-
-type SetNumInstancesRequest struct {
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
- Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
- Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} }
-func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
-func (*SetNumInstancesRequest) ProtoMessage() {}
-
-func (m *SetNumInstancesRequest) GetModule() string {
- if m != nil && m.Module != nil {
- return *m.Module
- }
- return ""
-}
-
-func (m *SetNumInstancesRequest) GetVersion() string {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return ""
-}
-
-func (m *SetNumInstancesRequest) GetInstances() int64 {
- if m != nil && m.Instances != nil {
- return *m.Instances
- }
- return 0
-}
-
-type SetNumInstancesResponse struct {
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} }
-func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
-func (*SetNumInstancesResponse) ProtoMessage() {}
-
-type StartModuleRequest struct {
- Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"`
- Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} }
-func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) }
-func (*StartModuleRequest) ProtoMessage() {}
-
-func (m *StartModuleRequest) GetModule() string {
- if m != nil && m.Module != nil {
- return *m.Module
- }
- return ""
-}
-
-func (m *StartModuleRequest) GetVersion() string {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return ""
-}
-
-type StartModuleResponse struct {
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} }
-func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) }
-func (*StartModuleResponse) ProtoMessage() {}
-
-type StopModuleRequest struct {
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
- Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} }
-func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) }
-func (*StopModuleRequest) ProtoMessage() {}
-
-func (m *StopModuleRequest) GetModule() string {
- if m != nil && m.Module != nil {
- return *m.Module
- }
- return ""
-}
-
-func (m *StopModuleRequest) GetVersion() string {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return ""
-}
-
-type StopModuleResponse struct {
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} }
-func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) }
-func (*StopModuleResponse) ProtoMessage() {}
-
-type GetHostnameRequest struct {
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
- Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
- Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} }
-func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) }
-func (*GetHostnameRequest) ProtoMessage() {}
-
-func (m *GetHostnameRequest) GetModule() string {
- if m != nil && m.Module != nil {
- return *m.Module
- }
- return ""
-}
-
-func (m *GetHostnameRequest) GetVersion() string {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return ""
-}
-
-func (m *GetHostnameRequest) GetInstance() string {
- if m != nil && m.Instance != nil {
- return *m.Instance
- }
- return ""
-}
-
-type GetHostnameResponse struct {
- Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} }
-func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) }
-func (*GetHostnameResponse) ProtoMessage() {}
-
-func (m *GetHostnameResponse) GetHostname() string {
- if m != nil && m.Hostname != nil {
- return *m.Hostname
- }
- return ""
-}
-
-func init() {
-}
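Aside on the generated accessors above: proto2 `optional`/`required` scalar fields become pointer fields in Go, and the generated getters are safe to call on a nil message, returning the zero value instead of panicking. A minimal standalone sketch of that convention, using a hypothetical local type rather than the generated package being removed here:

```go
package main

import "fmt"

// hostnameResponse mirrors the shape of the generated GetHostnameResponse:
// proto2 fields become pointers, and the getter guards against both a nil
// message and a nil field.
type hostnameResponse struct {
	Hostname *string
}

func (m *hostnameResponse) GetHostname() string {
	if m != nil && m.Hostname != nil {
		return *m.Hostname
	}
	return ""
}

func main() {
	var resp *hostnameResponse                    // nil message, e.g. an RPC that never filled it in
	fmt.Printf("%q\n", resp.GetHostname())        // prints "" instead of panicking

	h := "module.example.appspot.com"
	resp = &hostnameResponse{Hostname: &h}
	fmt.Printf("%q\n", resp.GetHostname())        // prints the hostname
}
```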
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
deleted file mode 100644
index d29f0065a2..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
+++ /dev/null
@@ -1,80 +0,0 @@
-syntax = "proto2";
-option go_package = "modules";
-
-package appengine;
-
-message ModulesServiceError {
- enum ErrorCode {
- OK = 0;
- INVALID_MODULE = 1;
- INVALID_VERSION = 2;
- INVALID_INSTANCES = 3;
- TRANSIENT_ERROR = 4;
- UNEXPECTED_STATE = 5;
- }
-}
-
-message GetModulesRequest {
-}
-
-message GetModulesResponse {
- repeated string module = 1;
-}
-
-message GetVersionsRequest {
- optional string module = 1;
-}
-
-message GetVersionsResponse {
- repeated string version = 1;
-}
-
-message GetDefaultVersionRequest {
- optional string module = 1;
-}
-
-message GetDefaultVersionResponse {
- required string version = 1;
-}
-
-message GetNumInstancesRequest {
- optional string module = 1;
- optional string version = 2;
-}
-
-message GetNumInstancesResponse {
- required int64 instances = 1;
-}
-
-message SetNumInstancesRequest {
- optional string module = 1;
- optional string version = 2;
- required int64 instances = 3;
-}
-
-message SetNumInstancesResponse {}
-
-message StartModuleRequest {
- required string module = 1;
- required string version = 2;
-}
-
-message StartModuleResponse {}
-
-message StopModuleRequest {
- optional string module = 1;
- optional string version = 2;
-}
-
-message StopModuleResponse {}
-
-message GetHostnameRequest {
- optional string module = 1;
- optional string version = 2;
- optional string instance = 3;
-}
-
-message GetHostnameResponse {
- required string hostname = 1;
-}
-
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/net.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/net.go
deleted file mode 100644
index 3b94cf0c6a..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/net.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-// This file implements a network dialer that limits the number of concurrent connections.
-// It is only used for API calls.
-
-import (
- "log"
- "net"
- "runtime"
- "sync"
- "time"
-)
-
-var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
-
-func limitRelease() {
- // non-blocking
- select {
- case <-limitSem:
- default:
- // This should not normally happen.
- log.Print("appengine: unbalanced limitSem release!")
- }
-}
-
-func limitDial(network, addr string) (net.Conn, error) {
- limitSem <- 1
-
- // Dial with a timeout in case the API host is MIA.
- // The connection should normally be very fast.
- conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
- if err != nil {
- limitRelease()
- return nil, err
- }
- lc := &limitConn{Conn: conn}
- runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
- return lc, nil
-}
-
-type limitConn struct {
- close sync.Once
- net.Conn
-}
-
-func (lc *limitConn) Close() error {
- defer lc.close.Do(func() {
- limitRelease()
- runtime.SetFinalizer(lc, nil)
- })
- return lc.Conn.Close()
-}
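The dialer above caps concurrent API connections with a buffered channel used as a counting semaphore and releases the slot exactly once when the wrapped connection is closed. A minimal standalone sketch of the same pattern; the capacity, dial timeout, and the http.Client wiring below are illustrative, not what the vendored package configures:

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	"sync"
	"time"
)

// sem is a buffered channel used as a counting semaphore; its capacity caps
// the number of concurrent outbound connections (the deleted file uses 100).
var sem = make(chan struct{}, 10)

// limitedDial acquires a slot, dials with a timeout, and wraps the connection
// so the slot is released exactly once when it is closed.
func limitedDial(network, addr string) (net.Conn, error) {
	sem <- struct{}{}
	c, err := net.DialTimeout(network, addr, 500*time.Millisecond)
	if err != nil {
		<-sem
		return nil, err
	}
	return &limitedConn{Conn: c}, nil
}

type limitedConn struct {
	net.Conn
	once sync.Once
}

func (c *limitedConn) Close() error {
	c.once.Do(func() { <-sem }) // release exactly once, even on repeated Close
	return c.Conn.Close()
}

func main() {
	client := &http.Client{Transport: &http.Transport{Dial: limitedDial}}
	resp, err := client.Get("http://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```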
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/regen.sh
deleted file mode 100644
index 2fdb546a63..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/regen.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash -e
-#
-# This script rebuilds the generated code for the protocol buffers.
-# To run this you will need protoc and goprotobuf installed;
-# see https://github.com/golang/protobuf for instructions.
-
-PKG=google.golang.org/appengine
-
-function die() {
- echo 1>&2 $*
- exit 1
-}
-
-# Sanity check that the right tools are accessible.
-for tool in go protoc protoc-gen-go; do
- q=$(which $tool) || die "didn't find $tool"
- echo 1>&2 "$tool: $q"
-done
-
-echo -n 1>&2 "finding package dir... "
-pkgdir=$(go list -f '{{.Dir}}' $PKG)
-echo 1>&2 $pkgdir
-base=$(echo $pkgdir | sed "s,/$PKG\$,,")
-echo 1>&2 "base: $base"
-cd $base
-
-# Run protoc once per package.
-for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
- echo 1>&2 "* $dir"
- protoc --go_out=. $dir/*.proto
-done
-
-for f in $(find $PKG/internal -name '*.pb.go'); do
- # Remove proto.RegisterEnum calls.
- # These cause duplicate registration panics when these packages
- # are used on classic App Engine. proto.RegisterEnum only affects
- # parsing the text format; we don't care about that.
- # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
- sed -i '/proto.RegisterEnum/d' $f
-done
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
deleted file mode 100644
index 526bd39e6d..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
+++ /dev/null
@@ -1,231 +0,0 @@
-// Code generated by protoc-gen-go.
-// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
-// DO NOT EDIT!
-
-/*
-Package remote_api is a generated protocol buffer package.
-
-It is generated from these files:
- google.golang.org/appengine/internal/remote_api/remote_api.proto
-
-It has these top-level messages:
- Request
- ApplicationError
- RpcError
- Response
-*/
-package remote_api
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-type RpcError_ErrorCode int32
-
-const (
- RpcError_UNKNOWN RpcError_ErrorCode = 0
- RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1
- RpcError_PARSE_ERROR RpcError_ErrorCode = 2
- RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3
- RpcError_OVER_QUOTA RpcError_ErrorCode = 4
- RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5
- RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
- RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7
- RpcError_BAD_REQUEST RpcError_ErrorCode = 8
- RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9
- RpcError_CANCELLED RpcError_ErrorCode = 10
- RpcError_REPLAY_ERROR RpcError_ErrorCode = 11
- RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12
-)
-
-var RpcError_ErrorCode_name = map[int32]string{
- 0: "UNKNOWN",
- 1: "CALL_NOT_FOUND",
- 2: "PARSE_ERROR",
- 3: "SECURITY_VIOLATION",
- 4: "OVER_QUOTA",
- 5: "REQUEST_TOO_LARGE",
- 6: "CAPABILITY_DISABLED",
- 7: "FEATURE_DISABLED",
- 8: "BAD_REQUEST",
- 9: "RESPONSE_TOO_LARGE",
- 10: "CANCELLED",
- 11: "REPLAY_ERROR",
- 12: "DEADLINE_EXCEEDED",
-}
-var RpcError_ErrorCode_value = map[string]int32{
- "UNKNOWN": 0,
- "CALL_NOT_FOUND": 1,
- "PARSE_ERROR": 2,
- "SECURITY_VIOLATION": 3,
- "OVER_QUOTA": 4,
- "REQUEST_TOO_LARGE": 5,
- "CAPABILITY_DISABLED": 6,
- "FEATURE_DISABLED": 7,
- "BAD_REQUEST": 8,
- "RESPONSE_TOO_LARGE": 9,
- "CANCELLED": 10,
- "REPLAY_ERROR": 11,
- "DEADLINE_EXCEEDED": 12,
-}
-
-func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
- p := new(RpcError_ErrorCode)
- *p = x
- return p
-}
-func (x RpcError_ErrorCode) String() string {
- return proto.EnumName(RpcError_ErrorCode_name, int32(x))
-}
-func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
- if err != nil {
- return err
- }
- *x = RpcError_ErrorCode(value)
- return nil
-}
-
-type Request struct {
- ServiceName *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"`
- Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
- Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
- RequestId *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Request) Reset() { *m = Request{} }
-func (m *Request) String() string { return proto.CompactTextString(m) }
-func (*Request) ProtoMessage() {}
-
-func (m *Request) GetServiceName() string {
- if m != nil && m.ServiceName != nil {
- return *m.ServiceName
- }
- return ""
-}
-
-func (m *Request) GetMethod() string {
- if m != nil && m.Method != nil {
- return *m.Method
- }
- return ""
-}
-
-func (m *Request) GetRequest() []byte {
- if m != nil {
- return m.Request
- }
- return nil
-}
-
-func (m *Request) GetRequestId() string {
- if m != nil && m.RequestId != nil {
- return *m.RequestId
- }
- return ""
-}
-
-type ApplicationError struct {
- Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
- Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *ApplicationError) Reset() { *m = ApplicationError{} }
-func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
-func (*ApplicationError) ProtoMessage() {}
-
-func (m *ApplicationError) GetCode() int32 {
- if m != nil && m.Code != nil {
- return *m.Code
- }
- return 0
-}
-
-func (m *ApplicationError) GetDetail() string {
- if m != nil && m.Detail != nil {
- return *m.Detail
- }
- return ""
-}
-
-type RpcError struct {
- Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
- Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RpcError) Reset() { *m = RpcError{} }
-func (m *RpcError) String() string { return proto.CompactTextString(m) }
-func (*RpcError) ProtoMessage() {}
-
-func (m *RpcError) GetCode() int32 {
- if m != nil && m.Code != nil {
- return *m.Code
- }
- return 0
-}
-
-func (m *RpcError) GetDetail() string {
- if m != nil && m.Detail != nil {
- return *m.Detail
- }
- return ""
-}
-
-type Response struct {
- Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
- Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
- ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"`
- JavaException []byte `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"`
- RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Response) Reset() { *m = Response{} }
-func (m *Response) String() string { return proto.CompactTextString(m) }
-func (*Response) ProtoMessage() {}
-
-func (m *Response) GetResponse() []byte {
- if m != nil {
- return m.Response
- }
- return nil
-}
-
-func (m *Response) GetException() []byte {
- if m != nil {
- return m.Exception
- }
- return nil
-}
-
-func (m *Response) GetApplicationError() *ApplicationError {
- if m != nil {
- return m.ApplicationError
- }
- return nil
-}
-
-func (m *Response) GetJavaException() []byte {
- if m != nil {
- return m.JavaException
- }
- return nil
-}
-
-func (m *Response) GetRpcError() *RpcError {
- if m != nil {
- return m.RpcError
- }
- return nil
-}
-
-func init() {
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
deleted file mode 100644
index f21763a4e2..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
+++ /dev/null
@@ -1,44 +0,0 @@
-syntax = "proto2";
-option go_package = "remote_api";
-
-package remote_api;
-
-message Request {
- required string service_name = 2;
- required string method = 3;
- required bytes request = 4;
- optional string request_id = 5;
-}
-
-message ApplicationError {
- required int32 code = 1;
- required string detail = 2;
-}
-
-message RpcError {
- enum ErrorCode {
- UNKNOWN = 0;
- CALL_NOT_FOUND = 1;
- PARSE_ERROR = 2;
- SECURITY_VIOLATION = 3;
- OVER_QUOTA = 4;
- REQUEST_TOO_LARGE = 5;
- CAPABILITY_DISABLED = 6;
- FEATURE_DISABLED = 7;
- BAD_REQUEST = 8;
- RESPONSE_TOO_LARGE = 9;
- CANCELLED = 10;
- REPLAY_ERROR = 11;
- DEADLINE_EXCEEDED = 12;
- }
- required int32 code = 1;
- optional string detail = 2;
-}
-
-message Response {
- optional bytes response = 1;
- optional bytes exception = 2;
- optional ApplicationError application_error = 3;
- optional bytes java_exception = 4;
- optional RpcError rpc_error = 5;
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/transaction.go
deleted file mode 100644
index 28a6d18120..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/transaction.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-// This file implements hooks for applying datastore transactions.
-
-import (
- "errors"
- "reflect"
-
- "github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
-
- basepb "google.golang.org/appengine/internal/base"
- pb "google.golang.org/appengine/internal/datastore"
-)
-
-var transactionSetters = make(map[reflect.Type]reflect.Value)
-
-// RegisterTransactionSetter registers a function that sets transaction information
-// in a protocol buffer message. f should be a function with two arguments,
-// the first being a protocol buffer type, and the second being *datastore.Transaction.
-func RegisterTransactionSetter(f interface{}) {
- v := reflect.ValueOf(f)
- transactionSetters[v.Type().In(0)] = v
-}
-
-// applyTransaction applies the transaction t to message pb
-// by using the relevant setter passed to RegisterTransactionSetter.
-func applyTransaction(pb proto.Message, t *pb.Transaction) {
- v := reflect.ValueOf(pb)
- if f, ok := transactionSetters[v.Type()]; ok {
- f.Call([]reflect.Value{v, reflect.ValueOf(t)})
- }
-}
-
-var transactionKey = "used for *Transaction"
-
-func transactionFromContext(ctx netcontext.Context) *transaction {
- t, _ := ctx.Value(&transactionKey).(*transaction)
- return t
-}
-
-func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
- return netcontext.WithValue(ctx, &transactionKey, t)
-}
-
-type transaction struct {
- transaction pb.Transaction
- finished bool
-}
-
-var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
-
-func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error {
- if transactionFromContext(c) != nil {
- return errors.New("nested transactions are not supported")
- }
-
- // Begin the transaction.
- t := &transaction{}
- req := &pb.BeginTransactionRequest{
- App: proto.String(FullyQualifiedAppID(c)),
- }
- if xg {
- req.AllowMultipleEg = proto.Bool(true)
- }
- if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
- return err
- }
-
- // Call f, rolling back the transaction if f returns a non-nil error, or panics.
- // The panic is not recovered.
- defer func() {
- if t.finished {
- return
- }
- t.finished = true
- // Ignore the error return value, since we are already returning a non-nil
- // error (or we're panicking).
- Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
- }()
- if err := f(withTransaction(c, t)); err != nil {
- return err
- }
- t.finished = true
-
- // Commit the transaction.
- res := &pb.CommitResponse{}
- err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
- if ae, ok := err.(*APIError); ok {
- /* TODO: restore this conditional
- if appengine.IsDevAppServer() {
- */
- // The Python Dev AppServer raises an ApplicationError with error code 2 (which is
- // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
- if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
- return ErrConcurrentTransaction
- }
- if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
- return ErrConcurrentTransaction
- }
- }
- return err
-}
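RunTransactionOnce above begins a transaction, rolls back if the body fails, and maps a lost commit race to ErrConcurrentTransaction so callers can retry. A minimal sketch of the retry loop a caller might wrap around such a helper; the stand-in runOnce and the attempt count are hypothetical, not part of the deleted package:

```go
package main

import (
	"errors"
	"fmt"
)

// errConcurrent stands in for the package's ErrConcurrentTransaction sentinel.
var errConcurrent = errors.New("concurrent transaction")

// runOnce stands in for a single transaction attempt (begin, run body, commit
// or roll back), as RunTransactionOnce does in the deleted file.
func runOnce(f func() error) error {
	return f()
}

// runInTransaction retries the body a bounded number of times when a commit
// loses a race, which is the usual way such a sentinel error is consumed.
func runInTransaction(f func() error, attempts int) error {
	var err error
	for i := 0; i < attempts; i++ {
		err = runOnce(f)
		if err != errConcurrent {
			return err
		}
	}
	return err
}

func main() {
	tries := 0
	err := runInTransaction(func() error {
		tries++
		if tries < 2 {
			return errConcurrent // simulate losing the first commit race
		}
		return nil
	}, 3)
	fmt.Println("tries:", tries, "err:", err)
}
```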
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/namespace.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/namespace.go
deleted file mode 100644
index 21860ca082..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/namespace.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2012 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package appengine
-
-import (
- "fmt"
- "regexp"
-
- "golang.org/x/net/context"
-
- "google.golang.org/appengine/internal"
-)
-
-// Namespace returns a replacement context that operates within the given namespace.
-func Namespace(c context.Context, namespace string) (context.Context, error) {
- if !validNamespace.MatchString(namespace) {
- return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace)
- }
- return internal.NamespacedContext(c, namespace), nil
-}
-
-// validNamespace matches valid namespace names.
-var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`)
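For reference, the validNamespace pattern above accepts the empty string (the default namespace) and up to 100 characters drawn from letters, digits, '.', '_' and '-'. A small self-contained sketch of the same check:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the deleted validNamespace variable.
var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`)

func main() {
	for _, ns := range []string{"", "team-a", "bad namespace!"} {
		fmt.Printf("%q valid: %v\n", ns, validNamespace.MatchString(ns))
	}
}
```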
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/timeout.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/timeout.go
deleted file mode 100644
index 05642a992a..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/timeout.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2013 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package appengine
-
-import "golang.org/x/net/context"
-
-// IsTimeoutError reports whether err is a timeout error.
-func IsTimeoutError(err error) bool {
- if err == context.DeadlineExceeded {
- return true
- }
- if t, ok := err.(interface {
- IsTimeout() bool
- }); ok {
- return t.IsTimeout()
- }
- return false
-}
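IsTimeoutError above treats context.DeadlineExceeded as a timeout and otherwise duck-types on an IsTimeout() bool method. A minimal sketch of that duck-typed check against a hypothetical error type (the callError type below is illustrative only):

```go
package main

import (
	"errors"
	"fmt"
)

// callError is a hypothetical error; anything with an IsTimeout() bool method
// (and context.DeadlineExceeded itself) is reported as a timeout by the helper.
type callError struct {
	msg     string
	timeout bool
}

func (e *callError) Error() string   { return e.msg }
func (e *callError) IsTimeout() bool { return e.timeout }

// isTimeout mirrors the helper's duck-typed check.
func isTimeout(err error) bool {
	if t, ok := err.(interface{ IsTimeout() bool }); ok {
		return t.IsTimeout()
	}
	return false
}

func main() {
	fmt.Println(isTimeout(&callError{msg: "deadline exceeded", timeout: true})) // true
	fmt.Println(isTimeout(errors.New("boom")))                                  // false
}
```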
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/.travis.yml b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/.travis.yml
deleted file mode 100644
index c037df0de0..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/.travis.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-sudo: false
-language: go
-go:
-- 1.4
-- 1.5
-install:
-- go get -v google.golang.org/cloud/...
-script:
-- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d
-- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json"
- go test -v -tags=integration google.golang.org/cloud/...
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/AUTHORS b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/AUTHORS
deleted file mode 100644
index 3da443dc9f..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/AUTHORS
+++ /dev/null
@@ -1,12 +0,0 @@
-# This is the official list of cloud authors for copyright purposes.
-# This file is distinct from the CONTRIBUTORS files.
-# See the latter for an explanation.
-
-# Names should be added to this file as:
-# Name or Organization <email address>
-# The email address is not required for organizations.
-
-Google Inc.
-Palm Stone Games, Inc.
-Péter Szilágyi
-Tyler Treat
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/CONTRIBUTING.md b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/CONTRIBUTING.md
deleted file mode 100644
index 9a1cab2878..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/CONTRIBUTING.md
+++ /dev/null
@@ -1,114 +0,0 @@
-# Contributing
-
-1. Sign one of the contributor license agreements below.
-1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
-1. Get the cloud package by running `go get -d google.golang.org/cloud`.
- 1. If you have already checked out the source, make sure that the remote git
- origin is https://code.googlesource.com/gocloud:
-
- git remote set-url origin https://code.googlesource.com/gocloud
-1. Make changes and create a change by running `git codereview change <name>`,
-provide a commit message, and use `git codereview mail` to create a Gerrit CL.
-1. Keep amending the change and mailing it as you receive feedback.
-
-## Integration Tests
-
-In addition to the unit tests, you may run the integration test suite.
-
-To run the integration tests, you must create and configure a project in the
-Google Developers Console. Once you have created a project, set the
-following environment variables to run the tests against the actual APIs.
-
-- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
-- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
-
-Create a storage bucket with the same name as the project id set in **GCLOUD_TESTS_GOLANG_PROJECT_ID**.
-The storage integration test will create and delete some objects in this bucket.
-
-Install the [gcloud command-line tool][gcloudcli] to your machine and use it
-to create the indexes used in the datastore integration tests with indexes
-found in `datastore/testdata/index.yaml`:
-
-From the project's root directory:
-
-``` sh
-# Install the app component
-$ gcloud components update app
-
-# Set the default project in your env
-$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
-
-# Authenticate the gcloud tool with your account
-$ gcloud auth login
-
-# Create the indexes
-$ gcloud preview datastore create-indexes datastore/testdata/index.yaml
-
-```
-
-You can run the integration tests by running:
-
-``` sh
-$ go test -v -tags=integration google.golang.org/cloud/...
-```
-
-## Contributor License Agreements
-
-Before we can accept your pull requests you'll need to sign a Contributor
-License Agreement (CLA):
-
-- **If you are an individual writing original source code** and **you own the
-  intellectual property**, then you'll need to sign an [individual CLA][indvcla].
-- **If you work for a company that wants to allow you to contribute your work**,
-then you'll need to sign a [corporate CLA][corpcla].
-
-You can sign these electronically (just scroll to the bottom). After that,
-we'll be able to accept your pull requests.
-
-## Contributor Code of Conduct
-
-As contributors and maintainers of this project,
-and in the interest of fostering an open and welcoming community,
-we pledge to respect all people who contribute through reporting issues,
-posting feature requests, updating documentation,
-submitting pull requests or patches, and other activities.
-
-We are committed to making participation in this project
-a harassment-free experience for everyone,
-regardless of level of experience, gender, gender identity and expression,
-sexual orientation, disability, personal appearance,
-body size, race, ethnicity, age, religion, or nationality.
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery
-* Personal attacks
-* Trolling or insulting/derogatory comments
-* Public or private harassment
-* Publishing others' private information,
-such as physical or electronic
-addresses, without explicit permission
-* Other unethical or unprofessional conduct.
-
-Project maintainers have the right and responsibility to remove, edit, or reject
-comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct.
-By adopting this Code of Conduct,
-project maintainers commit themselves to fairly and consistently
-applying these principles to every aspect of managing this project.
-Project maintainers who do not follow or enforce the Code of Conduct
-may be permanently removed from the project team.
-
-This code of conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community.
-
-Instances of abusive, harassing, or otherwise unacceptable behavior
-may be reported by opening an issue
-or contacting one or more of the project maintainers.
-
-This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
-available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
-
-[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/
-[indvcla]: https://developers.google.com/open-source/cla/individual
-[corpcla]: https://developers.google.com/open-source/cla/corporate
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/CONTRIBUTORS b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/CONTRIBUTORS
deleted file mode 100644
index 475ac6a667..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/CONTRIBUTORS
+++ /dev/null
@@ -1,24 +0,0 @@
-# People who have agreed to one of the CLAs and can contribute patches.
-# The AUTHORS file lists the copyright holders; this file
-# lists people. For example, Google employees are listed here
-# but not in AUTHORS, because Google holds the copyright.
-#
-# https://developers.google.com/open-source/cla/individual
-# https://developers.google.com/open-source/cla/corporate
-#
-# Names should be added to this file as:
-# Name <email address>
-
-# Keep the list alphabetically sorted.
-
-Andrew Gerrand
-Brad Fitzpatrick
-Burcu Dogan
-Dave Day
-David Symonds
-Glenn Lewis
-Johan Euphrosine
-Luna Duclos
-Michael McGreevy
-Péter Szilágyi
-Tyler Treat
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/LICENSE b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/LICENSE
deleted file mode 100644
index a4c5efd822..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2014 Google Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/README.md b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/README.md
deleted file mode 100644
index 10d3995d58..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/README.md
+++ /dev/null
@@ -1,135 +0,0 @@
-# Google Cloud for Go
-
-[![Build Status](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang)
-
-**NOTE:** These packages are experimental, and may occasionally make
-backwards-incompatible changes.
-
-**NOTE:** The GitHub repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
-
-Go packages for Google Cloud Platform services. Supported APIs include:
-
- * Google Cloud Datastore
- * Google Cloud Storage
- * Google Cloud Pub/Sub
- * Google Cloud Container Engine
-
-``` go
-import "google.golang.org/cloud"
-```
-
-Documentation and examples are available at
-[https://godoc.org/google.golang.org/cloud](https://godoc.org/google.golang.org/cloud).
-
-## Authorization
-
-Authorization, throughout the package, is delegated to the golang.org/x/oauth2 package.
-Refer to the [godoc documentation](https://godoc.org/golang.org/x/oauth2)
-for examples on using oauth2 with the Cloud package.
-
-## Google Cloud Datastore
-
-[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a fully
-managed, schemaless database for storing non-relational data. Cloud Datastore
-automatically scales with your users and supports ACID transactions, high availability
-of reads and writes, strong consistency for reads and ancestor queries, and eventual
-consistency for all other queries.
-
-Follow the [activation instructions][cloud-datastore-activation] to use the Google
-Cloud Datastore API with your project.
-
-[https://godoc.org/google.golang.org/cloud/datastore](https://godoc.org/google.golang.org/cloud/datastore)
-
-
-```go
-type Post struct {
- Title string
- Body string `datastore:",noindex"`
- PublishedAt time.Time
-}
-keys := []*datastore.Key{
- datastore.NewKey(ctx, "Post", "post1", 0, nil),
- datastore.NewKey(ctx, "Post", "post2", 0, nil),
-}
-posts := []*Post{
- {Title: "Post 1", Body: "...", PublishedAt: time.Now()},
- {Title: "Post 2", Body: "...", PublishedAt: time.Now()},
-}
-if _, err := datastore.PutMulti(ctx, keys, posts); err != nil {
- log.Println(err)
-}
-```
-
-## Google Cloud Storage
-
-[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store
-data on Google infrastructure with very high reliability, performance and availability,
-and can be used to distribute large data objects to users via direct download.
-
-[https://godoc.org/google.golang.org/cloud/storage](https://godoc.org/google.golang.org/cloud/storage)
-
-
-```go
-// Read the object1 from bucket.
-rc, err := storage.NewReader(ctx, "bucket", "object1")
-if err != nil {
- log.Fatal(err)
-}
-slurp, err := ioutil.ReadAll(rc)
-rc.Close()
-if err != nil {
- log.Fatal(err)
-}
-```
-
-## Google Cloud Pub/Sub (Alpha)
-
-> Google Cloud Pub/Sub is in **Alpha status**. As a result, it might change in
-> backward-incompatible ways and is not recommended for production use. It is not
-> subject to any SLA or deprecation policy.
-
-[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect
-your services with reliable, many-to-many, asynchronous messaging hosted on Google's
-infrastructure. Cloud Pub/Sub automatically scales as you need it and provides a foundation
-for building your own robust, global services.
-
-[https://godoc.org/google.golang.org/cloud/pubsub](https://godoc.org/google.golang.org/cloud/pubsub)
-
-
-```go
-// Publish "hello world" on topic1.
-msgIDs, err := pubsub.Publish(ctx, "topic1", &pubsub.Message{
- Data: []byte("hello world"),
-})
-if err != nil {
- log.Println(err)
-}
-// Pull messages via subscription1.
-msgs, err := pubsub.Pull(ctx, "subscription1", 1)
-if err != nil {
- log.Println(err)
-}
-```
-
-## Contributing
-
-Contributions are welcome. Please, see the
-[CONTRIBUTING](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md)
-document for details. We're using Gerrit for our code reviews. Please don't open pull
-requests against this repo; new pull requests will be automatically closed.
-
-Please note that this project is released with a Contributor Code of Conduct.
-By participating in this project you agree to abide by its terms.
-See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
-for more information.
-
-[cloud-datastore]: https://cloud.google.com/datastore/
-[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
-[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate
-
-[cloud-pubsub]: https://cloud.google.com/pubsub/
-[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs
-
-[cloud-storage]: https://cloud.google.com/storage/
-[cloud-storage-docs]: https://cloud.google.com/storage/docs/overview
-[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/cloud.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/cloud.go
deleted file mode 100644
index a634b05524..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/cloud.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package cloud contains Google Cloud Platform APIs related types
-// and common functions.
-package cloud
-
-import (
- "net/http"
-
- "golang.org/x/net/context"
- "google.golang.org/cloud/internal"
-)
-
-// NewContext returns a new context that uses the provided http.Client.
-// Provided http.Client is responsible to authorize and authenticate
-// the requests made to the Google Cloud APIs.
-// It mutates the client's original Transport to append the cloud
-// package's user-agent to the outgoing requests.
-// You can obtain the project ID from the Google Developers Console,
-// https://console.developers.google.com.
-func NewContext(projID string, c *http.Client) context.Context {
- if c == nil {
- panic("invalid nil *http.Client passed to NewContext")
- }
- return WithContext(context.Background(), projID, c)
-}
-
-// WithContext returns a new context in a similar way NewContext does,
-// but initiates the new context with the specified parent.
-func WithContext(parent context.Context, projID string, c *http.Client) context.Context {
- // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
- // Do User-Agent some other way.
- if _, ok := c.Transport.(*internal.Transport); !ok {
- c.Transport = &internal.Transport{Base: c.Transport}
- }
- return internal.WithContext(parent, projID, c)
-}
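Typical use of NewContext pairs it with an OAuth2-authorized *http.Client. A minimal sketch, assuming the pre-removal google.golang.org/cloud and golang.org/x/oauth2/google packages and Application Default Credentials; the project ID and scope string are placeholders:

```go
package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	"google.golang.org/cloud"
)

func main() {
	// DefaultClient returns an *http.Client authorized via Application
	// Default Credentials; the scope below is illustrative.
	hc, err := google.DefaultClient(context.Background(),
		"https://www.googleapis.com/auth/devstorage.read_only")
	if err != nil {
		log.Fatal(err)
	}
	// NewContext panics on a nil client, so only call it once hc is known good.
	ctx := cloud.NewContext("my-project-id", hc)
	_ = ctx // pass ctx to cloud/storage, cloud/datastore, etc.
}
```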
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/compute/metadata/metadata.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/compute/metadata/metadata.go
deleted file mode 100644
index 3dd684e088..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/compute/metadata/metadata.go
+++ /dev/null
@@ -1,327 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package metadata provides access to Google Compute Engine (GCE)
-// metadata and API service accounts.
-//
-// This package is a wrapper around the GCE metadata service,
-// as documented at https://developers.google.com/compute/docs/metadata.
-package metadata
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net"
- "net/http"
- "net/url"
- "os"
- "strings"
- "sync"
- "time"
-
- "google.golang.org/cloud/internal"
-)
-
-type cachedValue struct {
- k string
- trim bool
- mu sync.Mutex
- v string
-}
-
-var (
- projID = &cachedValue{k: "project/project-id", trim: true}
- projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
- instID = &cachedValue{k: "instance/id", trim: true}
-)
-
-var metaClient = &http.Client{
- Transport: &internal.Transport{
- Base: &http.Transport{
- Dial: (&net.Dialer{
- Timeout: 750 * time.Millisecond,
- KeepAlive: 30 * time.Second,
- }).Dial,
- ResponseHeaderTimeout: 750 * time.Millisecond,
- },
- },
-}
-
-// NotDefinedError is returned when requested metadata is not defined.
-//
-// The underlying string is the suffix after "/computeMetadata/v1/".
-//
-// This error is not returned if the value is defined to be the empty
-// string.
-type NotDefinedError string
-
-func (suffix NotDefinedError) Error() string {
- return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
-}
-
-// Get returns a value from the metadata service.
-// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
-//
-// If the GCE_METADATA_HOST environment variable is not defined, a default of
-// 169.254.169.254 will be used instead.
-//
-// If the requested metadata is not defined, the returned error will
-// be of type NotDefinedError.
-func Get(suffix string) (string, error) {
- val, _, err := getETag(suffix)
- return val, err
-}
-
-// getETag returns a value from the metadata service as well as the associated
-// ETag. This func is otherwise equivalent to Get.
-func getETag(suffix string) (value, etag string, err error) {
- // Using a fixed IP makes it very difficult to spoof the metadata service in
- // a container, which is an important use-case for local testing of cloud
- // deployments. To enable spoofing of the metadata service, the environment
- // variable GCE_METADATA_HOST is first inspected to decide where metadata
- // requests shall go.
- host := os.Getenv("GCE_METADATA_HOST")
- if host == "" {
- // Using 169.254.169.254 instead of "metadata" here because Go
- // binaries built with the "netgo" tag and without cgo won't
- // know the search suffix for "metadata" is
- // ".google.internal", and this IP address is documented as
- // being stable anyway.
- host = "169.254.169.254"
- }
- url := "http://" + host + "/computeMetadata/v1/" + suffix
- req, _ := http.NewRequest("GET", url, nil)
- req.Header.Set("Metadata-Flavor", "Google")
- res, err := metaClient.Do(req)
- if err != nil {
- return "", "", err
- }
- defer res.Body.Close()
- if res.StatusCode == http.StatusNotFound {
- return "", "", NotDefinedError(suffix)
- }
- if res.StatusCode != 200 {
- return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
- }
- all, err := ioutil.ReadAll(res.Body)
- if err != nil {
- return "", "", err
- }
- return string(all), res.Header.Get("Etag"), nil
-}
-
-func getTrimmed(suffix string) (s string, err error) {
- s, err = Get(suffix)
- s = strings.TrimSpace(s)
- return
-}
-
-func (c *cachedValue) get() (v string, err error) {
- defer c.mu.Unlock()
- c.mu.Lock()
- if c.v != "" {
- return c.v, nil
- }
- if c.trim {
- v, err = getTrimmed(c.k)
- } else {
- v, err = Get(c.k)
- }
- if err == nil {
- c.v = v
- }
- return
-}
-
-var onGCE struct {
- sync.Mutex
- set bool
- v bool
-}
-
-// OnGCE reports whether this process is running on Google Compute Engine.
-func OnGCE() bool {
- defer onGCE.Unlock()
- onGCE.Lock()
- if onGCE.set {
- return onGCE.v
- }
- onGCE.set = true
-
- // We use the DNS name of the metadata service here instead of the IP address
- // because we expect that to fail faster in the not-on-GCE case.
- res, err := metaClient.Get("http://metadata.google.internal")
- if err != nil {
- return false
- }
- onGCE.v = res.Header.Get("Metadata-Flavor") == "Google"
- return onGCE.v
-}
-
-// Subscribe subscribes to a value from the metadata service.
-// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
-//
-// Subscribe calls fn with the latest metadata value indicated by the provided
-// suffix. If the metadata value is deleted, fn is called with the empty string
-// and ok false. Subscribe blocks until fn returns a non-nil error or the value
-// is deleted. Subscribe returns the error value returned from the last call to
-// fn, which may be nil when ok == false.
-func Subscribe(suffix string, fn func(v string, ok bool) error) error {
- const failedSubscribeSleep = time.Second * 5
-
- // First check to see if the metadata value exists at all.
- val, lastETag, err := getETag(suffix)
- if err != nil {
- return err
- }
-
- if err := fn(val, true); err != nil {
- return err
- }
-
- ok := true
- suffix += "?wait_for_change=true&last_etag="
- for {
- val, etag, err := getETag(suffix + url.QueryEscape(lastETag))
- if err != nil {
- if _, deleted := err.(NotDefinedError); !deleted {
- time.Sleep(failedSubscribeSleep)
- continue // Retry on other errors.
- }
- ok = false
- }
- lastETag = etag
-
- if err := fn(val, ok); err != nil || !ok {
- return err
- }
- }
-}
-
-// ProjectID returns the current instance's project ID string.
-func ProjectID() (string, error) { return projID.get() }
-
-// NumericProjectID returns the current instance's numeric project ID.
-func NumericProjectID() (string, error) { return projNum.get() }
-
-// InternalIP returns the instance's primary internal IP address.
-func InternalIP() (string, error) {
- return getTrimmed("instance/network-interfaces/0/ip")
-}
-
-// ExternalIP returns the instance's primary external (public) IP address.
-func ExternalIP() (string, error) {
- return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
-}
-
-// Hostname returns the instance's hostname. This will be of the form
-// "<instanceID>.c.<projID>.internal".
-func Hostname() (string, error) {
- return getTrimmed("instance/hostname")
-}
-
-// InstanceTags returns the list of user-defined instance tags,
-// assigned when initially creating a GCE instance.
-func InstanceTags() ([]string, error) {
- var s []string
- j, err := Get("instance/tags")
- if err != nil {
- return nil, err
- }
- if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
- return nil, err
- }
- return s, nil
-}
-
-// InstanceID returns the current VM's numeric instance ID.
-func InstanceID() (string, error) {
- return instID.get()
-}
-
-// InstanceName returns the current VM's instance ID string.
-func InstanceName() (string, error) {
- host, err := Hostname()
- if err != nil {
- return "", err
- }
- return strings.Split(host, ".")[0], nil
-}
-
-// Zone returns the current VM's zone, such as "us-central1-b".
-func Zone() (string, error) {
- zone, err := getTrimmed("instance/zone")
- // zone is of the form "projects/<projNum>/zones/<zoneName>".
- if err != nil {
- return "", err
- }
- return zone[strings.LastIndex(zone, "/")+1:], nil
-}
-
-// InstanceAttributes returns the list of user-defined attributes,
-// assigned when initially creating a GCE VM instance. The value of an
-// attribute can be obtained with InstanceAttributeValue.
-func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
-
-// ProjectAttributes returns the list of user-defined attributes
-// applying to the project as a whole, not just this VM. The value of
-// an attribute can be obtained with ProjectAttributeValue.
-func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
-
-func lines(suffix string) ([]string, error) {
- j, err := Get(suffix)
- if err != nil {
- return nil, err
- }
- s := strings.Split(strings.TrimSpace(j), "\n")
- for i := range s {
- s[i] = strings.TrimSpace(s[i])
- }
- return s, nil
-}
-
-// InstanceAttributeValue returns the value of the provided VM
-// instance attribute.
-//
-// If the requested attribute is not defined, the returned error will
-// be of type NotDefinedError.
-//
-// InstanceAttributeValue may return ("", nil) if the attribute was
-// defined to be the empty string.
-func InstanceAttributeValue(attr string) (string, error) {
- return Get("instance/attributes/" + attr)
-}
-
-// ProjectAttributeValue returns the value of the provided
-// project attribute.
-//
-// If the requested attribute is not defined, the returned error will
-// be of type NotDefinedError.
-//
-// ProjectAttributeValue may return ("", nil) if the attribute was
-// defined to be the empty string.
-func ProjectAttributeValue(attr string) (string, error) {
- return Get("project/attributes/" + attr)
-}
-
-// Scopes returns the service account scopes for the given account.
-// The account may be empty or the string "default" to use the instance's
-// main account.
-func Scopes(serviceAccount string) ([]string, error) {
- if serviceAccount == "" {
- serviceAccount = "default"
- }
- return lines("instance/service-accounts/" + serviceAccount + "/scopes")
-}
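
Taken together, these getters answer the usual bootstrap questions for a process running on GCE. A short sketch, again assuming the vendored `google.golang.org/cloud/compute/metadata` import path; the attribute key is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/cloud/compute/metadata"
)

func main() {
	if !metadata.OnGCE() {
		log.Println("not running on GCE; the metadata service is unreachable")
		return
	}
	proj, err := metadata.ProjectID()
	if err != nil {
		log.Fatal(err)
	}
	zone, err := metadata.Zone()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("project %s, zone %s\n", proj, zone)

	// Attribute lookups return a NotDefinedError when the key is absent.
	if v, err := metadata.InstanceAttributeValue("my-key"); err == nil {
		fmt.Println("my-key =", v)
	}
}
```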
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/internal/cloud.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/internal/cloud.go
deleted file mode 100644
index 8b0db1b5da..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/internal/cloud.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package internal provides support for the cloud packages.
-//
-// Users should not import this package directly.
-package internal
-
-import (
- "fmt"
- "net/http"
- "sync"
-
- "golang.org/x/net/context"
-)
-
-type contextKey struct{}
-
-func WithContext(parent context.Context, projID string, c *http.Client) context.Context {
- if c == nil {
- panic("nil *http.Client passed to WithContext")
- }
- if projID == "" {
- panic("empty project ID passed to WithContext")
- }
- return context.WithValue(parent, contextKey{}, &cloudContext{
- ProjectID: projID,
- HTTPClient: c,
- })
-}
-
-const userAgent = "gcloud-golang/0.1"
-
-type cloudContext struct {
- ProjectID string
- HTTPClient *http.Client
-
- mu sync.Mutex // guards svc
- svc map[string]interface{} // e.g. "storage" => *rawStorage.Service
-}
-
-// Service returns the result of the fill function if it's never been
-// called before for the given name (which is assumed to be an API
-// service name, like "datastore"). If it has already been cached, the fill
-// func is not run.
-// It's safe for concurrent use by multiple goroutines.
-func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} {
- return cc(ctx).service(name, fill)
-}
-
-func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.svc == nil {
- c.svc = make(map[string]interface{})
- } else if v, ok := c.svc[name]; ok {
- return v
- }
- v := fill(c.HTTPClient)
- c.svc[name] = v
- return v
-}
-
-// Transport is an http.RoundTripper that appends
-// Google Cloud client's user-agent to the original
-// request's user-agent header.
-type Transport struct {
- // Base represents the actual http.RoundTripper
- // the requests will be delegated to.
- Base http.RoundTripper
-}
-
-// RoundTrip appends a user-agent to the existing user-agent
-// header and delegates the request to the base http.RoundTripper.
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
- req = cloneRequest(req)
- ua := req.Header.Get("User-Agent")
- if ua == "" {
- ua = userAgent
- } else {
- ua = fmt.Sprintf("%s %s", ua, userAgent)
- }
- req.Header.Set("User-Agent", ua)
- return t.Base.RoundTrip(req)
-}
-
-// cloneRequest returns a clone of the provided *http.Request.
-// The clone is a shallow copy of the struct and its Header map.
-func cloneRequest(r *http.Request) *http.Request {
- // shallow copy of the struct
- r2 := new(http.Request)
- *r2 = *r
- // deep copy of the Header
- r2.Header = make(http.Header)
- for k, s := range r.Header {
- r2.Header[k] = s
- }
- return r2
-}
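
RoundTrip clones the request before touching it because an http.RoundTripper must not mutate the caller's request; cloneRequest does a shallow struct copy plus a fresh Header map. A standalone sketch of the same wrap-clone-decorate pattern, with illustrative names rather than this package's types:

```go
package main

import (
	"fmt"
	"net/http"
)

// uaTransport decorates the User-Agent header before delegating to base.
type uaTransport struct {
	base http.RoundTripper
	ua   string
}

func (t *uaTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	// Copy the request: RoundTrippers must not modify the original.
	r2 := new(http.Request)
	*r2 = *req
	r2.Header = make(http.Header, len(req.Header))
	for k, v := range req.Header {
		r2.Header[k] = v
	}
	if prev := r2.Header.Get("User-Agent"); prev != "" {
		r2.Header.Set("User-Agent", prev+" "+t.ua)
	} else {
		r2.Header.Set("User-Agent", t.ua)
	}
	return t.base.RoundTrip(r2)
}

func main() {
	client := &http.Client{Transport: &uaTransport{base: http.DefaultTransport, ua: "my-agent/0.1"}}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		fmt.Println(err)
		return
	}
	resp.Body.Close()
	fmt.Println(resp.Status)
}
```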
-
-func ProjID(ctx context.Context) string {
- return cc(ctx).ProjectID
-}
-
-func HTTPClient(ctx context.Context) *http.Client {
- return cc(ctx).HTTPClient
-}
-
-// cc returns the internal *cloudContext (cc) state for a context.Context.
-// It panics if the user did it wrong.
-func cc(ctx context.Context) *cloudContext {
- if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok {
- return c
- }
- panic("invalid context.Context type; it should be created with cloud.NewContext")
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/internal/opts/option.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/internal/opts/option.go
deleted file mode 100644
index c5ccf4f56d..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/internal/opts/option.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Package opts holds the DialOpts struct, configurable by
-// cloud.ClientOptions to set up transports for cloud packages.
-//
-// This is a separate package to prevent cycles between the core
-// cloud packages.
-package opts
-
-import (
- "net/http"
-
- "golang.org/x/oauth2"
- "google.golang.org/grpc"
-)
-
-type DialOpt struct {
- Endpoint string
- Scopes []string
- UserAgent string
-
- TokenSource oauth2.TokenSource
-
- HTTPClient *http.Client
- GRPCClient *grpc.ClientConn
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/key.json.enc b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/key.json.enc
deleted file mode 100644
index 2f673a84b1..0000000000
Binary files a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/key.json.enc and /dev/null differ
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/option.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/option.go
deleted file mode 100644
index d4a5aea294..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/option.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cloud
-
-import (
- "net/http"
-
- "golang.org/x/oauth2"
- "google.golang.org/cloud/internal/opts"
- "google.golang.org/grpc"
-)
-
-// ClientOption is used when constructing clients for each cloud service.
-type ClientOption interface {
- // Resolve configures the given DialOpts for this option.
- Resolve(*opts.DialOpt)
-}
-
-// WithTokenSource returns a ClientOption that specifies an OAuth2 token
-// source to be used as the basis for authentication.
-func WithTokenSource(s oauth2.TokenSource) ClientOption {
- return withTokenSource{s}
-}
-
-type withTokenSource struct{ ts oauth2.TokenSource }
-
-func (w withTokenSource) Resolve(o *opts.DialOpt) {
- o.TokenSource = w.ts
-}
-
-// WithEndpoint returns a ClientOption that overrides the default endpoint
-// to be used for a service.
-func WithEndpoint(url string) ClientOption {
- return withEndpoint(url)
-}
-
-type withEndpoint string
-
-func (w withEndpoint) Resolve(o *opts.DialOpt) {
- o.Endpoint = string(w)
-}
-
-// WithScopes returns a ClientOption that overrides the default OAuth2 scopes
-// to be used for a service.
-func WithScopes(scope ...string) ClientOption {
- return withScopes(scope)
-}
-
-type withScopes []string
-
-func (w withScopes) Resolve(o *opts.DialOpt) {
- o.Scopes = []string(w)
-}
-
-// WithUserAgent returns a ClientOption that sets the User-Agent.
-func WithUserAgent(ua string) ClientOption {
- return withUA(ua)
-}
-
-type withUA string
-
-func (w withUA) Resolve(o *opts.DialOpt) { o.UserAgent = string(w) }
-
-// WithBaseHTTP returns a ClientOption that specifies the HTTP client to
-// use as the basis of communications. This option may only be used with
-// services that support HTTP as their communication transport.
-func WithBaseHTTP(client *http.Client) ClientOption {
- return withBaseHTTP{client}
-}
-
-type withBaseHTTP struct{ client *http.Client }
-
-func (w withBaseHTTP) Resolve(o *opts.DialOpt) {
- o.HTTPClient = w.client
-}
-
-// WithBaseGRPC returns a ClientOption that specifies the GRPC client
-// connection to use as the basis of communications. This option may only be
-// used with services that support gRPC as their communication transport.
-func WithBaseGRPC(client *grpc.ClientConn) ClientOption {
- return withBaseGRPC{client}
-}
-
-type withBaseGRPC struct{ client *grpc.ClientConn }
-
-func (w withBaseGRPC) Resolve(o *opts.DialOpt) {
- o.GRPCClient = w.client
-}
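
Every ClientOption simply knows how to write itself into the shared DialOpt via Resolve; a constructor collects the options and resolves them in order. A self-contained sketch of the same interface-based option pattern, using stand-in names (nothing below is part of the package's API):

```go
package main

import "fmt"

// settings plays the role of opts.DialOpt in this sketch.
type settings struct {
	Endpoint  string
	UserAgent string
}

// Option mirrors ClientOption: anything that can resolve itself into settings.
type Option interface {
	Resolve(*settings)
}

type withEndpoint string

func (w withEndpoint) Resolve(s *settings) { s.Endpoint = string(w) }

type withUserAgent string

func (w withUserAgent) Resolve(s *settings) { s.UserAgent = string(w) }

// newClient folds every option into one settings value, much as a real
// service constructor would before dialing.
func newClient(options ...Option) *settings {
	s := new(settings)
	for _, o := range options {
		o.Resolve(s)
	}
	return s
}

func main() {
	s := newClient(
		withEndpoint("https://example.googleapis.com"),
		withUserAgent("my-app/1.0"),
	)
	fmt.Printf("%+v\n", *s)
}
```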
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/acl.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/acl.go
deleted file mode 100644
index 71c5800a86..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/acl.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package storage
-
-import (
- "fmt"
-
- "golang.org/x/net/context"
- raw "google.golang.org/api/storage/v1"
-)
-
-// ACLRole is the access permission for the entity.
-type ACLRole string
-
-const (
- RoleOwner ACLRole = "OWNER"
- RoleReader ACLRole = "READER"
-)
-
-// ACLEntity is an entity holding an ACL permission.
-//
-// It could be in the form of:
-// "user-", "user-","group-", "group-",
-// "domain-" and "project-team-".
-//
-// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
-type ACLEntity string
-
-const (
- AllUsers ACLEntity = "allUsers"
- AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
-)
-
-// ACLRule represents an access control list rule entry for a Google Cloud Storage object or bucket.
-// A bucket is a Google Cloud Storage container whose name is globally unique and contains zero or
-// more objects. An object is a blob of data that is stored in a bucket.
-type ACLRule struct {
- // Entity identifies the entity holding the current rule's permissions.
- Entity ACLEntity
-
- // Role is the access permission for the entity.
- Role ACLRole
-}
-
-// DefaultACL returns the default object ACL entries for the named bucket.
-func DefaultACL(ctx context.Context, bucket string) ([]ACLRule, error) {
- acls, err := rawService(ctx).DefaultObjectAccessControls.List(bucket).Context(ctx).Do()
- if err != nil {
- return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", bucket, err)
- }
- r := make([]ACLRule, 0, len(acls.Items))
- for _, v := range acls.Items {
- if m, ok := v.(map[string]interface{}); ok {
- entity, ok1 := m["entity"].(string)
- role, ok2 := m["role"].(string)
- if ok1 && ok2 {
- r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)})
- }
- }
- }
- return r, nil
-}
-
-// PutDefaultACLRule saves the named default object ACL entity with the provided role for the named bucket.
-func PutDefaultACLRule(ctx context.Context, bucket string, entity ACLEntity, role ACLRole) error {
- acl := &raw.ObjectAccessControl{
- Bucket: bucket,
- Entity: string(entity),
- Role: string(role),
- }
- _, err := rawService(ctx).DefaultObjectAccessControls.Update(bucket, string(entity), acl).Context(ctx).Do()
- if err != nil {
- return fmt.Errorf("storage: error updating default ACL rule for bucket %q, entity %q: %v", bucket, entity, err)
- }
- return nil
-}
-
-// DeleteDefaultACLRule deletes the named default ACL entity for the named bucket.
-func DeleteDefaultACLRule(ctx context.Context, bucket string, entity ACLEntity) error {
- err := rawService(ctx).DefaultObjectAccessControls.Delete(bucket, string(entity)).Context(ctx).Do()
- if err != nil {
- return fmt.Errorf("storage: error deleting default ACL rule for bucket %q, entity %q: %v", bucket, entity, err)
- }
- return nil
-}
-
-// BucketACL returns the ACL entries for the named bucket.
-func BucketACL(ctx context.Context, bucket string) ([]ACLRule, error) {
- acls, err := rawService(ctx).BucketAccessControls.List(bucket).Context(ctx).Do()
- if err != nil {
- return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", bucket, err)
- }
- r := make([]ACLRule, len(acls.Items))
- for i, v := range acls.Items {
- r[i].Entity = ACLEntity(v.Entity)
- r[i].Role = ACLRole(v.Role)
- }
- return r, nil
-}
-
-// PutBucketACLRule saves the named ACL entity with the provided role for the named bucket.
-func PutBucketACLRule(ctx context.Context, bucket string, entity ACLEntity, role ACLRole) error {
- acl := &raw.BucketAccessControl{
- Bucket: bucket,
- Entity: string(entity),
- Role: string(role),
- }
- _, err := rawService(ctx).BucketAccessControls.Update(bucket, string(entity), acl).Context(ctx).Do()
- if err != nil {
- return fmt.Errorf("storage: error updating bucket ACL rule for bucket %q, entity %q: %v", bucket, entity, err)
- }
- return nil
-}
-
-// DeleteBucketACLRule deletes the named ACL entity for the named bucket.
-func DeleteBucketACLRule(ctx context.Context, bucket string, entity ACLEntity) error {
- err := rawService(ctx).BucketAccessControls.Delete(bucket, string(entity)).Context(ctx).Do()
- if err != nil {
- return fmt.Errorf("storage: error deleting bucket ACL rule for bucket %q, entity %q: %v", bucket, entity, err)
- }
- return nil
-}
-
-// ACL returns the ACL entries for the named object.
-func ACL(ctx context.Context, bucket, object string) ([]ACLRule, error) {
- acls, err := rawService(ctx).ObjectAccessControls.List(bucket, object).Context(ctx).Do()
- if err != nil {
- return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", bucket, object, err)
- }
- r := make([]ACLRule, 0, len(acls.Items))
- for _, v := range acls.Items {
- if m, ok := v.(map[string]interface{}); ok {
- entity, ok1 := m["entity"].(string)
- role, ok2 := m["role"].(string)
- if ok1 && ok2 {
- r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)})
- }
- }
- }
- return r, nil
-}
-
-// PutACLRule saves the named ACL entity with the provided role for the named object.
-func PutACLRule(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole) error {
- acl := &raw.ObjectAccessControl{
- Bucket: bucket,
- Entity: string(entity),
- Role: string(role),
- }
- _, err := rawService(ctx).ObjectAccessControls.Update(bucket, object, string(entity), acl).Context(ctx).Do()
- if err != nil {
- return fmt.Errorf("storage: error updating object ACL rule for bucket %q, file %q, entity %q: %v", bucket, object, entity, err)
- }
- return nil
-}
-
-// DeleteACLRule deletes the named ACL entity for the named object.
-func DeleteACLRule(ctx context.Context, bucket, object string, entity ACLEntity) error {
- err := rawService(ctx).ObjectAccessControls.Delete(bucket, object, string(entity)).Context(ctx).Do()
- if err != nil {
- return fmt.Errorf("storage: error deleting object ACL rule for bucket %q, file %q, entity %q: %v", bucket, object, entity, err)
- }
- return nil
-}
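
A hedged usage sketch for the ACL helpers above. The context must carry a project ID and an authenticated *http.Client; the panic message in the internal package points at cloud.NewContext for this, whose signature is assumed here to be NewContext(projectID string, c *http.Client). The project, bucket, object, and use of http.DefaultClient are placeholders only:

```go
package main

import (
	"log"
	"net/http"

	"google.golang.org/cloud"
	"google.golang.org/cloud/storage"
)

func main() {
	// In practice the client must carry OAuth2 credentials with a storage scope.
	ctx := cloud.NewContext("my-project", http.DefaultClient)

	// Make one object publicly readable, then list its ACL entries.
	if err := storage.PutACLRule(ctx, "my-bucket", "report.csv", storage.AllUsers, storage.RoleReader); err != nil {
		log.Fatal(err)
	}
	rules, err := storage.ACL(ctx, "my-bucket", "report.csv")
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range rules {
		log.Printf("%s -> %s", r.Entity, r.Role)
	}
}
```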
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/storage.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/storage.go
deleted file mode 100644
index bf22c6aea5..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/storage.go
+++ /dev/null
@@ -1,350 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package storage contains a Google Cloud Storage client.
-//
-// This package is experimental and may make backwards-incompatible changes.
-package storage
-
-import (
- "crypto"
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha256"
- "crypto/x509"
- "encoding/base64"
- "encoding/pem"
- "errors"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "strings"
- "time"
-
- "google.golang.org/cloud/internal"
-
- "golang.org/x/net/context"
- "google.golang.org/api/googleapi"
- raw "google.golang.org/api/storage/v1"
-)
-
-var (
- ErrBucketNotExist = errors.New("storage: bucket doesn't exist")
- ErrObjectNotExist = errors.New("storage: object doesn't exist")
-)
-
-const (
- // ScopeFullControl grants permissions to manage your
- // data and permissions in Google Cloud Storage.
- ScopeFullControl = raw.DevstorageFullControlScope
-
- // ScopeReadOnly grants permissions to
- // view your data in Google Cloud Storage.
- ScopeReadOnly = raw.DevstorageReadOnlyScope
-
- // ScopeReadWrite grants permissions to manage your
- // data in Google Cloud Storage.
- ScopeReadWrite = raw.DevstorageReadWriteScope
-)
-
-// TODO(jbd): Add storage.buckets.list.
-// TODO(jbd): Add storage.buckets.insert.
-// TODO(jbd): Add storage.buckets.update.
-// TODO(jbd): Add storage.buckets.delete.
-
-// TODO(jbd): Add storage.objects.watch.
-
-// BucketInfo returns the metadata for the specified bucket.
-func BucketInfo(ctx context.Context, name string) (*Bucket, error) {
- resp, err := rawService(ctx).Buckets.Get(name).Projection("full").Context(ctx).Do()
- if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
- return nil, ErrBucketNotExist
- }
- if err != nil {
- return nil, err
- }
- return newBucket(resp), nil
-}
-
-// ListObjects lists objects from the bucket. You can specify a query
-// to filter the results. If q is nil, no filtering is applied.
-func ListObjects(ctx context.Context, bucket string, q *Query) (*Objects, error) {
- c := rawService(ctx).Objects.List(bucket)
- c.Projection("full")
- if q != nil {
- c.Delimiter(q.Delimiter)
- c.Prefix(q.Prefix)
- c.Versions(q.Versions)
- c.PageToken(q.Cursor)
- if q.MaxResults > 0 {
- c.MaxResults(int64(q.MaxResults))
- }
- }
- resp, err := c.Context(ctx).Do()
- if err != nil {
- return nil, err
- }
- objects := &Objects{
- Results: make([]*Object, len(resp.Items)),
- Prefixes: make([]string, len(resp.Prefixes)),
- }
- for i, item := range resp.Items {
- objects.Results[i] = newObject(item)
- }
- for i, prefix := range resp.Prefixes {
- objects.Prefixes[i] = prefix
- }
- if resp.NextPageToken != "" {
- next := Query{}
- if q != nil {
- // keep the other filtering
- // criteria if there is a query
- next = *q
- }
- next.Cursor = resp.NextPageToken
- objects.Next = &next
- }
- return objects, nil
-}
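
ListObjects pages through results via Query.Cursor and hands back the continuation query in Objects.Next, so walking every page is a simple loop. Context setup as in the ACL sketch above; the bucket and prefix are illustrative:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"google.golang.org/cloud"
	"google.golang.org/cloud/storage"
)

func main() {
	ctx := cloud.NewContext("my-project", http.DefaultClient) // placeholder client, as before

	q := &storage.Query{Prefix: "logs/", MaxResults: 100}
	for q != nil {
		objs, err := storage.ListObjects(ctx, "my-bucket", q)
		if err != nil {
			log.Fatal(err)
		}
		for _, o := range objs.Results {
			fmt.Println(o.Name, o.Size)
		}
		q = objs.Next // nil once the last page has been returned
	}
}
```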
-
-// SignedURLOptions allows you to restrict the access to the signed URL.
-type SignedURLOptions struct {
- // GoogleAccessID represents the authorizer of the signed URL generation.
- // It is typically the Google service account client email address from
- // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com".
- // Required.
- GoogleAccessID string
-
- // PrivateKey is the Google service account private key. It is obtainable
- // from the Google Developers Console.
- // At https://console.developers.google.com/project/<your-project-id>/apiui/credential,
- // create a service account client ID or reuse one of your existing service account
- // credentials. Click on the "Generate new P12 key" to generate and download
- // a new private key. Once you download the P12 file, use the following command
- // to convert it into a PEM file.
- //
- // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
- //
- // Provide the contents of the PEM file as a byte slice.
- // Required.
- PrivateKey []byte
-
- // Method is the HTTP method to be used with the signed URL.
- // Signed URLs can be used with GET, HEAD, PUT, and DELETE requests.
- // Required.
- Method string
-
- // Expires is the expiration time on the signed URL. It must be
- // a datetime in the future.
- // Required.
- Expires time.Time
-
- // ContentType is the content type header the client must provide
- // to use the generated signed URL.
- // Optional.
- ContentType string
-
- // Headers is a list of extension headers the client must provide
- // in order to use the generated signed URL.
- // Optional.
- Headers []string
-
- // MD5 is the base64 encoded MD5 checksum of the file.
- // If provided, the client should provide the exact value on the request
- // header in order to use the signed URL.
- // Optional.
- MD5 []byte
-}
-
-// SignedURL returns a URL for the specified object. Signed URLs allow
-// users access to a restricted resource for a limited time without having a
-// Google account or signing in. For more information about signed URLs, see
-// https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs.
-func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
- if opts == nil {
- return "", errors.New("storage: missing required SignedURLOptions")
- }
- if opts.GoogleAccessID == "" || opts.PrivateKey == nil {
- return "", errors.New("storage: missing required credentials to generate a signed URL")
- }
- if opts.Method == "" {
- return "", errors.New("storage: missing required method option")
- }
- if opts.Expires.IsZero() {
- return "", errors.New("storage: missing required expires option")
- }
- key, err := parseKey(opts.PrivateKey)
- if err != nil {
- return "", err
- }
- h := sha256.New()
- fmt.Fprintf(h, "%s\n", opts.Method)
- fmt.Fprintf(h, "%s\n", opts.MD5)
- fmt.Fprintf(h, "%s\n", opts.ContentType)
- fmt.Fprintf(h, "%d\n", opts.Expires.Unix())
- fmt.Fprintf(h, "%s", strings.Join(opts.Headers, "\n"))
- fmt.Fprintf(h, "/%s/%s", bucket, name)
- b, err := rsa.SignPKCS1v15(
- rand.Reader,
- key,
- crypto.SHA256,
- h.Sum(nil),
- )
- if err != nil {
- return "", err
- }
- encoded := base64.StdEncoding.EncodeToString(b)
- u := &url.URL{
- Scheme: "https",
- Host: "storage.googleapis.com",
- Path: fmt.Sprintf("/%s/%s", bucket, name),
- }
- q := u.Query()
- q.Set("GoogleAccessId", opts.GoogleAccessID)
- q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix()))
- q.Set("Signature", string(encoded))
- u.RawQuery = q.Encode()
- return u.String(), nil
-}
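
SignedURL never talks to the network: it builds the string to sign (method, MD5, content type, expiry, extension headers, then /bucket/object), signs it with RSA-SHA256, and folds the base64 signature into the query string. A sketch of calling it; the service-account email and key file name are placeholders, and the key must be the PEM bytes described in the option docs above:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"time"

	"google.golang.org/cloud/storage"
)

func main() {
	pemBytes, err := ioutil.ReadFile("key.pem") // PEM converted from the P12 key
	if err != nil {
		log.Fatal(err)
	}
	u, err := storage.SignedURL("my-bucket", "report.csv", &storage.SignedURLOptions{
		GoogleAccessID: "my-sa@developer.gserviceaccount.com",
		PrivateKey:     pemBytes,
		Method:         "GET",
		Expires:        time.Now().Add(15 * time.Minute),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u) // shareable for the next 15 minutes, no Google account required
}
```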
-
-// StatObject returns meta information about the specified object.
-func StatObject(ctx context.Context, bucket, name string) (*Object, error) {
- o, err := rawService(ctx).Objects.Get(bucket, name).Projection("full").Context(ctx).Do()
- if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
- return nil, ErrObjectNotExist
- }
- if err != nil {
- return nil, err
- }
- return newObject(o), nil
-}
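
Because StatObject reports a missing object through the ErrObjectNotExist sentinel rather than a generic error, existence checks are a straightforward comparison. A small helper sketch (the package and function names are illustrative):

```go
package gcsutil

import (
	"golang.org/x/net/context"

	"google.golang.org/cloud/storage"
)

// objectExists reports whether bucket/name exists, treating the
// ErrObjectNotExist sentinel as a normal (false, nil) outcome.
func objectExists(ctx context.Context, bucket, name string) (bool, error) {
	_, err := storage.StatObject(ctx, bucket, name)
	if err == storage.ErrObjectNotExist {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}
```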
-
-// UpdateAttrs updates an object with the provided attributes.
-// All zero-value attributes are ignored.
-func UpdateAttrs(ctx context.Context, bucket, name string, attrs ObjectAttrs) (*Object, error) {
- o, err := rawService(ctx).Objects.Patch(bucket, name, attrs.toRawObject(bucket)).Projection("full").Context(ctx).Do()
- if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
- return nil, ErrObjectNotExist
- }
- if err != nil {
- return nil, err
- }
- return newObject(o), nil
-}
-
-// DeleteObject deletes the single specified object.
-func DeleteObject(ctx context.Context, bucket, name string) error {
- return rawService(ctx).Objects.Delete(bucket, name).Context(ctx).Do()
-}
-
-// CopyObject copies the source object to the destination.
-// The copied object's attributes are overwritten by attrs if non-nil.
-func CopyObject(ctx context.Context, srcBucket, srcName string, destBucket, destName string, attrs *ObjectAttrs) (*Object, error) {
- if srcBucket == "" || destBucket == "" {
- return nil, errors.New("storage: srcBucket and destBucket must both be non-empty")
- }
- if srcName == "" || destName == "" {
- return nil, errors.New("storage: srcName and destName must be non-empty")
- }
- var rawObject *raw.Object
- if attrs != nil {
- attrs.Name = destName
- if attrs.ContentType == "" {
- return nil, errors.New("storage: attrs.ContentType must be non-empty")
- }
- rawObject = attrs.toRawObject(destBucket)
- }
- o, err := rawService(ctx).Objects.Copy(
- srcBucket, srcName, destBucket, destName, rawObject).Projection("full").Context(ctx).Do()
- if err != nil {
- return nil, err
- }
- return newObject(o), nil
-}
-
-// NewReader creates a new io.ReadCloser to read the contents
-// of the object.
-func NewReader(ctx context.Context, bucket, name string) (io.ReadCloser, error) {
- hc := internal.HTTPClient(ctx)
- u := &url.URL{
- Scheme: "https",
- Host: "storage.googleapis.com",
- Path: fmt.Sprintf("/%s/%s", bucket, name),
- }
- res, err := hc.Get(u.String())
- if err != nil {
- return nil, err
- }
- if res.StatusCode == http.StatusNotFound {
- res.Body.Close()
- return nil, ErrObjectNotExist
- }
- if res.StatusCode < 200 || res.StatusCode > 299 {
- res.Body.Close()
- return res.Body, fmt.Errorf("storage: can't read object %v/%v, status code: %v", bucket, name, res.Status)
- }
- return res.Body, nil
-}
-
-// NewWriter returns a storage Writer that writes to the GCS object
-// identified by the specified name.
-// If such an object doesn't exist, it creates one.
-// Attributes can be set on the object by modifying the returned Writer's
-// ObjectAttrs field before the first call to Write. The name parameter to this
-// function is ignored if the Name field of the ObjectAttrs field is set to a
-// non-empty string.
-//
-// It is the caller's responsibility to call Close when writing is done.
-//
-// The object is not available and any previous object with the same
-// name is not replaced on Cloud Storage until Close is called.
-func NewWriter(ctx context.Context, bucket, name string) *Writer {
- return &Writer{
- ctx: ctx,
- bucket: bucket,
- name: name,
- donec: make(chan struct{}),
- }
-}
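
A round-trip sketch combining NewWriter and NewReader from above: write an object, close it (the object only becomes visible once Close succeeds), then read it back. Context setup is as in the earlier sketches; names and content are illustrative:

```go
package main

import (
	"io/ioutil"
	"log"
	"net/http"

	"google.golang.org/cloud"
	"google.golang.org/cloud/storage"
)

func main() {
	ctx := cloud.NewContext("my-project", http.DefaultClient) // placeholder client, as before

	w := storage.NewWriter(ctx, "my-bucket", "hello.txt")
	w.ContentType = "text/plain" // attributes must be set before the first Write
	if _, err := w.Write([]byte("hello, world\n")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %q, generation %d", w.Object().Name, w.Object().Generation)

	rc, err := storage.NewReader(ctx, "my-bucket", "hello.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	data, err := ioutil.ReadAll(rc)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read back %d bytes", len(data))
}
```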
-
-func rawService(ctx context.Context) *raw.Service {
- return internal.Service(ctx, "storage", func(hc *http.Client) interface{} {
- svc, _ := raw.New(hc)
- return svc
- }).(*raw.Service)
-}
-
-// parseKey converts the binary contents of a private key file
-// to an *rsa.PrivateKey. It detects whether the private key is in a
-// PEM container or not. If so, it extracts the private key
-// from PEM container before conversion. It only supports PEM
-// containers with no passphrase.
-func parseKey(key []byte) (*rsa.PrivateKey, error) {
- if block, _ := pem.Decode(key); block != nil {
- key = block.Bytes
- }
- parsedKey, err := x509.ParsePKCS8PrivateKey(key)
- if err != nil {
- parsedKey, err = x509.ParsePKCS1PrivateKey(key)
- if err != nil {
- return nil, err
- }
- }
- parsed, ok := parsedKey.(*rsa.PrivateKey)
- if !ok {
- return nil, errors.New("oauth2: private key is invalid")
- }
- return parsed, nil
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/types.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/types.go
deleted file mode 100644
index 060deb6ad7..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/types.go
+++ /dev/null
@@ -1,417 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package storage
-
-import (
- "encoding/base64"
- "io"
- "sync"
- "time"
-
- "golang.org/x/net/context"
- raw "google.golang.org/api/storage/v1"
-)
-
-// Bucket represents a Google Cloud Storage bucket.
-type Bucket struct {
- // Name is the name of the bucket.
- Name string
-
- // ACL is the list of access control rules on the bucket.
- ACL []ACLRule
-
- // DefaultObjectACL is the list of access controls to
- // apply to new objects when no object ACL is provided.
- DefaultObjectACL []ACLRule
-
- // Location is the location of the bucket. It defaults to "US".
- Location string
-
- // Metageneration is the metadata generation of the bucket.
- // Read-only.
- Metageneration int64
-
- // StorageClass is the storage class of the bucket. This defines
- // how objects in the bucket are stored and determines the SLA
- // and the cost of storage. Typical values are "STANDARD" and
- // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD".
- StorageClass string
-
- // Created is the creation time of the bucket.
- // Read-only.
- Created time.Time
-}
-
-func newBucket(b *raw.Bucket) *Bucket {
- if b == nil {
- return nil
- }
- bucket := &Bucket{
- Name: b.Name,
- Location: b.Location,
- Metageneration: b.Metageneration,
- StorageClass: b.StorageClass,
- Created: convertTime(b.TimeCreated),
- }
- acl := make([]ACLRule, len(b.Acl))
- for i, rule := range b.Acl {
- acl[i] = ACLRule{
- Entity: ACLEntity(rule.Entity),
- Role: ACLRole(rule.Role),
- }
- }
- bucket.ACL = acl
- objACL := make([]ACLRule, len(b.DefaultObjectAcl))
- for i, rule := range b.DefaultObjectAcl {
- objACL[i] = ACLRule{
- Entity: ACLEntity(rule.Entity),
- Role: ACLRole(rule.Role),
- }
- }
- bucket.DefaultObjectACL = objACL
- return bucket
-}
-
-// ObjectAttrs is the user-editable object attributes.
-type ObjectAttrs struct {
- // Name is the name of the object.
- Name string
-
- // ContentType is the MIME type of the object's content.
- // Optional.
- ContentType string
-
- // ContentLanguage is the optional RFC 1766 Content-Language of
- // the object's content sent in response headers.
- ContentLanguage string
-
- // ContentEncoding is the optional Content-Encoding of the object
- // sent in the response headers.
- ContentEncoding string
-
- // CacheControl is the optional Cache-Control header of the object
- // sent in the response headers.
- CacheControl string
-
- // ContentDisposition is the optional Content-Disposition header of the object
- // sent in the response headers.
- ContentDisposition string
-
- // ACL is the list of access control rules for the object.
- // Optional. If nil or empty, existing ACL rules are preserved.
- ACL []ACLRule
-
- // Metadata represents user-provided metadata, in key/value pairs.
- // It can be nil if the current metadata values need to be preserved.
- Metadata map[string]string
-}
-
-func (o ObjectAttrs) toRawObject(bucket string) *raw.Object {
- var acl []*raw.ObjectAccessControl
- if len(o.ACL) > 0 {
- acl = make([]*raw.ObjectAccessControl, len(o.ACL))
- for i, rule := range o.ACL {
- acl[i] = &raw.ObjectAccessControl{
- Entity: string(rule.Entity),
- Role: string(rule.Role),
- }
- }
- }
- return &raw.Object{
- Bucket: bucket,
- Name: o.Name,
- ContentType: o.ContentType,
- ContentEncoding: o.ContentEncoding,
- ContentLanguage: o.ContentLanguage,
- CacheControl: o.CacheControl,
- ContentDisposition: o.ContentDisposition,
- Acl: acl,
- Metadata: o.Metadata,
- }
-}
-
-// Object represents a Google Cloud Storage (GCS) object.
-type Object struct {
- // Bucket is the name of the bucket containing this GCS object.
- Bucket string
-
- // Name is the name of the object within the bucket.
- Name string
-
- // ContentType is the MIME type of the object's content.
- ContentType string
-
- // ContentLanguage is the content language of the object's content.
- ContentLanguage string
-
- // CacheControl is the Cache-Control header to be sent in the response
- // headers when serving the object data.
- CacheControl string
-
- // ACL is the list of access control rules for the object.
- ACL []ACLRule
-
- // Owner is the owner of the object.
- //
- // If non-zero, it is in the form of "user-<userId>".
- Owner string
-
- // Size is the length of the object's content.
- Size int64
-
- // ContentEncoding is the encoding of the object's content.
- ContentEncoding string
-
- // MD5 is the MD5 hash of the object's content.
- MD5 []byte
-
- // CRC32C is the CRC32 checksum of the object's content using
- // the Castagnoli93 polynomial.
- CRC32C uint32
-
- // MediaLink is a URL to the object's content.
- MediaLink string
-
- // Metadata represents user-provided metadata, in key/value pairs.
- // It can be nil if no metadata is provided.
- Metadata map[string]string
-
- // Generation is the generation number of the object's content.
- Generation int64
-
- // MetaGeneration is the version of the metadata for this
- // object at this generation. This field is used for preconditions
- // and for detecting changes in metadata. A metageneration number
- // is only meaningful in the context of a particular generation
- // of a particular object.
- MetaGeneration int64
-
- // StorageClass is the storage class of the bucket.
- // This value defines how objects in the bucket are stored and
- // determines the SLA and the cost of storage. Typical values are
- // "STANDARD" and "DURABLE_REDUCED_AVAILABILITY".
- // It defaults to "STANDARD".
- StorageClass string
-
- // Deleted is the time the object was deleted.
- // If not deleted, it is the zero value.
- Deleted time.Time
-
- // Updated is the creation or modification time of the object.
- // For buckets with versioning enabled, changing an object's
- // metadata does not change this property.
- Updated time.Time
-}
-
-// convertTime converts a time in RFC3339 format to time.Time.
-// If any error occurs in parsing, the zero-value time.Time is silently returned.
-func convertTime(t string) time.Time {
- var r time.Time
- if t != "" {
- r, _ = time.Parse(time.RFC3339, t)
- }
- return r
-}
-
-func newObject(o *raw.Object) *Object {
- if o == nil {
- return nil
- }
- acl := make([]ACLRule, len(o.Acl))
- for i, rule := range o.Acl {
- acl[i] = ACLRule{
- Entity: ACLEntity(rule.Entity),
- Role: ACLRole(rule.Role),
- }
- }
- owner := ""
- if o.Owner != nil {
- owner = o.Owner.Entity
- }
- md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash)
- var crc32c uint32
- d, err := base64.StdEncoding.DecodeString(o.Crc32c)
- if err == nil && len(d) == 4 {
- crc32c = uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3])
- }
- return &Object{
- Bucket: o.Bucket,
- Name: o.Name,
- ContentType: o.ContentType,
- ContentLanguage: o.ContentLanguage,
- CacheControl: o.CacheControl,
- ACL: acl,
- Owner: owner,
- ContentEncoding: o.ContentEncoding,
- Size: int64(o.Size),
- MD5: md5,
- CRC32C: crc32c,
- MediaLink: o.MediaLink,
- Metadata: o.Metadata,
- Generation: o.Generation,
- MetaGeneration: o.Metageneration,
- StorageClass: o.StorageClass,
- Deleted: convertTime(o.TimeDeleted),
- Updated: convertTime(o.Updated),
- }
-}
-
-// Query represents a query to filter objects from a bucket.
-type Query struct {
- // Delimiter returns results in a directory-like fashion.
- // Results will contain only objects whose names, aside from the
- // prefix, do not contain delimiter. Objects whose names,
- // aside from the prefix, contain delimiter will have their name,
- // truncated after the delimiter, returned in prefixes.
- // Duplicate prefixes are omitted.
- // Optional.
- Delimiter string
-
- // Prefix is the prefix filter to query objects
- // whose names begin with this prefix.
- // Optional.
- Prefix string
-
- // Versions indicates whether multiple versions of the same
- // object will be included in the results.
- Versions bool
-
- // Cursor is a previously-returned page token
- // representing part of the larger set of results to view.
- // Optional.
- Cursor string
-
- // MaxResults is the maximum number of items plus prefixes
- // to return. As duplicate prefixes are omitted,
- // fewer total results may be returned than requested.
- // The default page limit is used if it is negative or zero.
- MaxResults int
-}
-
-// Objects represents a list of objects returned from
-// a bucket look-up request and a query to retrieve more
-// objects from the next pages.
-type Objects struct {
- // Results represent a list of object results.
- Results []*Object
-
- // Next is the continuation query to retrieve more
- // results with the same filtering criteria. If there
- // are no more results to retrieve, it is nil.
- Next *Query
-
- // Prefixes represents prefixes of objects
- // matching-but-not-listed up to and including
- // the requested delimiter.
- Prefixes []string
-}
-
-// contentTyper implements ContentTyper to enable an
-// io.ReadCloser to specify its MIME type.
-type contentTyper struct {
- io.Reader
- t string
-}
-
-func (c *contentTyper) ContentType() string {
- return c.t
-}
-
-// A Writer writes a Cloud Storage object.
-type Writer struct {
- // ObjectAttrs are optional attributes to set on the object. Any attributes
- // must be initialized before the first Write call. Nil or zero-valued
- // attributes are ignored.
- ObjectAttrs
-
- ctx context.Context
- bucket string
- name string
-
- once sync.Once
-
- opened bool
- r io.Reader
- pw *io.PipeWriter
-
- donec chan struct{} // closed after err and obj are set.
- err error
- obj *Object
-}
-
-func (w *Writer) open() {
- attrs := w.ObjectAttrs
- // Always set the name, otherwise the backend
- // rejects the request and responds with an HTTP 400.
- if attrs.Name == "" {
- attrs.Name = w.name
- }
- pr, pw := io.Pipe()
- w.r = &contentTyper{pr, attrs.ContentType}
- w.pw = pw
- w.opened = true
-
- go func() {
- resp, err := rawService(w.ctx).Objects.Insert(
- w.bucket, attrs.toRawObject(w.bucket)).Media(w.r).Projection("full").Context(w.ctx).Do()
- w.err = err
- if err == nil {
- w.obj = newObject(resp)
- } else {
- pr.CloseWithError(w.err)
- }
- close(w.donec)
- }()
-}
-
-// Write appends to w.
-func (w *Writer) Write(p []byte) (n int, err error) {
- if w.err != nil {
- return 0, w.err
- }
- if !w.opened {
- w.open()
- }
- return w.pw.Write(p)
-}
-
-// Close completes the write operation and flushes any buffered data.
-// If Close doesn't return an error, metadata about the written object
-// can be retrieved by calling Object.
-func (w *Writer) Close() error {
- if !w.opened {
- w.open()
- }
- if err := w.pw.Close(); err != nil {
- return err
- }
- <-w.donec
- return w.err
-}
-
-// CloseWithError aborts the write operation with the provided error.
-// CloseWithError always returns nil.
-func (w *Writer) CloseWithError(err error) error {
- if !w.opened {
- return nil
- }
- return w.pw.CloseWithError(err)
-}
-
-// Object returns metadata about a successfully-written object.
-// It's only valid to call it after Close returns nil.
-func (w *Writer) Object() *Object {
- return w.obj
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/.travis.yml b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/.travis.yml
deleted file mode 100644
index 055d6641c7..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/.travis.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-language: go
-
-before_install:
- - go get github.com/axw/gocov/gocov
- - go get github.com/mattn/goveralls
- - go get golang.org/x/tools/cmd/cover
-
-install:
- - mkdir -p "$GOPATH/src/google.golang.org"
- - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/google.golang.org/grpc"
-
-script:
- - make test testrace
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/CONTRIBUTING.md
deleted file mode 100644
index 407d384a7c..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# How to contribute
-
-We definitely welcome patches and contributions to gRPC! Here are some guidelines
-and information about how to do so.
-
-## Getting started
-
-### Legal requirements
-
-In order to protect both you and ourselves, you will need to sign the
-[Contributor License Agreement](https://cla.developers.google.com/clas).
-
-### Filing Issues
-When filing an issue, make sure to answer these five questions:
-
-1. What version of Go are you using (`go version`)?
-2. What operating system and processor architecture are you using?
-3. What did you do?
-4. What did you expect to see?
-5. What did you see instead?
-
-### Contributing code
-Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file.
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/LICENSE b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/LICENSE
deleted file mode 100644
index f4988b4507..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright 2014, Google Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/Makefile b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/Makefile
deleted file mode 100644
index 12e84e4e5b..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/Makefile
+++ /dev/null
@@ -1,50 +0,0 @@
-.PHONY: \
- all \
- deps \
- updatedeps \
- testdeps \
- updatetestdeps \
- build \
- proto \
- test \
- testrace \
- clean \
-
-all: test testrace
-
-deps:
- go get -d -v google.golang.org/grpc/...
-
-updatedeps:
- go get -d -v -u -f google.golang.org/grpc/...
-
-testdeps:
- go get -d -v -t google.golang.org/grpc/...
-
-updatetestdeps:
- go get -d -v -t -u -f google.golang.org/grpc/...
-
-build: deps
- go build google.golang.org/grpc/...
-
-proto:
- @ if ! which protoc > /dev/null; then \
- echo "error: protoc not installed" >&2; \
- exit 1; \
- fi
- go get -v github.com/golang/protobuf/protoc-gen-go
- for file in $$(git ls-files '*.proto'); do \
- protoc -I $$(dirname $$file) --go_out=plugins=grpc:$$(dirname $$file) $$file; \
- done
-
-test: testdeps
- go test -v -cpu 1,4 google.golang.org/grpc/...
-
-testrace: testdeps
- go test -v -race -cpu 1,4 google.golang.org/grpc/...
-
-clean:
- go clean google.golang.org/grpc/...
-
-coverage: testdeps
- ./coverage.sh --coveralls
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/PATENTS b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/PATENTS
deleted file mode 100644
index 619f9dbfe6..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the GRPC project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of GRPC, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of GRPC. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of GRPC or any code incorporated within this
-implementation of GRPC constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of GRPC
-shall terminate as of the date such litigation is filed.
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/README.md b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/README.md
deleted file mode 100644
index 37b05f0953..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/README.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# gRPC-Go
-
-[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
-
-The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide.
-
-Installation
-------------
-
-To install this package, you need to install Go 1.4 or above and set up your Go workspace on your computer. The simplest way to install the library is to run:
-
-```
-$ go get google.golang.org/grpc
-```
-
-Prerequisites
--------------
-
-This requires Go 1.4 or above.
-
-Constraints
------------
-The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you will need to discuss them with the gRPC-Go authors and consultants.
-
-Documentation
--------------
-See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/).
-
-Status
-------
-Beta release
-
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/call.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/call.go
deleted file mode 100644
index 504a6e18a8..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/call.go
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-package grpc
-
-import (
- "bytes"
- "io"
- "time"
-
- "golang.org/x/net/context"
- "golang.org/x/net/trace"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/transport"
-)
-
-// recvResponse receives and parses an RPC response.
-// On error, it returns the error and indicates whether the call should be retried.
-//
-// TODO(zhaoq): Check whether the received message sequence is valid.
-func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error {
- // Try to acquire header metadata from the server if there is any.
- var err error
- c.headerMD, err = stream.Header()
- if err != nil {
- return err
- }
- p := &parser{r: stream}
- for {
- if err = recv(p, dopts.codec, stream, dopts.dc, reply); err != nil {
- if err == io.EOF {
- break
- }
- return err
- }
- }
- c.trailerMD = stream.Trailer()
- return nil
-}
-
-// sendRequest writes out various information of an RPC such as Context and Message.
-func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
- stream, err := t.NewStream(ctx, callHdr)
- if err != nil {
- return nil, err
- }
- defer func() {
- if err != nil {
- if _, ok := err.(transport.ConnectionError); !ok {
- t.CloseStream(stream, err)
- }
- }
- }()
- var cbuf *bytes.Buffer
- if compressor != nil {
- cbuf = new(bytes.Buffer)
- }
- outBuf, err := encode(codec, args, compressor, cbuf)
- if err != nil {
- return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err)
- }
- err = t.Write(stream, outBuf, opts)
- if err != nil {
- return nil, err
- }
- // Sent successfully.
- return stream, nil
-}
-
-// Invoke is called by the generated code. It sends the RPC request on the
-// wire and returns after the response is received.
-func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) {
- var c callInfo
- for _, o := range opts {
- if err := o.before(&c); err != nil {
- return toRPCErr(err)
- }
- }
- defer func() {
- for _, o := range opts {
- o.after(&c)
- }
- }()
- if EnableTracing {
- c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
- defer c.traceInfo.tr.Finish()
- c.traceInfo.firstLine.client = true
- if deadline, ok := ctx.Deadline(); ok {
- c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
- }
- c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
- // TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
- defer func() {
- if err != nil {
- c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- c.traceInfo.tr.SetError()
- }
- }()
- }
- topts := &transport.Options{
- Last: true,
- Delay: false,
- }
- var (
- lastErr error // record the error that happened
- )
- for {
- var (
- err error
- t transport.ClientTransport
- stream *transport.Stream
- )
- // TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs.
- if lastErr != nil && c.failFast {
- return toRPCErr(lastErr)
- }
- callHdr := &transport.CallHdr{
- Host: cc.authority,
- Method: method,
- }
- if cc.dopts.cp != nil {
- callHdr.SendCompress = cc.dopts.cp.Type()
- }
- t, err = cc.dopts.picker.Pick(ctx)
- if err != nil {
- if lastErr != nil {
- // This was a retry; return the error from the last attempt.
- return toRPCErr(lastErr)
- }
- return toRPCErr(err)
- }
- if c.traceInfo.tr != nil {
- c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
- }
- stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts)
- if err != nil {
- if _, ok := err.(transport.ConnectionError); ok {
- lastErr = err
- continue
- }
- if lastErr != nil {
- return toRPCErr(lastErr)
- }
- return toRPCErr(err)
- }
- // Receive the response
- lastErr = recvResponse(cc.dopts, t, &c, stream, reply)
- if _, ok := lastErr.(transport.ConnectionError); ok {
- continue
- }
- if c.traceInfo.tr != nil {
- c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
- }
- t.CloseStream(stream, lastErr)
- if lastErr != nil {
- return toRPCErr(lastErr)
- }
- return Errorf(stream.StatusCode(), stream.StatusDesc())
- }
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/clientconn.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/clientconn.go
deleted file mode 100644
index e2264236f6..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/clientconn.go
+++ /dev/null
@@ -1,590 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-package grpc
-
-import (
- "errors"
- "fmt"
- "net"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/context"
- "golang.org/x/net/trace"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/transport"
-)
-
-var (
- // ErrUnspecTarget indicates that the target address is unspecified.
- ErrUnspecTarget = errors.New("grpc: target is unspecified")
- // ErrNoTransportSecurity indicates that there is no transport security
- // being set for ClientConn. Users should either set one or explicitly
- // call WithInsecure DialOption to disable security.
- ErrNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
- // ErrCredentialsMisuse indicates that users want to transmit security information
- // (e.g., oauth2 token) which requires secure connection on an insecure
- // connection.
- ErrCredentialsMisuse = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)")
- // ErrClientConnClosing indicates that the operation is illegal because
- // the session is closing.
- ErrClientConnClosing = errors.New("grpc: the client connection is closing")
- // ErrClientConnTimeout indicates that the connection could not be
- // established or re-established within the specified timeout.
- ErrClientConnTimeout = errors.New("grpc: timed out trying to connect")
-	// minConnectTimeout is the minimum time to give a connection to complete.
- minConnectTimeout = 20 * time.Second
-)
-
-// dialOptions configure a Dial call. dialOptions are set by the DialOption
-// values passed to Dial.
-type dialOptions struct {
- codec Codec
- cp Compressor
- dc Decompressor
- picker Picker
- block bool
- insecure bool
- copts transport.ConnectOptions
-}
-
-// DialOption configures how we set up the connection.
-type DialOption func(*dialOptions)
-
-// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
-func WithCodec(c Codec) DialOption {
- return func(o *dialOptions) {
- o.codec = c
- }
-}
-
-// WithCompressor returns a DialOption which sets a Compressor to be used for
-// message compression.
-func WithCompressor(cp Compressor) DialOption {
- return func(o *dialOptions) {
- o.cp = cp
- }
-}
-
-// WithDecompressor returns a DialOption which sets a Decompressor to be used for
-// decompressing incoming messages.
-func WithDecompressor(dc Decompressor) DialOption {
- return func(o *dialOptions) {
- o.dc = dc
- }
-}
-
-// WithPicker returns a DialOption which sets a picker for connection selection.
-func WithPicker(p Picker) DialOption {
- return func(o *dialOptions) {
- o.picker = p
- }
-}
-
-// WithBlock returns a DialOption which makes the caller of Dial block until the
-// underlying connection is up. Without this, Dial returns immediately and connecting
-// to the server happens in the background.
-func WithBlock() DialOption {
- return func(o *dialOptions) {
- o.block = true
- }
-}
-
-// WithInsecure returns a DialOption which disables transport security for this ClientConn.
-// Note that transport security is required unless WithInsecure is set.
-func WithInsecure() DialOption {
- return func(o *dialOptions) {
- o.insecure = true
- }
-}
-
-// WithTransportCredentials returns a DialOption which configures connection-level
-// security credentials (e.g., TLS/SSL).
-func WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption {
- return func(o *dialOptions) {
- o.copts.AuthOptions = append(o.copts.AuthOptions, creds)
- }
-}
-
-// WithPerRPCCredentials returns a DialOption which sets per-RPC credentials that
-// place auth state on each outbound RPC.
-func WithPerRPCCredentials(creds credentials.Credentials) DialOption {
- return func(o *dialOptions) {
- o.copts.AuthOptions = append(o.copts.AuthOptions, creds)
- }
-}
-
-// WithTimeout returns a DialOption that configures a timeout for dialing a client connection.
-func WithTimeout(d time.Duration) DialOption {
- return func(o *dialOptions) {
- o.copts.Timeout = d
- }
-}
-
-// WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
-func WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) DialOption {
- return func(o *dialOptions) {
- o.copts.Dialer = f
- }
-}
-
-// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.
-func WithUserAgent(s string) DialOption {
- return func(o *dialOptions) {
- o.copts.UserAgent = s
- }
-}
-
-// Dial creates a client connection to the given target.
-func Dial(target string, opts ...DialOption) (*ClientConn, error) {
- cc := &ClientConn{
- target: target,
- }
- for _, opt := range opts {
- opt(&cc.dopts)
- }
- if cc.dopts.codec == nil {
- // Set the default codec.
- cc.dopts.codec = protoCodec{}
- }
- if cc.dopts.picker == nil {
- cc.dopts.picker = &unicastPicker{
- target: target,
- }
- }
- if err := cc.dopts.picker.Init(cc); err != nil {
- return nil, err
- }
- colonPos := strings.LastIndex(target, ":")
- if colonPos == -1 {
- colonPos = len(target)
- }
- cc.authority = target[:colonPos]
- return cc, nil
-}
-
-// ConnectivityState indicates the state of a client connection.
-type ConnectivityState int
-
-const (
- // Idle indicates the ClientConn is idle.
- Idle ConnectivityState = iota
-	// Connecting indicates the ClientConn is connecting.
- Connecting
- // Ready indicates the ClientConn is ready for work.
- Ready
- // TransientFailure indicates the ClientConn has seen a failure but expects to recover.
- TransientFailure
- // Shutdown indicates the ClientConn has started shutting down.
- Shutdown
-)
-
-func (s ConnectivityState) String() string {
- switch s {
- case Idle:
- return "IDLE"
- case Connecting:
- return "CONNECTING"
- case Ready:
- return "READY"
- case TransientFailure:
- return "TRANSIENT_FAILURE"
- case Shutdown:
- return "SHUTDOWN"
- default:
- panic(fmt.Sprintf("unknown connectivity state: %d", s))
- }
-}
-
-// ClientConn represents a client connection to an RPC service.
-type ClientConn struct {
- target string
- authority string
- dopts dialOptions
-}
-
-// State returns the connectivity state of cc.
-// This is EXPERIMENTAL API.
-func (cc *ClientConn) State() (ConnectivityState, error) {
- return cc.dopts.picker.State()
-}
-
-// WaitForStateChange blocks until the state changes to something other than the sourceState.
-// It returns the new state or error.
-// This is EXPERIMENTAL API.
-func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
- return cc.dopts.picker.WaitForStateChange(ctx, sourceState)
-}
-
-// Close starts to tear down the ClientConn.
-func (cc *ClientConn) Close() error {
- return cc.dopts.picker.Close()
-}
-
-// Conn is a client connection to a single destination.
-type Conn struct {
- target string
- dopts dialOptions
- resetChan chan int
- shutdownChan chan struct{}
- events trace.EventLog
-
- mu sync.Mutex
- state ConnectivityState
- stateCV *sync.Cond
-	// ready is closed and set to nil when a new transport is up or the
-	// connection attempt failed due to timeout.
- ready chan struct{}
- transport transport.ClientTransport
-}
-
-// NewConn creates a Conn.
-func NewConn(cc *ClientConn) (*Conn, error) {
- if cc.target == "" {
- return nil, ErrUnspecTarget
- }
- c := &Conn{
- target: cc.target,
- dopts: cc.dopts,
- resetChan: make(chan int, 1),
- shutdownChan: make(chan struct{}),
- }
- if EnableTracing {
- c.events = trace.NewEventLog("grpc.ClientConn", c.target)
- }
- if !c.dopts.insecure {
- var ok bool
- for _, cd := range c.dopts.copts.AuthOptions {
- if _, ok := cd.(credentials.TransportAuthenticator); !ok {
- continue
- }
- ok = true
- }
- if !ok {
- return nil, ErrNoTransportSecurity
- }
- } else {
- for _, cd := range c.dopts.copts.AuthOptions {
- if cd.RequireTransportSecurity() {
- return nil, ErrCredentialsMisuse
- }
- }
- }
- c.stateCV = sync.NewCond(&c.mu)
- if c.dopts.block {
- if err := c.resetTransport(false); err != nil {
- c.Close()
- return nil, err
- }
- // Start to monitor the error status of transport.
- go c.transportMonitor()
- } else {
- // Start a goroutine connecting to the server asynchronously.
- go func() {
- if err := c.resetTransport(false); err != nil {
- grpclog.Printf("Failed to dial %s: %v; please retry.", c.target, err)
- c.Close()
- return
- }
- c.transportMonitor()
- }()
- }
- return c, nil
-}
-
-// printf records an event in cc's event log, unless cc has been closed.
-// REQUIRES cc.mu is held.
-func (cc *Conn) printf(format string, a ...interface{}) {
- if cc.events != nil {
- cc.events.Printf(format, a...)
- }
-}
-
-// errorf records an error in cc's event log, unless cc has been closed.
-// REQUIRES cc.mu is held.
-func (cc *Conn) errorf(format string, a ...interface{}) {
- if cc.events != nil {
- cc.events.Errorf(format, a...)
- }
-}
-
-// State returns the connectivity state of the Conn.
-func (cc *Conn) State() ConnectivityState {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- return cc.state
-}
-
-// WaitForStateChange blocks until the state changes to something other than the sourceState.
-func (cc *Conn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- if sourceState != cc.state {
- return cc.state, nil
- }
- done := make(chan struct{})
- var err error
- go func() {
- select {
- case <-ctx.Done():
- cc.mu.Lock()
- err = ctx.Err()
- cc.stateCV.Broadcast()
- cc.mu.Unlock()
- case <-done:
- }
- }()
- defer close(done)
- for sourceState == cc.state {
- cc.stateCV.Wait()
- if err != nil {
- return cc.state, err
- }
- }
- return cc.state, nil
-}
-
-// NotifyReset tries to signal that the underlying transport needs to be reset,
-// for example because of a name resolution change in flight.
-func (cc *Conn) NotifyReset() {
- select {
- case cc.resetChan <- 0:
- default:
- }
-}
-
-func (cc *Conn) resetTransport(closeTransport bool) error {
- var retries int
- start := time.Now()
- for {
- cc.mu.Lock()
- cc.printf("connecting")
- if cc.state == Shutdown {
- // cc.Close() has been invoked.
- cc.mu.Unlock()
- return ErrClientConnClosing
- }
- cc.state = Connecting
- cc.stateCV.Broadcast()
- cc.mu.Unlock()
- if closeTransport {
- cc.transport.Close()
- }
- // Adjust timeout for the current try.
- copts := cc.dopts.copts
- if copts.Timeout < 0 {
- cc.Close()
- return ErrClientConnTimeout
- }
- if copts.Timeout > 0 {
- copts.Timeout -= time.Since(start)
- if copts.Timeout <= 0 {
- cc.Close()
- return ErrClientConnTimeout
- }
- }
- sleepTime := backoff(retries)
- timeout := sleepTime
- if timeout < minConnectTimeout {
- timeout = minConnectTimeout
- }
- if copts.Timeout == 0 || copts.Timeout > timeout {
- copts.Timeout = timeout
- }
- connectTime := time.Now()
- addr, err := cc.dopts.picker.PickAddr()
- var newTransport transport.ClientTransport
- if err == nil {
- newTransport, err = transport.NewClientTransport(addr, &copts)
- }
- if err != nil {
- cc.mu.Lock()
- if cc.state == Shutdown {
- // cc.Close() has been invoked.
- cc.mu.Unlock()
- return ErrClientConnClosing
- }
- cc.errorf("transient failure: %v", err)
- cc.state = TransientFailure
- cc.stateCV.Broadcast()
- if cc.ready != nil {
- close(cc.ready)
- cc.ready = nil
- }
- cc.mu.Unlock()
- sleepTime -= time.Since(connectTime)
- if sleepTime < 0 {
- sleepTime = 0
- }
- // Fail early before falling into sleep.
- if cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) {
- cc.mu.Lock()
- cc.errorf("connection timeout")
- cc.mu.Unlock()
- cc.Close()
- return ErrClientConnTimeout
- }
- closeTransport = false
- time.Sleep(sleepTime)
- retries++
- grpclog.Printf("grpc: Conn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, cc.target)
- continue
- }
- cc.mu.Lock()
- cc.printf("ready")
- if cc.state == Shutdown {
- // cc.Close() has been invoked.
- cc.mu.Unlock()
- newTransport.Close()
- return ErrClientConnClosing
- }
- cc.state = Ready
- cc.stateCV.Broadcast()
- cc.transport = newTransport
- if cc.ready != nil {
- close(cc.ready)
- cc.ready = nil
- }
- cc.mu.Unlock()
- return nil
- }
-}
-
-func (cc *Conn) reconnect() bool {
- cc.mu.Lock()
- if cc.state == Shutdown {
- // cc.Close() has been invoked.
- cc.mu.Unlock()
- return false
- }
- cc.state = TransientFailure
- cc.stateCV.Broadcast()
- cc.mu.Unlock()
- if err := cc.resetTransport(true); err != nil {
- // The ClientConn is closing.
- cc.mu.Lock()
- cc.printf("transport exiting: %v", err)
- cc.mu.Unlock()
- grpclog.Printf("grpc: Conn.transportMonitor exits due to: %v", err)
- return false
- }
- return true
-}
-
-// transportMonitor runs in a goroutine to track errors on the transport and creates a
-// new transport when an error occurs. It returns when the channel is closing.
-func (cc *Conn) transportMonitor() {
- for {
- select {
- // shutdownChan is needed to detect the teardown when
- // the ClientConn is idle (i.e., no RPC in flight).
- case <-cc.shutdownChan:
- return
- case <-cc.resetChan:
- if !cc.reconnect() {
- return
- }
- case <-cc.transport.Error():
- if !cc.reconnect() {
- return
- }
-			// Drain the reset signal if there is one, since it is now outdated.
- select {
- case <-cc.resetChan:
- default:
- }
- }
- }
-}
-
-// Wait blocks until i) the new transport is up or ii) ctx is done or iii) cc is closed.
-func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) {
- for {
- cc.mu.Lock()
- switch {
- case cc.state == Shutdown:
- cc.mu.Unlock()
- return nil, ErrClientConnClosing
- case cc.state == Ready:
- ct := cc.transport
- cc.mu.Unlock()
- return ct, nil
- default:
- ready := cc.ready
- if ready == nil {
- ready = make(chan struct{})
- cc.ready = ready
- }
- cc.mu.Unlock()
- select {
- case <-ctx.Done():
- return nil, transport.ContextErr(ctx.Err())
- // Wait until the new transport is ready or failed.
- case <-ready:
- }
- }
- }
-}
-
-// Close starts to tear down the Conn. Returns ErrClientConnClosing if
-// it has been closed (mostly due to dial time-out).
-// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
-// some edge cases (e.g., the caller opens and closes many ClientConn's in a
-// tight loop).
-func (cc *Conn) Close() error {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- if cc.state == Shutdown {
- return ErrClientConnClosing
- }
- cc.state = Shutdown
- cc.stateCV.Broadcast()
- if cc.events != nil {
- cc.events.Finish()
- cc.events = nil
- }
- if cc.ready != nil {
- close(cc.ready)
- cc.ready = nil
- }
- if cc.transport != nil {
- cc.transport.Close()
- }
- if cc.shutdownChan != nil {
- close(cc.shutdownChan)
- }
- return nil
-}
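
As a usage sketch of the dial options defined above (and nothing more than a sketch), the following assumes an insecure local server at localhost:50051; with this vintage of the API, WithBlock makes Dial wait for the underlying connection to come up, and WithTimeout bounds that wait.

    package main

    import (
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/grpclog"
    )

    func main() {
        // WithInsecure disables transport security; WithBlock plus WithTimeout
        // make Dial wait up to five seconds for the connection to come up.
        cc, err := grpc.Dial("localhost:50051",
            grpc.WithInsecure(),
            grpc.WithBlock(),
            grpc.WithTimeout(5*time.Second),
        )
        if err != nil {
            grpclog.Fatalf("dial failed: %v", err)
        }
        defer cc.Close()
    }
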
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codegen.sh b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codegen.sh
deleted file mode 100644
index b009488842..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codegen.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-# This script serves as an example to demonstrate how to generate the gRPC-Go
-# interface and the related messages from a .proto file.
-#
-# It assumes the installation of i) Google proto buffer compiler at
-# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
-# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
-# not, please install them first.
-#
-# We recommend running this script at $GOPATH/src.
-#
-# If this is not what you need, feel free to make your own scripts. Again, this
-# script is for demonstration purposes.
-#
-proto=$1
-protoc --go_out=plugins=grpc:. $proto
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/code_string.go
deleted file mode 100644
index e6762d0845..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/code_string.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// generated by stringer -type=Code; DO NOT EDIT
-
-package codes
-
-import "fmt"
-
-const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated"
-
-var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192}
-
-func (i Code) String() string {
- if i+1 >= Code(len(_Code_index)) {
- return fmt.Sprintf("Code(%d)", i)
- }
- return _Code_name[_Code_index[i]:_Code_index[i+1]]
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/codes.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/codes.go
deleted file mode 100644
index 37c5b860bd..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/codes.go
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-// Package codes defines the canonical error codes used by gRPC. It is
-// consistent across various languages.
-package codes
-
-// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
-type Code uint32
-
-//go:generate stringer -type=Code
-
-const (
- // OK is returned on success.
- OK Code = 0
-
-	// Canceled indicates the operation was canceled (typically by the caller).
- Canceled Code = 1
-
- // Unknown error. An example of where this error may be returned is
- // if a Status value received from another address space belongs to
- // an error-space that is not known in this address space. Also
- // errors raised by APIs that do not return enough error information
- // may be converted to this error.
- Unknown Code = 2
-
- // InvalidArgument indicates client specified an invalid argument.
- // Note that this differs from FailedPrecondition. It indicates arguments
- // that are problematic regardless of the state of the system
- // (e.g., a malformed file name).
- InvalidArgument Code = 3
-
- // DeadlineExceeded means operation expired before completion.
- // For operations that change the state of the system, this error may be
- // returned even if the operation has completed successfully. For
- // example, a successful response from a server could have been delayed
- // long enough for the deadline to expire.
- DeadlineExceeded Code = 4
-
- // NotFound means some requested entity (e.g., file or directory) was
- // not found.
- NotFound Code = 5
-
- // AlreadyExists means an attempt to create an entity failed because one
- // already exists.
- AlreadyExists Code = 6
-
- // PermissionDenied indicates the caller does not have permission to
- // execute the specified operation. It must not be used for rejections
- // caused by exhausting some resource (use ResourceExhausted
- // instead for those errors). It must not be
- // used if the caller cannot be identified (use Unauthenticated
- // instead for those errors).
- PermissionDenied Code = 7
-
- // Unauthenticated indicates the request does not have valid
- // authentication credentials for the operation.
- Unauthenticated Code = 16
-
- // ResourceExhausted indicates some resource has been exhausted, perhaps
- // a per-user quota, or perhaps the entire file system is out of space.
- ResourceExhausted Code = 8
-
- // FailedPrecondition indicates operation was rejected because the
- // system is not in a state required for the operation's execution.
- // For example, directory to be deleted may be non-empty, an rmdir
- // operation is applied to a non-directory, etc.
- //
- // A litmus test that may help a service implementor in deciding
- // between FailedPrecondition, Aborted, and Unavailable:
- // (a) Use Unavailable if the client can retry just the failing call.
- // (b) Use Aborted if the client should retry at a higher-level
- // (e.g., restarting a read-modify-write sequence).
- // (c) Use FailedPrecondition if the client should not retry until
- // the system state has been explicitly fixed. E.g., if an "rmdir"
- // fails because the directory is non-empty, FailedPrecondition
- // should be returned since the client should not retry unless
- // they have first fixed up the directory by deleting files from it.
- // (d) Use FailedPrecondition if the client performs conditional
- // REST Get/Update/Delete on a resource and the resource on the
- // server does not match the condition. E.g., conflicting
- // read-modify-write on the same resource.
- FailedPrecondition Code = 9
-
- // Aborted indicates the operation was aborted, typically due to a
- // concurrency issue like sequencer check failures, transaction aborts,
- // etc.
- //
- // See litmus test above for deciding between FailedPrecondition,
- // Aborted, and Unavailable.
- Aborted Code = 10
-
- // OutOfRange means operation was attempted past the valid range.
- // E.g., seeking or reading past end of file.
- //
- // Unlike InvalidArgument, this error indicates a problem that may
- // be fixed if the system state changes. For example, a 32-bit file
- // system will generate InvalidArgument if asked to read at an
- // offset that is not in the range [0,2^32-1], but it will generate
- // OutOfRange if asked to read from an offset past the current
- // file size.
- //
- // There is a fair bit of overlap between FailedPrecondition and
- // OutOfRange. We recommend using OutOfRange (the more specific
- // error) when it applies so that callers who are iterating through
- // a space can easily look for an OutOfRange error to detect when
- // they are done.
- OutOfRange Code = 11
-
- // Unimplemented indicates operation is not implemented or not
- // supported/enabled in this service.
- Unimplemented Code = 12
-
-	// Internal errors. This means some invariants expected by the underlying
-	// system have been broken. If you see one of these errors,
- // something is very broken.
- Internal Code = 13
-
- // Unavailable indicates the service is currently unavailable.
-	// This is most likely a transient condition and may be corrected
- // by retrying with a backoff.
- //
- // See litmus test above for deciding between FailedPrecondition,
- // Aborted, and Unavailable.
- Unavailable Code = 14
-
- // DataLoss indicates unrecoverable data loss or corruption.
- DataLoss Code = 15
-)
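
As a brief illustration of how these codes flow through an application at this vintage: errors carrying a code are typically produced with grpc.Errorf and inspected with grpc.Code, both assumed here to be the helpers from rpc_util.go (deleted further below); the user name is illustrative only.

    // Producing an error that carries a canonical code.
    err := grpc.Errorf(codes.NotFound, "user %q not found", "alice")

    // Branching on the code carried by the error.
    switch grpc.Code(err) {
    case codes.NotFound:
        // Treat as a missing entity.
    case codes.Unavailable:
        // Transient condition; retry with backoff.
    default:
        // Unexpected failure.
    }
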
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/coverage.sh b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/coverage.sh
deleted file mode 100644
index 120235374a..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/coverage.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-
-set -e
-
-workdir=.cover
-profile="$workdir/cover.out"
-mode=set
-end2endtest="google.golang.org/grpc/test"
-
-generate_cover_data() {
- rm -rf "$workdir"
- mkdir "$workdir"
-
- for pkg in "$@"; do
- if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ]
- then
- f="$workdir/$(echo $pkg | tr / -)"
- go test -covermode="$mode" -coverprofile="$f.cover" "$pkg"
- go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest"
- fi
- done
-
- echo "mode: $mode" >"$profile"
- grep -h -v "^mode:" "$workdir"/*.cover >>"$profile"
-}
-
-show_cover_report() {
- go tool cover -${1}="$profile"
-}
-
-push_to_coveralls() {
- goveralls -coverprofile="$profile"
-}
-
-generate_cover_data $(go list ./...)
-show_cover_report func
-case "$1" in
-"")
- ;;
---html)
- show_cover_report html ;;
---coveralls)
- push_to_coveralls ;;
-*)
- echo >&2 "error: invalid option: $1" ;;
-esac
-rm -rf "$workdir"
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/credentials/credentials.go
deleted file mode 100644
index 0b0b89b6aa..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/credentials/credentials.go
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-// Package credentials implements various credentials supported by the gRPC library,
-// which encapsulate all the state needed by a client to authenticate with a
-// server and make various assertions, e.g., about the client's identity, role,
-// or whether it is authorized to make a particular call.
-package credentials
-
-import (
- "crypto/tls"
- "crypto/x509"
- "fmt"
- "io/ioutil"
- "net"
- "strings"
- "time"
-
- "golang.org/x/net/context"
-)
-
-var (
- // alpnProtoStr are the specified application level protocols for gRPC.
- alpnProtoStr = []string{"h2"}
-)
-
-// Credentials defines the common interface all supported credentials must
-// implement.
-type Credentials interface {
- // GetRequestMetadata gets the current request metadata, refreshing
- // tokens if required. This should be called by the transport layer on
- // each request, and the data should be populated in headers or other
- // context. uri is the URI of the entry point for the request. When
- // supported by the underlying implementation, ctx can be used for
- // timeout and cancellation.
- // TODO(zhaoq): Define the set of the qualified keys instead of leaving
- // it as an arbitrary string.
- GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
-	// RequireTransportSecurity indicates whether the credentials require
-	// transport security.
- RequireTransportSecurity() bool
-}
-
-// ProtocolInfo provides information regarding the gRPC wire protocol version,
-// security protocol, security protocol version in use, etc.
-type ProtocolInfo struct {
- // ProtocolVersion is the gRPC wire protocol version.
- ProtocolVersion string
- // SecurityProtocol is the security protocol in use.
- SecurityProtocol string
- // SecurityVersion is the security protocol version.
- SecurityVersion string
-}
-
-// AuthInfo defines the common interface for the auth information that users are interested in.
-type AuthInfo interface {
- AuthType() string
-}
-
-// TransportAuthenticator defines the common interface for all the live gRPC wire
-// protocols and supported transport security protocols (e.g., TLS, SSL).
-type TransportAuthenticator interface {
- // ClientHandshake does the authentication handshake specified by the corresponding
- // authentication protocol on rawConn for clients. It returns the authenticated
- // connection and the corresponding auth information about the connection.
- ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, AuthInfo, error)
- // ServerHandshake does the authentication handshake for servers. It returns
- // the authenticated connection and the corresponding auth information about
- // the connection.
- ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
- // Info provides the ProtocolInfo of this TransportAuthenticator.
- Info() ProtocolInfo
- Credentials
-}
-
-// TLSInfo contains the auth information for a TLS authenticated connection.
-// It implements the AuthInfo interface.
-type TLSInfo struct {
- State tls.ConnectionState
-}
-
-func (t TLSInfo) AuthType() string {
- return "tls"
-}
-
-// tlsCreds is the credentials required for authenticating a connection using TLS.
-type tlsCreds struct {
- // TLS configuration
- config tls.Config
-}
-
-func (c tlsCreds) Info() ProtocolInfo {
- return ProtocolInfo{
- SecurityProtocol: "tls",
- SecurityVersion: "1.2",
- }
-}
-
-// GetRequestMetadata returns nil, nil since TLS credentials do not have
-// metadata.
-func (c *tlsCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
- return nil, nil
-}
-
-func (c *tlsCreds) RequireTransportSecurity() bool {
- return true
-}
-
-type timeoutError struct{}
-
-func (timeoutError) Error() string { return "credentials: Dial timed out" }
-func (timeoutError) Timeout() bool { return true }
-func (timeoutError) Temporary() bool { return true }
-
-func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, _ AuthInfo, err error) {
- // borrow some code from tls.DialWithDialer
- var errChannel chan error
- if timeout != 0 {
- errChannel = make(chan error, 2)
- time.AfterFunc(timeout, func() {
- errChannel <- timeoutError{}
- })
- }
- if c.config.ServerName == "" {
- colonPos := strings.LastIndex(addr, ":")
- if colonPos == -1 {
- colonPos = len(addr)
- }
- c.config.ServerName = addr[:colonPos]
- }
- conn := tls.Client(rawConn, &c.config)
- if timeout == 0 {
- err = conn.Handshake()
- } else {
- go func() {
- errChannel <- conn.Handshake()
- }()
- err = <-errChannel
- }
- if err != nil {
- rawConn.Close()
- return nil, nil, err
- }
- // TODO(zhaoq): Omit the auth info for client now. It is more for
- // information than anything else.
- return conn, nil, nil
-}
-
-func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
- conn := tls.Server(rawConn, &c.config)
- if err := conn.Handshake(); err != nil {
- rawConn.Close()
- return nil, nil, err
- }
- return conn, TLSInfo{conn.ConnectionState()}, nil
-}
-
-// NewTLS uses c to construct a TransportAuthenticator based on TLS.
-func NewTLS(c *tls.Config) TransportAuthenticator {
- tc := &tlsCreds{*c}
- tc.config.NextProtos = alpnProtoStr
- return tc
-}
-
-// NewClientTLSFromCert constructs TLS credentials from the input certificate for clients.
-func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportAuthenticator {
- return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp})
-}
-
-// NewClientTLSFromFile constructs TLS credentials from the input certificate file for clients.
-func NewClientTLSFromFile(certFile, serverName string) (TransportAuthenticator, error) {
- b, err := ioutil.ReadFile(certFile)
- if err != nil {
- return nil, err
- }
- cp := x509.NewCertPool()
- if !cp.AppendCertsFromPEM(b) {
- return nil, fmt.Errorf("credentials: failed to append certificates")
- }
- return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}), nil
-}
-
-// NewServerTLSFromCert constructs TLS credentials from the input certificate for servers.
-func NewServerTLSFromCert(cert *tls.Certificate) TransportAuthenticator {
- return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
-}
-
-// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
-// file for servers.
-func NewServerTLSFromFile(certFile, keyFile string) (TransportAuthenticator, error) {
- cert, err := tls.LoadX509KeyPair(certFile, keyFile)
- if err != nil {
- return nil, err
- }
- return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
-}
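
A minimal client-side sketch of wiring these credentials into a dial, assuming a hypothetical CA bundle at ca.pem and a server certificate valid for example.com:

    // Load the CA certificate and build a TransportAuthenticator.
    creds, err := credentials.NewClientTLSFromFile("ca.pem", "example.com")
    if err != nil {
        grpclog.Fatalf("failed to load CA certificate: %v", err)
    }
    // Hand the credentials to Dial via WithTransportCredentials.
    cc, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
    if err != nil {
        grpclog.Fatalf("dial failed: %v", err)
    }
    defer cc.Close()
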
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/doc.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/doc.go
deleted file mode 100644
index b4c0e740e9..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/doc.go
+++ /dev/null
@@ -1,6 +0,0 @@
-/*
-Package grpc implements an RPC system called gRPC.
-
-See www.grpc.io for more information about gRPC.
-*/
-package grpc
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/grpclog/logger.go
deleted file mode 100644
index 2cc09be489..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/grpclog/logger.go
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-/*
-Package grpclog defines logging for grpc.
-*/
-package grpclog
-
-import (
- "log"
- "os"
-)
-
-// Use golang's standard logger by default.
-// Access is not mutex-protected: do not modify except in init()
-// functions.
-var logger Logger = log.New(os.Stderr, "", log.LstdFlags)
-
-// Logger mimics golang's standard Logger as an interface.
-type Logger interface {
- Fatal(args ...interface{})
- Fatalf(format string, args ...interface{})
- Fatalln(args ...interface{})
- Print(args ...interface{})
- Printf(format string, args ...interface{})
- Println(args ...interface{})
-}
-
-// SetLogger sets the logger that is used in grpc. Call only from
-// init() functions.
-func SetLogger(l Logger) {
- logger = l
-}
-
-// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code.
-func Fatal(args ...interface{}) {
- logger.Fatal(args...)
-}
-
-// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code.
-func Fatalf(format string, args ...interface{}) {
- logger.Fatalf(format, args...)
-}
-
-// Fatalln is equivalent to Println() followed by a call to os.Exit() with a non-zero exit code.
-func Fatalln(args ...interface{}) {
- logger.Fatalln(args...)
-}
-
-// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
-func Print(args ...interface{}) {
- logger.Print(args...)
-}
-
-// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
-func Printf(format string, args ...interface{}) {
- logger.Printf(format, args...)
-}
-
-// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
-func Println(args ...interface{}) {
- logger.Println(args...)
-}
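
A short sketch of swapping in a custom logger from an init() function, as the comment on the package-level logger above requires; *log.Logger already satisfies the Logger interface, and the prefix and flags are illustrative choices.

    package main

    import (
        "log"
        "os"

        "google.golang.org/grpc/grpclog"
    )

    func init() {
        // Route gRPC's internal logging to a prefixed standard logger.
        grpclog.SetLogger(log.New(os.Stderr, "grpc: ", log.LstdFlags|log.Lshortfile))
    }

    func main() {
        grpclog.Println("custom logger installed")
    }
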
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/internal/internal.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/internal/internal.go
deleted file mode 100644
index 5489143a85..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/internal/internal.go
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright 2016, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-// Package internal contains gRPC-internal code for testing, to avoid polluting
-// the godoc of the top-level grpc package.
-package internal
-
-// TestingCloseConns closes all existing transports but keeps
-// grpcServer.lis accepting new connections.
-//
-// The provided grpcServer must be of type *grpc.Server. It is untyped
-// for circular dependency reasons.
-var TestingCloseConns func(grpcServer interface{})
-
-// TestingUseHandlerImpl enables the http.Handler-based server implementation.
-// It must be called before Serve and requires TLS credentials.
-//
-// The provided grpcServer must be of type *grpc.Server. It is untyped
-// for circular dependency reasons.
-var TestingUseHandlerImpl func(grpcServer interface{})
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/metadata/metadata.go
deleted file mode 100644
index 58469ddd3f..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/metadata/metadata.go
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-// Package metadata defines the structure of the metadata supported by the gRPC library.
-package metadata
-
-import (
- "encoding/base64"
- "fmt"
- "strings"
-
- "golang.org/x/net/context"
-)
-
-const (
- binHdrSuffix = "-bin"
-)
-
-// encodeKeyValue encodes key and value qualified for transmission via gRPC.
-// Transmitting binary headers violates the HTTP/2 spec.
-// TODO(zhaoq): Maybe check if k is ASCII also.
-func encodeKeyValue(k, v string) (string, string) {
- k = strings.ToLower(k)
- if strings.HasSuffix(k, binHdrSuffix) {
- val := base64.StdEncoding.EncodeToString([]byte(v))
- v = string(val)
- }
- return k, v
-}
-
-// DecodeKeyValue returns the original key and value corresponding to the
-// encoded data in k, v.
-func DecodeKeyValue(k, v string) (string, string, error) {
- if !strings.HasSuffix(k, binHdrSuffix) {
- return k, v, nil
- }
- val, err := base64.StdEncoding.DecodeString(v)
- if err != nil {
- return "", "", err
- }
- return k, string(val), nil
-}
-
-// MD is a mapping from metadata keys to values. Users should use the following
-// two convenience functions New and Pairs to generate MD.
-type MD map[string][]string
-
-// New creates a MD from given key-value map.
-func New(m map[string]string) MD {
- md := MD{}
- for k, v := range m {
- key, val := encodeKeyValue(k, v)
- md[key] = append(md[key], val)
- }
- return md
-}
-
-// Pairs returns an MD formed by the mapping of key, value ...
-// Pairs panics if len(kv) is odd.
-func Pairs(kv ...string) MD {
- if len(kv)%2 == 1 {
- panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
- }
- md := MD{}
- var k string
- for i, s := range kv {
- if i%2 == 0 {
- k = s
- continue
- }
- key, val := encodeKeyValue(k, s)
- md[key] = append(md[key], val)
- }
- return md
-}
-
-// Len returns the number of items in md.
-func (md MD) Len() int {
- return len(md)
-}
-
-// Copy returns a copy of md.
-func (md MD) Copy() MD {
- out := MD{}
- for k, v := range md {
- for _, i := range v {
- out[k] = append(out[k], i)
- }
- }
- return out
-}
-
-type mdKey struct{}
-
-// NewContext creates a new context with md attached.
-func NewContext(ctx context.Context, md MD) context.Context {
- return context.WithValue(ctx, mdKey{}, md)
-}
-
-// FromContext returns the MD in ctx if it exists.
-func FromContext(ctx context.Context) (md MD, ok bool) {
- md, ok = ctx.Value(mdKey{}).(MD)
- return
-}
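
A short usage sketch of the helpers above: attach metadata to a context and read it back. Keys are lowercased, and values under keys ending in "-bin" are base64-encoded by encodeKeyValue; the specific keys and values here are illustrative only.

    // Attach custom metadata to an outgoing RPC context.
    md := metadata.Pairs(
        "request-id", "42",
        "token-bin", string([]byte{0x01, 0x02}), // "-bin" values are base64-encoded
    )
    ctx := metadata.NewContext(context.Background(), md)

    // Read it back wherever the context is available.
    if md, ok := metadata.FromContext(ctx); ok {
        _ = md["request-id"] // []string{"42"}
    }
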
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/naming/naming.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/naming/naming.go
deleted file mode 100644
index 06605607c3..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/naming/naming.go
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-// Package naming defines the naming API and related data structures for gRPC.
-// The interface is EXPERIMENTAL and may be subject to change.
-package naming
-
-// Operation defines the corresponding operations for a name resolution change.
-type Operation uint8
-
-const (
- // Add indicates a new address is added.
- Add Operation = iota
-	// Delete indicates an existing address is deleted.
- Delete
-)
-
-// Update defines a name resolution update. Notice that it is not valid to have both
-// an empty string Addr and nil Metadata in an Update.
-type Update struct {
- // Op indicates the operation of the update.
- Op Operation
- // Addr is the updated address. It is empty string if there is no address update.
- Addr string
- // Metadata is the updated metadata. It is nil if there is no metadata update.
- // Metadata is not required for a custom naming implementation.
- Metadata interface{}
-}
-
-// Resolver creates a Watcher for a target to track its resolution changes.
-type Resolver interface {
- // Resolve creates a Watcher for target.
- Resolve(target string) (Watcher, error)
-}
-
-// Watcher watches for the updates on the specified target.
-type Watcher interface {
- // Next blocks until an update or error happens. It may return one or more
- // updates. The first call should get the full set of the results.
- Next() ([]*Update, error)
- // Close closes the Watcher.
- Close()
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/peer/peer.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/peer/peer.go
deleted file mode 100644
index bfa6205ba9..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/peer/peer.go
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-// Package peer defines various peer information associated with RPCs and
-// corresponding utils.
-package peer
-
-import (
- "net"
-
- "golang.org/x/net/context"
- "google.golang.org/grpc/credentials"
-)
-
-// Peer contains the information of the peer for an RPC.
-type Peer struct {
- // Addr is the peer address.
- Addr net.Addr
- // AuthInfo is the authentication information of the transport.
- // It is nil if there is no transport security being used.
- AuthInfo credentials.AuthInfo
-}
-
-type peerKey struct{}
-
-// NewContext creates a new context with peer information attached.
-func NewContext(ctx context.Context, p *Peer) context.Context {
- return context.WithValue(ctx, peerKey{}, p)
-}
-
-// FromContext returns the peer information in ctx if it exists.
-func FromContext(ctx context.Context) (p *Peer, ok bool) {
- p, ok = ctx.Value(peerKey{}).(*Peer)
- return
-}
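
A short server-side sketch of reading peer information from a handler context; the echoServer type, the pb messages, and the handler signature are hypothetical, and whether AuthInfo is populated depends on the transport credentials in use.

    // UnaryEcho logs who is calling before echoing the request back.
    func (s *echoServer) UnaryEcho(ctx context.Context, in *pb.EchoRequest) (*pb.EchoResponse, error) {
        if p, ok := peer.FromContext(ctx); ok {
            grpclog.Printf("request from %v (auth: %v)", p.Addr, p.AuthInfo)
        }
        return &pb.EchoResponse{Message: in.Message}, nil
    }
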
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/picker.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/picker.go
deleted file mode 100644
index 50f315b44f..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/picker.go
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-package grpc
-
-import (
- "container/list"
- "fmt"
- "sync"
-
- "golang.org/x/net/context"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/naming"
- "google.golang.org/grpc/transport"
-)
-
-// Picker picks a Conn for RPC requests.
-// This is EXPERIMENTAL; please do not implement your own Picker for now.
-type Picker interface {
- // Init does initial processing for the Picker, e.g., initiate some connections.
- Init(cc *ClientConn) error
- // Pick blocks until either a transport.ClientTransport is ready for the upcoming RPC
- // or some error happens.
- Pick(ctx context.Context) (transport.ClientTransport, error)
-	// PickAddr picks a peer address for connecting. This will be called repeatedly when
-	// connecting/reconnecting.
- PickAddr() (string, error)
- // State returns the connectivity state of the underlying connections.
- State() (ConnectivityState, error)
- // WaitForStateChange blocks until the state changes to something other than
- // the sourceState. It returns the new state or error.
- WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error)
-	// Close closes all the Conns owned by this Picker.
- Close() error
-}
-
-// unicastPicker is the default Picker which is used when there is no custom Picker
-// specified by users. It always picks the same Conn.
-type unicastPicker struct {
- target string
- conn *Conn
-}
-
-func (p *unicastPicker) Init(cc *ClientConn) error {
- c, err := NewConn(cc)
- if err != nil {
- return err
- }
- p.conn = c
- return nil
-}
-
-func (p *unicastPicker) Pick(ctx context.Context) (transport.ClientTransport, error) {
- return p.conn.Wait(ctx)
-}
-
-func (p *unicastPicker) PickAddr() (string, error) {
- return p.target, nil
-}
-
-func (p *unicastPicker) State() (ConnectivityState, error) {
- return p.conn.State(), nil
-}
-
-func (p *unicastPicker) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
- return p.conn.WaitForStateChange(ctx, sourceState)
-}
-
-func (p *unicastPicker) Close() error {
- if p.conn != nil {
- return p.conn.Close()
- }
- return nil
-}
-
-// unicastNamingPicker picks an address from a name resolver to set up the connection.
-type unicastNamingPicker struct {
- cc *ClientConn
- resolver naming.Resolver
- watcher naming.Watcher
- mu sync.Mutex
- // The list of addresses obtained from the watcher.
- addrs *list.List
- // pickedAddr tracks the address most recently returned by PickAddr(). The
- // next PickAddr() call may advance it along addrs.
- pickedAddr *list.Element
- conn *Conn
-}
-
-// NewUnicastNamingPicker creates a Picker to pick addresses from a name resolver
-// to connect.
-func NewUnicastNamingPicker(r naming.Resolver) Picker {
- return &unicastNamingPicker{
- resolver: r,
- addrs: list.New(),
- }
-}
-
-type addrInfo struct {
- addr string
- // Set to true if this addrInfo needs to be deleted in the next PickAddr() call.
- deleting bool
-}
-
-// processUpdates calls Watcher.Next() once and processes the obtained updates.
-func (p *unicastNamingPicker) processUpdates() error {
- updates, err := p.watcher.Next()
- if err != nil {
- return err
- }
- for _, update := range updates {
- switch update.Op {
- case naming.Add:
- p.mu.Lock()
- p.addrs.PushBack(&addrInfo{
- addr: update.Addr,
- })
- p.mu.Unlock()
- // Initial connection setup
- if p.conn == nil {
- conn, err := NewConn(p.cc)
- if err != nil {
- return err
- }
- p.conn = conn
- }
- case naming.Delete:
- p.mu.Lock()
- for e := p.addrs.Front(); e != nil; e = e.Next() {
- if update.Addr == e.Value.(*addrInfo).addr {
- if e == p.pickedAddr {
- // Do not remove the element now if it is the current picked
- // one. We leave the deletion to the next PickAddr() call.
- e.Value.(*addrInfo).deleting = true
- // Notify Conn to close it. All the live RPCs on this connection
- // will be aborted.
- p.conn.NotifyReset()
- } else {
- p.addrs.Remove(e)
- }
- }
- }
- p.mu.Unlock()
- default:
- grpclog.Println("Unknown update.Op ", update.Op)
- }
- }
- return nil
-}
-
-// monitor runs in a standalone goroutine to keep watching name resolution updates until the watcher
-// is closed.
-func (p *unicastNamingPicker) monitor() {
- for {
- if err := p.processUpdates(); err != nil {
- return
- }
- }
-}
-
-func (p *unicastNamingPicker) Init(cc *ClientConn) error {
- w, err := p.resolver.Resolve(cc.target)
- if err != nil {
- return err
- }
- p.watcher = w
- p.cc = cc
- // Get the initial name resolution.
- if err := p.processUpdates(); err != nil {
- return err
- }
- go p.monitor()
- return nil
-}
-
-func (p *unicastNamingPicker) Pick(ctx context.Context) (transport.ClientTransport, error) {
- return p.conn.Wait(ctx)
-}
-
-func (p *unicastNamingPicker) PickAddr() (string, error) {
- p.mu.Lock()
- defer p.mu.Unlock()
- if p.pickedAddr == nil {
- p.pickedAddr = p.addrs.Front()
- } else {
- pa := p.pickedAddr
- p.pickedAddr = pa.Next()
- if pa.Value.(*addrInfo).deleting {
- p.addrs.Remove(pa)
- }
- if p.pickedAddr == nil {
- p.pickedAddr = p.addrs.Front()
- }
- }
- if p.pickedAddr == nil {
- return "", fmt.Errorf("there is no address available to pick")
- }
- return p.pickedAddr.Value.(*addrInfo).addr, nil
-}
-
-func (p *unicastNamingPicker) State() (ConnectivityState, error) {
- return 0, fmt.Errorf("State() is not supported for unicastNamingPicker")
-}
-
-func (p *unicastNamingPicker) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
- return 0, fmt.Errorf("WaitForStateChange is not supported for unicastNamingPicker")
-}
-
-func (p *unicastNamingPicker) Close() error {
- p.watcher.Close()
- p.conn.Close()
- return nil
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/rpc_util.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/rpc_util.go
deleted file mode 100644
index 96c790bed1..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/rpc_util.go
+++ /dev/null
@@ -1,452 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-package grpc
-
-import (
- "bytes"
- "compress/gzip"
- "encoding/binary"
- "fmt"
- "io"
- "io/ioutil"
- "math"
- "math/rand"
- "os"
- "time"
-
- "github.com/golang/protobuf/proto"
- "golang.org/x/net/context"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/transport"
-)
-
-// Codec defines the interface gRPC uses to encode and decode messages.
-type Codec interface {
- // Marshal returns the wire format of v.
- Marshal(v interface{}) ([]byte, error)
- // Unmarshal parses the wire format into v.
- Unmarshal(data []byte, v interface{}) error
- // String returns the name of the Codec implementation. The returned
- // string will be used as part of content type in transmission.
- String() string
-}
-
-// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
-type protoCodec struct{}
-
-func (protoCodec) Marshal(v interface{}) ([]byte, error) {
- return proto.Marshal(v.(proto.Message))
-}
-
-func (protoCodec) Unmarshal(data []byte, v interface{}) error {
- return proto.Unmarshal(data, v.(proto.Message))
-}
-
-func (protoCodec) String() string {
- return "proto"
-}
-
-// Compressor defines the interface gRPC uses to compress a message.
-type Compressor interface {
- // Do compresses p into w.
- Do(w io.Writer, p []byte) error
- // Type returns the compression algorithm the Compressor uses.
- Type() string
-}
-
-// NewGZIPCompressor creates a Compressor based on GZIP.
-func NewGZIPCompressor() Compressor {
- return &gzipCompressor{}
-}
-
-type gzipCompressor struct {
-}
-
-func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
- z := gzip.NewWriter(w)
- if _, err := z.Write(p); err != nil {
- return err
- }
- return z.Close()
-}
-
-func (c *gzipCompressor) Type() string {
- return "gzip"
-}
-
-// Decompressor defines the interface gRPC uses to decompress a message.
-type Decompressor interface {
- // Do reads the data from r and uncompresses it.
- Do(r io.Reader) ([]byte, error)
- // Type returns the compression algorithm the Decompressor uses.
- Type() string
-}
-
-type gzipDecompressor struct {
-}
-
-// NewGZIPDecompressor creates a Decompressor based on GZIP.
-func NewGZIPDecompressor() Decompressor {
- return &gzipDecompressor{}
-}
-
-func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
- z, err := gzip.NewReader(r)
- if err != nil {
- return nil, err
- }
- defer z.Close()
- return ioutil.ReadAll(z)
-}
-
-func (d *gzipDecompressor) Type() string {
- return "gzip"
-}
-
-// callInfo contains all related configuration and information about an RPC.
-type callInfo struct {
- failFast bool
- headerMD metadata.MD
- trailerMD metadata.MD
- traceInfo traceInfo // in trace.go
-}
-
-// CallOption configures a Call before it starts or extracts information from
-// a Call after it completes.
-type CallOption interface {
- // before is called before the call is sent to any server. If before
- // returns a non-nil error, the RPC fails with that error.
- before(*callInfo) error
-
- // after is called after the call has completed. after cannot return an
- // error, so any failures should be reported via output parameters.
- after(*callInfo)
-}
-
-type beforeCall func(c *callInfo) error
-
-func (o beforeCall) before(c *callInfo) error { return o(c) }
-func (o beforeCall) after(c *callInfo) {}
-
-type afterCall func(c *callInfo)
-
-func (o afterCall) before(c *callInfo) error { return nil }
-func (o afterCall) after(c *callInfo) { o(c) }
-
-// Header returns a CallOption that retrieves the header metadata
-// for a unary RPC.
-func Header(md *metadata.MD) CallOption {
- return afterCall(func(c *callInfo) {
- *md = c.headerMD
- })
-}
-
-// Trailer returns a CallOption that retrieves the trailer metadata
-// for a unary RPC.
-func Trailer(md *metadata.MD) CallOption {
- return afterCall(func(c *callInfo) {
- *md = c.trailerMD
- })
-}
-
-// The format of the payload: compressed or not?
-type payloadFormat uint8
-
-const (
- compressionNone payloadFormat = iota // no compression
- compressionMade
-)
-
-// parser reads complete gRPC messages from the underlying reader.
-type parser struct {
- // r is the underlying reader.
- // See the comment on recvMsg for the permissible
- // error types.
- r io.Reader
-
- // The header of a gRPC message. Find more detail
- // at http://www.grpc.io/docs/guides/wire.html.
- header [5]byte
-}
-
-// recvMsg reads a complete gRPC message from the stream.
-//
-// It returns the message and its payload (compression/encoding)
-// format. The caller owns the returned msg memory.
-//
-// If there is an error, possible values are:
-// * io.EOF, when no messages remain
-// * io.ErrUnexpectedEOF
-// * of type transport.ConnectionError
-// * of type transport.StreamError
-// No other error values or types must be returned, which also means
-// that the underlying io.Reader must not return an incompatible
-// error.
-func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) {
- if _, err := io.ReadFull(p.r, p.header[:]); err != nil {
- return 0, nil, err
- }
-
- pf = payloadFormat(p.header[0])
- length := binary.BigEndian.Uint32(p.header[1:])
-
- if length == 0 {
- return pf, nil, nil
- }
- // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
- // of making it for each message:
- msg = make([]byte, int(length))
- if _, err := io.ReadFull(p.r, msg); err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return 0, nil, err
- }
- return pf, msg, nil
-}
-
-// encode serializes msg and prepends the message header. If msg is nil, it
-// generates a message header with a zero message length.
-func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte, error) {
- var b []byte
- var length uint
- if msg != nil {
- var err error
- // TODO(zhaoq): optimize to reduce memory alloc and copying.
- b, err = c.Marshal(msg)
- if err != nil {
- return nil, err
- }
- if cp != nil {
- if err := cp.Do(cbuf, b); err != nil {
- return nil, err
- }
- b = cbuf.Bytes()
- }
- length = uint(len(b))
- }
- if length > math.MaxUint32 {
- return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length)
- }
-
- const (
- payloadLen = 1
- sizeLen = 4
- )
-
- var buf = make([]byte, payloadLen+sizeLen+len(b))
-
- // Write payload format
- if cp == nil {
- buf[0] = byte(compressionNone)
- } else {
- buf[0] = byte(compressionMade)
- }
- // Write length of b into buf
- binary.BigEndian.PutUint32(buf[1:], uint32(length))
- // Copy encoded msg to buf
- copy(buf[5:], b)
-
- return buf, nil
-}
-
-func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error {
- switch pf {
- case compressionNone:
- case compressionMade:
- if recvCompress == "" {
- return transport.StreamErrorf(codes.InvalidArgument, "grpc: invalid grpc-encoding %q with compression enabled", recvCompress)
- }
- if dc == nil || recvCompress != dc.Type() {
- return transport.StreamErrorf(codes.InvalidArgument, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
- }
- default:
- return transport.StreamErrorf(codes.InvalidArgument, "grpc: received unexpected payload format %d", pf)
- }
- return nil
-}
-
-func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}) error {
- pf, d, err := p.recvMsg()
- if err != nil {
- return err
- }
- if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil {
- return err
- }
- if pf == compressionMade {
- d, err = dc.Do(bytes.NewReader(d))
- if err != nil {
- return transport.StreamErrorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
- }
- }
- if err := c.Unmarshal(d, m); err != nil {
- return transport.StreamErrorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
- }
- return nil
-}
-
-// rpcError defines the status from an RPC.
-type rpcError struct {
- code codes.Code
- desc string
-}
-
-func (e rpcError) Error() string {
- return fmt.Sprintf("rpc error: code = %d desc = %q", e.code, e.desc)
-}
-
-// Code returns the error code for err if it was produced by the rpc system.
-// Otherwise, it returns codes.Unknown.
-func Code(err error) codes.Code {
- if err == nil {
- return codes.OK
- }
- if e, ok := err.(rpcError); ok {
- return e.code
- }
- return codes.Unknown
-}
-
-// ErrorDesc returns the error description of err if it was produced by the rpc system.
-// Otherwise, it returns err.Error() or empty string when err is nil.
-func ErrorDesc(err error) string {
- if err == nil {
- return ""
- }
- if e, ok := err.(rpcError); ok {
- return e.desc
- }
- return err.Error()
-}
-
-// Errorf returns an error containing an error code and a description;
-// Errorf returns nil if c is OK.
-func Errorf(c codes.Code, format string, a ...interface{}) error {
- if c == codes.OK {
- return nil
- }
- return rpcError{
- code: c,
- desc: fmt.Sprintf(format, a...),
- }
-}
-
-// toRPCErr converts an error into a rpcError.
-func toRPCErr(err error) error {
- switch e := err.(type) {
- case rpcError:
- return err
- case transport.StreamError:
- return rpcError{
- code: e.Code,
- desc: e.Desc,
- }
- case transport.ConnectionError:
- return rpcError{
- code: codes.Internal,
- desc: e.Desc,
- }
- }
- return Errorf(codes.Unknown, "%v", err)
-}
-
-// convertCode converts a standard Go error into its canonical code. Note that
-// this is only used to translate the error returned by server applications.
-func convertCode(err error) codes.Code {
- switch err {
- case nil:
- return codes.OK
- case io.EOF:
- return codes.OutOfRange
- case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
- return codes.FailedPrecondition
- case os.ErrInvalid:
- return codes.InvalidArgument
- case context.Canceled:
- return codes.Canceled
- case context.DeadlineExceeded:
- return codes.DeadlineExceeded
- }
- switch {
- case os.IsExist(err):
- return codes.AlreadyExists
- case os.IsNotExist(err):
- return codes.NotFound
- case os.IsPermission(err):
- return codes.PermissionDenied
- }
- return codes.Unknown
-}
-
-const (
- // how long to wait after the first failure before retrying
- baseDelay = 1.0 * time.Second
- // upper bound of backoff delay
- maxDelay = 120 * time.Second
- // backoff increases by this factor on each retry
- backoffFactor = 1.6
- // backoff is randomized downwards by this factor
- backoffJitter = 0.2
-)
-
-func backoff(retries int) (t time.Duration) {
- if retries == 0 {
- return baseDelay
- }
- backoff, max := float64(baseDelay), float64(maxDelay)
- for backoff < max && retries > 0 {
- backoff *= backoffFactor
- retries--
- }
- if backoff > max {
- backoff = max
- }
- // Randomize backoff delays so that if a cluster of requests start at
- // the same time, they won't operate in lockstep.
- backoff *= 1 + backoffJitter*(rand.Float64()*2-1)
- if backoff < 0 {
- return 0
- }
- return time.Duration(backoff)
-}
-
-// SupportPackageIsVersion1 is referenced from generated protocol buffer files
-// to assert that the generated code is compatible with this version of the grpc package.
-//
-// This constant may be renamed in the future if a change in the generated code
-// requires a synchronised update of grpc-go and protoc-gen-go. This constant
-// should not be referenced from any other code.
-const SupportPackageIsVersion1 = true
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/server.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/server.go
deleted file mode 100644
index bdf68a0fc9..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/server.go
+++ /dev/null
@@ -1,746 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-package grpc
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "net"
- "net/http"
- "reflect"
- "runtime"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/context"
- "golang.org/x/net/http2"
- "golang.org/x/net/trace"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/transport"
-)
-
-type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error)
-
-// MethodDesc represents an RPC service's method specification.
-type MethodDesc struct {
- MethodName string
- Handler methodHandler
-}
-
-// ServiceDesc represents an RPC service's specification.
-type ServiceDesc struct {
- ServiceName string
- // The pointer to the service interface. Used to check whether the user
- // provided implementation satisfies the interface requirements.
- HandlerType interface{}
- Methods []MethodDesc
- Streams []StreamDesc
-}
-
-// service holds the server instance serving this service and
-// the methods in this service.
-type service struct {
- server interface{} // the server for service methods
- md map[string]*MethodDesc
- sd map[string]*StreamDesc
-}
-
-// Server is a gRPC server to serve RPC requests.
-type Server struct {
- opts options
-
- mu sync.Mutex // guards following
- lis map[net.Listener]bool
- conns map[io.Closer]bool
- m map[string]*service // service name -> service info
- events trace.EventLog
-}
-
-type options struct {
- creds credentials.Credentials
- codec Codec
- cp Compressor
- dc Decompressor
- maxConcurrentStreams uint32
- useHandlerImpl bool // use http.Handler-based server
-}
-
-// A ServerOption sets options.
-type ServerOption func(*options)
-
-// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
-func CustomCodec(codec Codec) ServerOption {
- return func(o *options) {
- o.codec = codec
- }
-}
-
-// RPCCompressor returns a ServerOption that sets a compressor for outbound messages.
-func RPCCompressor(cp Compressor) ServerOption {
- return func(o *options) {
- o.cp = cp
- }
-}
-
-// RPCDecompressor returns a ServerOption that sets a decompressor for inbound messages.
-func RPCDecompressor(dc Decompressor) ServerOption {
- return func(o *options) {
- o.dc = dc
- }
-}
-
-// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
-// of concurrent streams to each ServerTransport.
-func MaxConcurrentStreams(n uint32) ServerOption {
- return func(o *options) {
- o.maxConcurrentStreams = n
- }
-}
-
-// Creds returns a ServerOption that sets credentials for server connections.
-func Creds(c credentials.Credentials) ServerOption {
- return func(o *options) {
- o.creds = c
- }
-}
-
-// NewServer creates a gRPC server which has no service registered and has not
-// started to accept requests yet.
-func NewServer(opt ...ServerOption) *Server {
- var opts options
- for _, o := range opt {
- o(&opts)
- }
- if opts.codec == nil {
- // Set the default codec.
- opts.codec = protoCodec{}
- }
- s := &Server{
- lis: make(map[net.Listener]bool),
- opts: opts,
- conns: make(map[io.Closer]bool),
- m: make(map[string]*service),
- }
- if EnableTracing {
- _, file, line, _ := runtime.Caller(1)
- s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
- }
- return s
-}
-
-// printf records an event in s's event log, unless s has been stopped.
-// REQUIRES s.mu is held.
-func (s *Server) printf(format string, a ...interface{}) {
- if s.events != nil {
- s.events.Printf(format, a...)
- }
-}
-
-// errorf records an error in s's event log, unless s has been stopped.
-// REQUIRES s.mu is held.
-func (s *Server) errorf(format string, a ...interface{}) {
- if s.events != nil {
- s.events.Errorf(format, a...)
- }
-}
-
-// RegisterService registers a service and its implementation with the gRPC
-// server. Called from the IDL generated code. This must be called before
-// invoking Serve.
-func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
- ht := reflect.TypeOf(sd.HandlerType).Elem()
- st := reflect.TypeOf(ss)
- if !st.Implements(ht) {
- grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
- }
- s.register(sd, ss)
-}
-
-func (s *Server) register(sd *ServiceDesc, ss interface{}) {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.printf("RegisterService(%q)", sd.ServiceName)
- if _, ok := s.m[sd.ServiceName]; ok {
- grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
- }
- srv := &service{
- server: ss,
- md: make(map[string]*MethodDesc),
- sd: make(map[string]*StreamDesc),
- }
- for i := range sd.Methods {
- d := &sd.Methods[i]
- srv.md[d.MethodName] = d
- }
- for i := range sd.Streams {
- d := &sd.Streams[i]
- srv.sd[d.StreamName] = d
- }
- s.m[sd.ServiceName] = srv
-}
-
-var (
- // ErrServerStopped indicates that the operation is now illegal because of
- // the server being stopped.
- ErrServerStopped = errors.New("grpc: the server has been stopped")
-)
-
-func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
- creds, ok := s.opts.creds.(credentials.TransportAuthenticator)
- if !ok {
- return rawConn, nil, nil
- }
- return creds.ServerHandshake(rawConn)
-}
-
-// Serve accepts incoming connections on the listener lis, creating a new
-// ServerTransport and service goroutine for each. The service goroutines
-// read gRPC requests and then call the registered handlers to reply to them.
-// Serve returns when lis.Accept fails.
-func (s *Server) Serve(lis net.Listener) error {
- s.mu.Lock()
- s.printf("serving")
- if s.lis == nil {
- s.mu.Unlock()
- return ErrServerStopped
- }
- s.lis[lis] = true
- s.mu.Unlock()
- defer func() {
- lis.Close()
- s.mu.Lock()
- delete(s.lis, lis)
- s.mu.Unlock()
- }()
- for {
- rawConn, err := lis.Accept()
- if err != nil {
- s.mu.Lock()
- s.printf("done serving; Accept = %v", err)
- s.mu.Unlock()
- return err
- }
- // Start a new goroutine to deal with rawConn
- // so we don't stall this Accept loop goroutine.
- go s.handleRawConn(rawConn)
- }
-}
-
-// handleRawConn is run in its own goroutine and handles a just-accepted
-// connection that has not had any I/O performed on it yet.
-func (s *Server) handleRawConn(rawConn net.Conn) {
- conn, authInfo, err := s.useTransportAuthenticator(rawConn)
- if err != nil {
- s.mu.Lock()
- s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
- s.mu.Unlock()
- grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
- rawConn.Close()
- return
- }
-
- s.mu.Lock()
- if s.conns == nil {
- s.mu.Unlock()
- conn.Close()
- return
- }
- s.mu.Unlock()
-
- if s.opts.useHandlerImpl {
- s.serveUsingHandler(conn)
- } else {
- s.serveNewHTTP2Transport(conn, authInfo)
- }
-}
-
-// serveNewHTTP2Transport sets up a new http/2 transport (using the
-// gRPC http2 server transport in transport/http2_server.go) and
-// serves streams on it.
-// This is run in its own goroutine (it does network I/O in
-// transport.NewServerTransport).
-func (s *Server) serveNewHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) {
- st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo)
- if err != nil {
- s.mu.Lock()
- s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
- s.mu.Unlock()
- c.Close()
- grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err)
- return
- }
- if !s.addConn(st) {
- st.Close()
- return
- }
- s.serveStreams(st)
-}
-
-func (s *Server) serveStreams(st transport.ServerTransport) {
- defer s.removeConn(st)
- defer st.Close()
- var wg sync.WaitGroup
- st.HandleStreams(func(stream *transport.Stream) {
- wg.Add(1)
- go func() {
- defer wg.Done()
- s.handleStream(st, stream, s.traceInfo(st, stream))
- }()
- })
- wg.Wait()
-}
-
-var _ http.Handler = (*Server)(nil)
-
-// serveUsingHandler is called from handleRawConn when s is configured
-// to handle requests via the http.Handler interface. It sets up a
-// net/http.Server to handle the just-accepted conn. The http.Server
-// is configured to route all incoming requests (all HTTP/2 streams)
-// to ServeHTTP, which creates a new ServerTransport for each stream.
-// serveUsingHandler blocks until conn closes.
-//
-// This codepath is only used when Server.TestingUseHandlerImpl has
-// been configured. This lets the end2end tests exercise the ServeHTTP
-// method as one of the environment types.
-//
-// conn is the *tls.Conn that's already been authenticated.
-func (s *Server) serveUsingHandler(conn net.Conn) {
- if !s.addConn(conn) {
- conn.Close()
- return
- }
- defer s.removeConn(conn)
- h2s := &http2.Server{
- MaxConcurrentStreams: s.opts.maxConcurrentStreams,
- }
- h2s.ServeConn(conn, &http2.ServeConnOpts{
- Handler: s,
- })
-}
-
-func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- st, err := transport.NewServerHandlerTransport(w, r)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
- if !s.addConn(st) {
- st.Close()
- return
- }
- defer s.removeConn(st)
- s.serveStreams(st)
-}
-
-// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
-// If tracing is not enabled, it returns nil.
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
- if !EnableTracing {
- return nil
- }
- trInfo = &traceInfo{
- tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()),
- }
- trInfo.firstLine.client = false
- trInfo.firstLine.remoteAddr = st.RemoteAddr()
- stream.TraceContext(trInfo.tr)
- if dl, ok := stream.Context().Deadline(); ok {
- trInfo.firstLine.deadline = dl.Sub(time.Now())
- }
- return trInfo
-}
-
-func (s *Server) addConn(c io.Closer) bool {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.conns == nil {
- return false
- }
- s.conns[c] = true
- return true
-}
-
-func (s *Server) removeConn(c io.Closer) {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.conns != nil {
- delete(s.conns, c)
- }
-}
-
-func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error {
- var cbuf *bytes.Buffer
- if cp != nil {
- cbuf = new(bytes.Buffer)
- }
- p, err := encode(s.opts.codec, msg, cp, cbuf)
- if err != nil {
- // This typically indicates a fatal issue (e.g., memory
- // corruption or hardware faults) that the application program
- // cannot handle.
- //
- // TODO(zhaoq): There exist other options also such as only closing the
- // faulty stream locally and remotely (Other streams can keep going). Find
- // the optimal option.
- grpclog.Fatalf("grpc: Server failed to encode response %v", err)
- }
- return t.Write(stream, p, opts)
-}
-
-func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
- if trInfo != nil {
- defer trInfo.tr.Finish()
- trInfo.firstLine.client = false
- trInfo.tr.LazyLog(&trInfo.firstLine, false)
- defer func() {
- if err != nil && err != io.EOF {
- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- trInfo.tr.SetError()
- }
- }()
- }
- p := &parser{r: stream}
- for {
- pf, req, err := p.recvMsg()
- if err == io.EOF {
- // The entire stream is done (for unary RPC only).
- return err
- }
- if err == io.ErrUnexpectedEOF {
- err = transport.StreamError{Code: codes.Internal, Desc: "io.ErrUnexpectedEOF"}
- }
- if err != nil {
- switch err := err.(type) {
- case transport.ConnectionError:
- // Nothing to do here.
- case transport.StreamError:
- if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil {
- grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
- }
- default:
- panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err))
- }
- return err
- }
-
- if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil {
- switch err := err.(type) {
- case transport.StreamError:
- if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil {
- grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
- }
- default:
- if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil {
- grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
- }
-
- }
- return err
- }
- statusCode := codes.OK
- statusDesc := ""
- df := func(v interface{}) error {
- if pf == compressionMade {
- var err error
- req, err = s.opts.dc.Do(bytes.NewReader(req))
- if err != nil {
- if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil {
- grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
- }
- return err
- }
- }
- if err := s.opts.codec.Unmarshal(req, v); err != nil {
- return err
- }
- if trInfo != nil {
- trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
- }
- return nil
- }
- reply, appErr := md.Handler(srv.server, stream.Context(), df)
- if appErr != nil {
- if err, ok := appErr.(rpcError); ok {
- statusCode = err.code
- statusDesc = err.desc
- } else {
- statusCode = convertCode(appErr)
- statusDesc = appErr.Error()
- }
- if trInfo != nil && statusCode != codes.OK {
- trInfo.tr.LazyLog(stringer(statusDesc), true)
- trInfo.tr.SetError()
- }
- if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil {
- grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err)
- return err
- }
- return nil
- }
- if trInfo != nil {
- trInfo.tr.LazyLog(stringer("OK"), false)
- }
- opts := &transport.Options{
- Last: true,
- Delay: false,
- }
- if s.opts.cp != nil {
- stream.SetSendCompress(s.opts.cp.Type())
- }
- if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil {
- switch err := err.(type) {
- case transport.ConnectionError:
- // Nothing to do here.
- case transport.StreamError:
- statusCode = err.Code
- statusDesc = err.Desc
- default:
- statusCode = codes.Unknown
- statusDesc = err.Error()
- }
- return err
- }
- if trInfo != nil {
- trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
- }
- return t.WriteStatus(stream, statusCode, statusDesc)
- }
-}
-
-func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
- if s.opts.cp != nil {
- stream.SetSendCompress(s.opts.cp.Type())
- }
- ss := &serverStream{
- t: t,
- s: stream,
- p: &parser{r: stream},
- codec: s.opts.codec,
- cp: s.opts.cp,
- dc: s.opts.dc,
- trInfo: trInfo,
- }
- if ss.cp != nil {
- ss.cbuf = new(bytes.Buffer)
- }
- if trInfo != nil {
- trInfo.tr.LazyLog(&trInfo.firstLine, false)
- defer func() {
- ss.mu.Lock()
- if err != nil && err != io.EOF {
- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- ss.trInfo.tr.SetError()
- }
- ss.trInfo.tr.Finish()
- ss.trInfo.tr = nil
- ss.mu.Unlock()
- }()
- }
- if appErr := sd.Handler(srv.server, ss); appErr != nil {
- if err, ok := appErr.(rpcError); ok {
- ss.statusCode = err.code
- ss.statusDesc = err.desc
- } else if err, ok := appErr.(transport.StreamError); ok {
- ss.statusCode = err.Code
- ss.statusDesc = err.Desc
- } else {
- ss.statusCode = convertCode(appErr)
- ss.statusDesc = appErr.Error()
- }
- }
- if trInfo != nil {
- ss.mu.Lock()
- if ss.statusCode != codes.OK {
- ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true)
- ss.trInfo.tr.SetError()
- } else {
- ss.trInfo.tr.LazyLog(stringer("OK"), false)
- }
- ss.mu.Unlock()
- }
- return t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc)
-
-}
-
-func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
- sm := stream.Method()
- if sm != "" && sm[0] == '/' {
- sm = sm[1:]
- }
- pos := strings.LastIndex(sm, "/")
- if pos == -1 {
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
- trInfo.tr.SetError()
- }
- if err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil {
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- trInfo.tr.SetError()
- }
- grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
- }
- if trInfo != nil {
- trInfo.tr.Finish()
- }
- return
- }
- service := sm[:pos]
- method := sm[pos+1:]
- srv, ok := s.m[service]
- if !ok {
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true)
- trInfo.tr.SetError()
- }
- if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil {
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- trInfo.tr.SetError()
- }
- grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
- }
- if trInfo != nil {
- trInfo.tr.Finish()
- }
- return
- }
- // Unary RPC or Streaming RPC?
- if md, ok := srv.md[method]; ok {
- s.processUnaryRPC(t, stream, srv, md, trInfo)
- return
- }
- if sd, ok := srv.sd[method]; ok {
- s.processStreamingRPC(t, stream, srv, sd, trInfo)
- return
- }
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true)
- trInfo.tr.SetError()
- }
- if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil {
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- trInfo.tr.SetError()
- }
- grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
- }
- if trInfo != nil {
- trInfo.tr.Finish()
- }
-}
-
-// Stop stops the gRPC server. It immediately closes all open
-// connections and listeners.
-// It cancels all active RPCs on the server side, and the corresponding
-// pending RPCs on the client side will be notified by connection
-// errors.
-func (s *Server) Stop() {
- s.mu.Lock()
- listeners := s.lis
- s.lis = nil
- cs := s.conns
- s.conns = nil
- s.mu.Unlock()
-
- for lis := range listeners {
- lis.Close()
- }
- for c := range cs {
- c.Close()
- }
-
- s.mu.Lock()
- if s.events != nil {
- s.events.Finish()
- s.events = nil
- }
- s.mu.Unlock()
-}
-
-func init() {
- internal.TestingCloseConns = func(arg interface{}) {
- arg.(*Server).testingCloseConns()
- }
- internal.TestingUseHandlerImpl = func(arg interface{}) {
- arg.(*Server).opts.useHandlerImpl = true
- }
-}
-
-// testingCloseConns closes all existing transports but keeps s.lis
-// accepting new connections.
-func (s *Server) testingCloseConns() {
- s.mu.Lock()
- for c := range s.conns {
- c.Close()
- delete(s.conns, c)
- }
- s.mu.Unlock()
-}
-
-// SendHeader sends header metadata. It may be called at most once from a unary
-// RPC handler. The ctx is the RPC handler's Context or one derived from it.
-func SendHeader(ctx context.Context, md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
- stream, ok := transport.StreamFromContext(ctx)
- if !ok {
- return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx)
- }
- t := stream.ServerTransport()
- if t == nil {
- grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream)
- }
- return t.WriteHeader(stream, md)
-}
-
-// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
-// It may be called at most once from a unary RPC handler. The ctx is the RPC
-// handler's Context or one derived from it.
-func SetTrailer(ctx context.Context, md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
- stream, ok := transport.StreamFromContext(ctx)
- if !ok {
- return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx)
- }
- return stream.SetTrailer(md)
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/stream.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/stream.go
deleted file mode 100644
index dba7f6c420..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/stream.go
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-package grpc
-
-import (
- "bytes"
- "errors"
- "io"
- "sync"
- "time"
-
- "golang.org/x/net/context"
- "golang.org/x/net/trace"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/transport"
-)
-
-type streamHandler func(srv interface{}, stream ServerStream) error
-
-// StreamDesc represents a streaming RPC service's method specification.
-type StreamDesc struct {
- StreamName string
- Handler streamHandler
-
- // At least one of these is true.
- ServerStreams bool
- ClientStreams bool
-}
-
-// Stream defines the common interface a client or server stream has to satisfy.
-type Stream interface {
- // Context returns the context for this stream.
- Context() context.Context
- // SendMsg blocks until it sends m, the stream is done or the stream
- // breaks.
- // On error, it aborts the stream and returns an RPC status on client
- // side. On server side, it simply returns the error to the caller.
- // SendMsg is called by generated code.
- SendMsg(m interface{}) error
- // RecvMsg blocks until it receives a message or the stream is
- // done. On client side, it returns io.EOF when the stream is done. On
- // any other error, it aborts the stream and returns an RPC status. On
- // server side, it simply returns the error to the caller.
- RecvMsg(m interface{}) error
-}
-
-// ClientStream defines the interface a client stream has to satisfy.
-type ClientStream interface {
- // Header returns the header metadata received from the server if there
- // is any. It blocks if the metadata is not ready to read.
- Header() (metadata.MD, error)
- // Trailer returns the trailer metadata from the server. It must be called
- // after stream.Recv() returns non-nil error (including io.EOF) for
- // bi-directional streaming and server streaming or stream.CloseAndRecv()
- // returns for client streaming in order to receive trailer metadata if
- // present. Otherwise, it could return an empty MD even though a trailer
- // is present.
- Trailer() metadata.MD
- // CloseSend closes the send direction of the stream. It closes the stream
- // when a non-nil error is encountered.
- CloseSend() error
- Stream
-}
-
-// NewClientStream creates a new Stream for the client side. This is called
-// by generated code.
-func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
- var (
- t transport.ClientTransport
- err error
- )
- t, err = cc.dopts.picker.Pick(ctx)
- if err != nil {
- return nil, toRPCErr(err)
- }
- // TODO(zhaoq): CallOption is omitted. Add support when it is needed.
- callHdr := &transport.CallHdr{
- Host: cc.authority,
- Method: method,
- Flush: desc.ServerStreams && desc.ClientStreams,
- }
- if cc.dopts.cp != nil {
- callHdr.SendCompress = cc.dopts.cp.Type()
- }
- cs := &clientStream{
- desc: desc,
- codec: cc.dopts.codec,
- cp: cc.dopts.cp,
- dc: cc.dopts.dc,
- tracing: EnableTracing,
- }
- if cc.dopts.cp != nil {
- callHdr.SendCompress = cc.dopts.cp.Type()
- cs.cbuf = new(bytes.Buffer)
- }
- if cs.tracing {
- cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
- cs.trInfo.firstLine.client = true
- if deadline, ok := ctx.Deadline(); ok {
- cs.trInfo.firstLine.deadline = deadline.Sub(time.Now())
- }
- cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false)
- ctx = trace.NewContext(ctx, cs.trInfo.tr)
- }
- s, err := t.NewStream(ctx, callHdr)
- if err != nil {
- cs.finish(err)
- return nil, toRPCErr(err)
- }
- cs.t = t
- cs.s = s
- cs.p = &parser{r: s}
- // Listen on ctx.Done() to detect cancellation when there are no pending
- // I/O operations on this stream.
- go func() {
- select {
- case <-t.Error():
- // On transport error, simply exit.
- case <-s.Context().Done():
- err := s.Context().Err()
- cs.finish(err)
- cs.closeTransportStream(transport.ContextErr(err))
- }
- }()
- return cs, nil
-}
-
-// clientStream implements a client side Stream.
-type clientStream struct {
- t transport.ClientTransport
- s *transport.Stream
- p *parser
- desc *StreamDesc
- codec Codec
- cp Compressor
- cbuf *bytes.Buffer
- dc Decompressor
-
- tracing bool // set to EnableTracing when the clientStream is created.
-
- mu sync.Mutex
- closed bool
- // trInfo.tr is set when the clientStream is created (if EnableTracing is true),
- // and is set to nil when the clientStream's finish method is called.
- trInfo traceInfo
-}
-
-func (cs *clientStream) Context() context.Context {
- return cs.s.Context()
-}
-
-func (cs *clientStream) Header() (metadata.MD, error) {
- m, err := cs.s.Header()
- if err != nil {
- if _, ok := err.(transport.ConnectionError); !ok {
- cs.closeTransportStream(err)
- }
- }
- return m, err
-}
-
-func (cs *clientStream) Trailer() metadata.MD {
- return cs.s.Trailer()
-}
-
-func (cs *clientStream) SendMsg(m interface{}) (err error) {
- if cs.tracing {
- cs.mu.Lock()
- if cs.trInfo.tr != nil {
- cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
- }
- cs.mu.Unlock()
- }
- defer func() {
- if err != nil {
- cs.finish(err)
- }
- if err == nil || err == io.EOF {
- return
- }
- if _, ok := err.(transport.ConnectionError); !ok {
- cs.closeTransportStream(err)
- }
- err = toRPCErr(err)
- }()
- out, err := encode(cs.codec, m, cs.cp, cs.cbuf)
- defer func() {
- if cs.cbuf != nil {
- cs.cbuf.Reset()
- }
- }()
- if err != nil {
- return transport.StreamErrorf(codes.Internal, "grpc: %v", err)
- }
- return cs.t.Write(cs.s, out, &transport.Options{Last: false})
-}
-
-func (cs *clientStream) RecvMsg(m interface{}) (err error) {
- err = recv(cs.p, cs.codec, cs.s, cs.dc, m)
- defer func() {
- // err != nil indicates the termination of the stream.
- if err != nil {
- cs.finish(err)
- }
- }()
- if err == nil {
- if cs.tracing {
- cs.mu.Lock()
- if cs.trInfo.tr != nil {
- cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
- }
- cs.mu.Unlock()
- }
- if !cs.desc.ClientStreams || cs.desc.ServerStreams {
- return
- }
- // Special handling for client streaming rpc.
- err = recv(cs.p, cs.codec, cs.s, cs.dc, m)
- cs.closeTransportStream(err)
- if err == nil {
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
- }
- if err == io.EOF {
- if cs.s.StatusCode() == codes.OK {
- cs.finish(err)
- return nil
- }
- return Errorf(cs.s.StatusCode(), cs.s.StatusDesc())
- }
- return toRPCErr(err)
- }
- if _, ok := err.(transport.ConnectionError); !ok {
- cs.closeTransportStream(err)
- }
- if err == io.EOF {
- if cs.s.StatusCode() == codes.OK {
- // Returns io.EOF to indicate the end of the stream.
- return
- }
- return Errorf(cs.s.StatusCode(), cs.s.StatusDesc())
- }
- return toRPCErr(err)
-}
-
-func (cs *clientStream) CloseSend() (err error) {
- err = cs.t.Write(cs.s, nil, &transport.Options{Last: true})
- defer func() {
- if err != nil {
- cs.finish(err)
- }
- }()
- if err == nil || err == io.EOF {
- return
- }
- if _, ok := err.(transport.ConnectionError); !ok {
- cs.closeTransportStream(err)
- }
- err = toRPCErr(err)
- return
-}
-
-func (cs *clientStream) closeTransportStream(err error) {
- cs.mu.Lock()
- if cs.closed {
- cs.mu.Unlock()
- return
- }
- cs.closed = true
- cs.mu.Unlock()
- cs.t.CloseStream(cs.s, err)
-}
-
-func (cs *clientStream) finish(err error) {
- if !cs.tracing {
- return
- }
- cs.mu.Lock()
- defer cs.mu.Unlock()
- if cs.trInfo.tr != nil {
- if err == nil || err == io.EOF {
- cs.trInfo.tr.LazyPrintf("RPC: [OK]")
- } else {
- cs.trInfo.tr.LazyPrintf("RPC: [%v]", err)
- cs.trInfo.tr.SetError()
- }
- cs.trInfo.tr.Finish()
- cs.trInfo.tr = nil
- }
-}
-
-// ServerStream defines the interface a server stream has to satisfy.
-type ServerStream interface {
- // SendHeader sends the header metadata. It fails if called
- // multiple times or if called after SendProto.
- SendHeader(metadata.MD) error
- // SetTrailer sets the trailer metadata which will be sent with the
- // RPC status.
- SetTrailer(metadata.MD)
- Stream
-}
-
-// serverStream implements a server side Stream.
-type serverStream struct {
- t transport.ServerTransport
- s *transport.Stream
- p *parser
- codec Codec
- cp Compressor
- dc Decompressor
- cbuf *bytes.Buffer
- statusCode codes.Code
- statusDesc string
- trInfo *traceInfo
-
- mu sync.Mutex // protects trInfo.tr after the service handler runs.
-}
-
-func (ss *serverStream) Context() context.Context {
- return ss.s.Context()
-}
-
-func (ss *serverStream) SendHeader(md metadata.MD) error {
- return ss.t.WriteHeader(ss.s, md)
-}
-
-func (ss *serverStream) SetTrailer(md metadata.MD) {
- if md.Len() == 0 {
- return
- }
- ss.s.SetTrailer(md)
- return
-}
-
-func (ss *serverStream) SendMsg(m interface{}) (err error) {
- defer func() {
- if ss.trInfo != nil {
- ss.mu.Lock()
- if ss.trInfo.tr != nil {
- if err == nil {
- ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
- } else {
- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- ss.trInfo.tr.SetError()
- }
- }
- ss.mu.Unlock()
- }
- }()
- out, err := encode(ss.codec, m, ss.cp, ss.cbuf)
- defer func() {
- if ss.cbuf != nil {
- ss.cbuf.Reset()
- }
- }()
- if err != nil {
- err = transport.StreamErrorf(codes.Internal, "grpc: %v", err)
- return err
- }
- return ss.t.Write(ss.s, out, &transport.Options{Last: false})
-}
-
-func (ss *serverStream) RecvMsg(m interface{}) (err error) {
- defer func() {
- if ss.trInfo != nil {
- ss.mu.Lock()
- if ss.trInfo.tr != nil {
- if err == nil {
- ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
- } else if err != io.EOF {
- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- ss.trInfo.tr.SetError()
- }
- }
- ss.mu.Unlock()
- }
- }()
- return recv(ss.p, ss.codec, ss.s, ss.dc, m)
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/trace.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/trace.go
deleted file mode 100644
index cde04fbfc9..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/trace.go
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-package grpc
-
-import (
- "bytes"
- "fmt"
- "io"
- "net"
- "strings"
- "time"
-
- "golang.org/x/net/trace"
-)
-
-// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
-// This should only be set before any RPCs are sent or received by this program.
-var EnableTracing = true
-
-// methodFamily returns the trace family for the given method.
-// It turns "/pkg.Service/GetFoo" into "Service".
-func methodFamily(m string) string {
- m = strings.TrimPrefix(m, "/") // remove leading slash
- if i := strings.Index(m, "/"); i >= 0 {
- m = m[:i] // remove everything from second slash
- }
- if i := strings.LastIndex(m, "."); i >= 0 {
- m = m[i+1:] // cut down to last dotted component
- }
- return m
-}
-
-// traceInfo contains tracing information for an RPC.
-type traceInfo struct {
- tr trace.Trace
- firstLine firstLine
-}
-
-// firstLine is the first line of an RPC trace.
-type firstLine struct {
- client bool // whether this is a client (outgoing) RPC
- remoteAddr net.Addr
- deadline time.Duration // may be zero
-}
-
-func (f *firstLine) String() string {
- var line bytes.Buffer
- io.WriteString(&line, "RPC: ")
- if f.client {
- io.WriteString(&line, "to")
- } else {
- io.WriteString(&line, "from")
- }
- fmt.Fprintf(&line, " %v deadline:", f.remoteAddr)
- if f.deadline != 0 {
- fmt.Fprint(&line, f.deadline)
- } else {
- io.WriteString(&line, "none")
- }
- return line.String()
-}
-
-// payload represents an RPC request or response payload.
-type payload struct {
- sent bool // whether this is an outgoing payload
- msg interface{} // e.g. a proto.Message
- // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here?
-}
-
-func (p payload) String() string {
- if p.sent {
- return fmt.Sprintf("sent: %v", p.msg)
- } else {
- return fmt.Sprintf("recv: %v", p.msg)
- }
-}
-
-type fmtStringer struct {
- format string
- a []interface{}
-}
-
-func (f *fmtStringer) String() string {
- return fmt.Sprintf(f.format, f.a...)
-}
-
-type stringer string
-
-func (s stringer) String() string { return string(s) }
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/control.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/control.go
deleted file mode 100644
index f6b38a5a6d..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/control.go
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-package transport
-
-import (
- "fmt"
- "sync"
-
- "golang.org/x/net/http2"
-)
-
-const (
- // The default value of flow control window size in HTTP2 spec.
- defaultWindowSize = 65535
- // The initial window size for flow control.
- initialWindowSize = defaultWindowSize // for an RPC
- initialConnWindowSize = defaultWindowSize * 16 // for a connection
-)
-
-// The following defines various control items which could flow through
-// the control buffer of transport. They represent different aspects of
-// control tasks, e.g., flow control, settings, stream resetting, etc.
-type windowUpdate struct {
- streamID uint32
- increment uint32
-}
-
-func (windowUpdate) isItem() bool {
- return true
-}
-
-type settings struct {
- ack bool
- ss []http2.Setting
-}
-
-func (settings) isItem() bool {
- return true
-}
-
-type resetStream struct {
- streamID uint32
- code http2.ErrCode
-}
-
-func (resetStream) isItem() bool {
- return true
-}
-
-type flushIO struct {
-}
-
-func (flushIO) isItem() bool {
- return true
-}
-
-type ping struct {
- ack bool
- data [8]byte
-}
-
-func (ping) isItem() bool {
- return true
-}
-
-// quotaPool is a pool which accumulates the quota and sends it to acquire()
-// when it is available.
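-// The available quota is kept either in the quota field or, once positive,
-// parked as a single value on c for acquire() to receive; c is buffered with
-// capacity 1, and add and reset only send on it via non-blocking selects.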
-type quotaPool struct {
- c chan int
-
- mu sync.Mutex
- quota int
-}
-
-// newQuotaPool creates a quotaPool which has quota q available to consume.
-func newQuotaPool(q int) *quotaPool {
- qb := "aPool{
- c: make(chan int, 1),
- }
- if q > 0 {
- qb.c <- q
- } else {
- qb.quota = q
- }
- return qb
-}
-
-// add adds n to the available quota and tries to send it on acquire.
-func (qb *quotaPool) add(n int) {
- qb.mu.Lock()
- defer qb.mu.Unlock()
- qb.quota += n
- if qb.quota <= 0 {
- return
- }
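- // Hand the accumulated quota to a waiting acquirer if the channel slot is
- // free; the default case keeps add non-blocking when a value is already pending.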
- select {
- case qb.c <- qb.quota:
- qb.quota = 0
- default:
- }
-}
-
-// cancel cancels the pending quota sent on acquire, if any.
-func (qb *quotaPool) cancel() {
- qb.mu.Lock()
- defer qb.mu.Unlock()
- select {
- case n := <-qb.c:
- qb.quota += n
- default:
- }
-}
-
-// reset cancels the pending quota sent on acquire, if any, adds v to it, and
-// sends the result back on acquire.
-func (qb *quotaPool) reset(v int) {
- qb.mu.Lock()
- defer qb.mu.Unlock()
- select {
- case n := <-qb.c:
- qb.quota += n
- default:
- }
- qb.quota += v
- if qb.quota <= 0 {
- return
- }
- select {
- case qb.c <- qb.quota:
- qb.quota = 0
- default:
- }
-}
-
-// acquire returns the channel on which available quota amounts are sent.
-func (qb *quotaPool) acquire() <-chan int {
- return qb.c
-}
-
-// inFlow deals with inbound flow control
-type inFlow struct {
- // The inbound flow control limit for pending data.
- limit uint32
- // conn points to the shared connection-level inFlow that is shared
- // by all streams on that conn. It is nil for the inFlow on the conn
- // directly.
- conn *inFlow
-
- mu sync.Mutex
- // pendingData is the overall data which has been received but not yet
- // consumed by the application.
- pendingData uint32
- // The amount of data the application has consumed but for which grpc has
- // not yet sent a window update. Used to reduce window update frequency.
- pendingUpdate uint32
-}
-
-// onData is invoked when some data frame is received. It increments not only its
-// own pendingData but also that of the associated connection-level flow.
-func (f *inFlow) onData(n uint32) error {
- if n == 0 {
- return nil
- }
- f.mu.Lock()
- defer f.mu.Unlock()
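- // The limit covers both unconsumed data and data already consumed but not
- // yet announced to the peer via a window update.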
- if f.pendingData+f.pendingUpdate+n > f.limit {
- return fmt.Errorf("recieved %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate+n, f.limit)
- }
- if f.conn != nil {
- if err := f.conn.onData(n); err != nil {
- return ConnectionErrorf("%v", err)
- }
- }
- f.pendingData += n
- return nil
-}
-
-// connOnRead updates the connection level states when the application consumes data.
-func (f *inFlow) connOnRead(n uint32) uint32 {
- if n == 0 || f.conn != nil {
- return 0
- }
- f.mu.Lock()
- defer f.mu.Unlock()
- f.pendingData -= n
- f.pendingUpdate += n
- if f.pendingUpdate >= f.limit/4 {
- ret := f.pendingUpdate
- f.pendingUpdate = 0
- return ret
- }
- return 0
-}
-
-// onRead is invoked when the application reads the data. It returns the window updates
-// for both stream and connection level.
-func (f *inFlow) onRead(n uint32) (swu, cwu uint32) {
- if n == 0 {
- return
- }
- f.mu.Lock()
- defer f.mu.Unlock()
- if f.pendingData == 0 {
- // pendingData has been adjusted by restoreConn.
- return
- }
- f.pendingData -= n
- f.pendingUpdate += n
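- // Only emit a window update once at least a quarter of the window has been
- // consumed, batching updates instead of sending one per read.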
- if f.pendingUpdate >= f.limit/4 {
- swu = f.pendingUpdate
- f.pendingUpdate = 0
- }
- cwu = f.conn.connOnRead(n)
- return
-}
-
-// restoreConn is invoked when a stream is terminated. It removes its stake in
-// the connection-level flow and resets its own state.
-func (f *inFlow) restoreConn() uint32 {
- if f.conn == nil {
- return 0
- }
- f.mu.Lock()
- defer f.mu.Unlock()
- n := f.pendingData
- f.pendingData = 0
- f.pendingUpdate = 0
- return f.conn.connOnRead(n)
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/handler_server.go
deleted file mode 100644
index d7e18a0b65..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/handler_server.go
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * Copyright 2016, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-// This file is the implementation of a gRPC server using HTTP/2 which
-// uses the standard Go http2 Server implementation (via the
-// http.Handler interface), rather than speaking low-level HTTP/2
-// frames itself. It is the implementation of *grpc.Server.ServeHTTP.
-
-package transport
-
-import (
- "errors"
- "fmt"
- "io"
- "net"
- "net/http"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/context"
- "golang.org/x/net/http2"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
-)
-
-// NewServerHandlerTransport returns a ServerTransport handling gRPC
-// from inside an http.Handler. It requires that the http Server
-// supports HTTP/2.
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) {
- if r.ProtoMajor != 2 {
- return nil, errors.New("gRPC requires HTTP/2")
- }
- if r.Method != "POST" {
- return nil, errors.New("invalid gRPC request method")
- }
- if !strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
- return nil, errors.New("invalid gRPC request content-type")
- }
- if _, ok := w.(http.Flusher); !ok {
- return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
- }
- if _, ok := w.(http.CloseNotifier); !ok {
- return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier")
- }
-
- st := &serverHandlerTransport{
- rw: w,
- req: r,
- closedCh: make(chan struct{}),
- writes: make(chan func()),
- }
-
- if v := r.Header.Get("grpc-timeout"); v != "" {
- to, err := timeoutDecode(v)
- if err != nil {
- return nil, StreamErrorf(codes.Internal, "malformed time-out: %v", err)
- }
- st.timeoutSet = true
- st.timeout = to
- }
-
- var metakv []string
- for k, vv := range r.Header {
- k = strings.ToLower(k)
- if isReservedHeader(k) {
- continue
- }
- for _, v := range vv {
- if k == "user-agent" {
- // user-agent is special. Copying logic of http_util.go.
- if i := strings.LastIndex(v, " "); i == -1 {
- // There is no application user agent string being set
- continue
- } else {
- v = v[:i]
- }
- }
- metakv = append(metakv, k, v)
-
- }
- }
- st.headerMD = metadata.Pairs(metakv...)
-
- return st, nil
-}
-
-// serverHandlerTransport is an implementation of ServerTransport
-// which replies to exactly one gRPC request (exactly one HTTP request),
-// using the net/http.Handler interface. This http.Handler is guaranteed
-// at this point to be speaking over HTTP/2, so it's able to speak valid
-// gRPC.
-type serverHandlerTransport struct {
- rw http.ResponseWriter
- req *http.Request
- timeoutSet bool
- timeout time.Duration
- didCommonHeaders bool
-
- headerMD metadata.MD
-
- closeOnce sync.Once
- closedCh chan struct{} // closed on Close
-
- // writes is a channel of code to run serialized in the
- // ServeHTTP (HandleStreams) goroutine. The channel is closed
- // when WriteStatus is called.
- writes chan func()
-}
-
-func (ht *serverHandlerTransport) Close() error {
- ht.closeOnce.Do(ht.closeCloseChanOnce)
- return nil
-}
-
-func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
-
-func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
-
-// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
-// the empty string if unknown.
-type strAddr string
-
-func (a strAddr) Network() string {
- if a != "" {
- // Per the documentation on net/http.Request.RemoteAddr, if this is
- // set, it's set to the IP:port of the peer (hence, TCP):
- // https://golang.org/pkg/net/http/#Request
- //
- // If we want to support Unix sockets later, we can
- // add our own grpc-specific convention within the
- // grpc codebase to set RemoteAddr to a different
- // format, or probably better: we can attach it to the
- // context and use that from serverHandlerTransport.RemoteAddr.
- return "tcp"
- }
- return ""
-}
-
-func (a strAddr) String() string { return string(a) }
-
-// do runs fn in the ServeHTTP goroutine.
-func (ht *serverHandlerTransport) do(fn func()) error {
- select {
- case ht.writes <- fn:
- return nil
- case <-ht.closedCh:
- return ErrConnClosing
- }
-}
-
-func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
- err := ht.do(func() {
- ht.writeCommonHeaders(s)
-
- // And flush, in case no header or body has been sent yet.
- // This forces a separation of headers and trailers if this is the
- // first call (for example, in end2end tests's TestNoService).
- ht.rw.(http.Flusher).Flush()
-
- h := ht.rw.Header()
- h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode))
- if statusDesc != "" {
- h.Set("Grpc-Message", statusDesc)
- }
- if md := s.Trailer(); len(md) > 0 {
- for k, vv := range md {
- for _, v := range vv {
- // http2 ResponseWriter mechanism to
- // send undeclared Trailers after the
- // headers have possibly been written.
- h.Add(http2.TrailerPrefix+k, v)
- }
- }
- }
- })
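- // Closing writes makes runStream return, which in turn lets HandleStreams
- // finish for this request.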
- close(ht.writes)
- return err
-}
-
-// writeCommonHeaders sets common headers on the first write
-// call (Write, WriteHeader, or WriteStatus).
-func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
- if ht.didCommonHeaders {
- return
- }
- ht.didCommonHeaders = true
-
- h := ht.rw.Header()
- h["Date"] = nil // suppress Date to make tests happy; TODO: restore
- h.Set("Content-Type", "application/grpc")
-
- // Predeclare trailers we'll set later in WriteStatus (after the body).
- // This is a SHOULD in the HTTP RFC, and the way you add (known)
- // Trailers per the net/http.ResponseWriter contract.
- // See https://golang.org/pkg/net/http/#ResponseWriter
- // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
- h.Add("Trailer", "Grpc-Status")
- h.Add("Trailer", "Grpc-Message")
-
- if s.sendCompress != "" {
- h.Set("Grpc-Encoding", s.sendCompress)
- }
-}
-
-func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error {
- return ht.do(func() {
- ht.writeCommonHeaders(s)
- ht.rw.Write(data)
- if !opts.Delay {
- ht.rw.(http.Flusher).Flush()
- }
- })
-}
-
-func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
- return ht.do(func() {
- ht.writeCommonHeaders(s)
- h := ht.rw.Header()
- for k, vv := range md {
- for _, v := range vv {
- h.Add(k, v)
- }
- }
- ht.rw.WriteHeader(200)
- ht.rw.(http.Flusher).Flush()
- })
-}
-
-func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) {
- // With this transport type there will be exactly 1 stream: this HTTP request.
-
- var ctx context.Context
- var cancel context.CancelFunc
- if ht.timeoutSet {
- ctx, cancel = context.WithTimeout(context.Background(), ht.timeout)
- } else {
- ctx, cancel = context.WithCancel(context.Background())
- }
-
- // requestOver is closed when either the request's context is done
- // or the status has been written via WriteStatus.
- requestOver := make(chan struct{})
-
- // clientGone receives a single value if peer is gone, either
- // because the underlying connection is dead or because the
- // peer sends an http2 RST_STREAM.
- clientGone := ht.rw.(http.CloseNotifier).CloseNotify()
- go func() {
- select {
- case <-requestOver:
- return
- case <-ht.closedCh:
- case <-clientGone:
- }
- cancel()
- }()
-
- req := ht.req
-
- s := &Stream{
- id: 0, // irrelevant
- windowHandler: func(int) {}, // nothing
- cancel: cancel,
- buf: newRecvBuffer(),
- st: ht,
- method: req.URL.Path,
- recvCompress: req.Header.Get("grpc-encoding"),
- }
- pr := &peer.Peer{
- Addr: ht.RemoteAddr(),
- }
- if req.TLS != nil {
- pr.AuthInfo = credentials.TLSInfo{*req.TLS}
- }
- ctx = metadata.NewContext(ctx, ht.headerMD)
- ctx = peer.NewContext(ctx, pr)
- s.ctx = newContextWithStream(ctx, s)
- s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf}
-
- // readerDone is closed when the Body.Read-ing goroutine exits.
- readerDone := make(chan struct{})
- go func() {
- defer close(readerDone)
- for {
- buf := make([]byte, 1024) // TODO: minimize garbage, optimize recvBuffer code/ownership
- n, err := req.Body.Read(buf)
- if n > 0 {
- s.buf.put(&recvMsg{data: buf[:n]})
- }
- if err != nil {
- s.buf.put(&recvMsg{err: mapRecvMsgError(err)})
- return
- }
- }
- }()
-
- // startStream is provided by the *grpc.Server's serveStreams.
- // It starts a goroutine serving s and exits immediately.
- // The goroutine that is started is the one that then calls
- // into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
- startStream(s)
-
- ht.runStream()
- close(requestOver)
-
- // Wait for reading goroutine to finish.
- req.Body.Close()
- <-readerDone
-}
-
-func (ht *serverHandlerTransport) runStream() {
- for {
- select {
- case fn, ok := <-ht.writes:
- if !ok {
- return
- }
- fn()
- case <-ht.closedCh:
- return
- }
- }
-}
-
-// mapRecvMsgError maps the non-nil err to the appropriate
-// error value as expected by callers of *grpc.parser.recvMsg.
-// In particular, it can only be:
-// * io.EOF
-// * io.ErrUnexpectedEOF
-// * of type transport.ConnectionError
-// * of type transport.StreamError
-func mapRecvMsgError(err error) error {
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- return err
- }
- if se, ok := err.(http2.StreamError); ok {
- if code, ok := http2ErrConvTab[se.Code]; ok {
- return StreamError{
- Code: code,
- Desc: se.Error(),
- }
- }
- }
- return ConnectionError{Desc: err.Error()}
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_client.go
deleted file mode 100644
index 66fabbba7d..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_client.go
+++ /dev/null
@@ -1,879 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-package transport
-
-import (
- "bytes"
- "errors"
- "io"
- "math"
- "net"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/context"
- "golang.org/x/net/http2"
- "golang.org/x/net/http2/hpack"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
-)
-
-// http2Client implements the ClientTransport interface with HTTP2.
-type http2Client struct {
- target string // server name/addr
- userAgent string
- conn net.Conn // underlying communication channel
- authInfo credentials.AuthInfo // auth info about the connection
- nextID uint32 // the next stream ID to be used
-
- // writableChan synchronizes write access to the transport.
- // A writer acquires the write lock by sending a value on writableChan
- // and releases it by receiving from writableChan.
- writableChan chan int
- // shutdownChan is closed when Close is called.
- // Blocking operations should select on shutdownChan to avoid
- // blocking forever after Close.
- // TODO(zhaoq): Maybe have a channel context?
- shutdownChan chan struct{}
- // errorChan is closed to notify the I/O error to the caller.
- errorChan chan struct{}
-
- framer *framer
- hBuf *bytes.Buffer // the buffer for HPACK encoding
- hEnc *hpack.Encoder // HPACK encoder
-
- // controlBuf delivers all the control related tasks (e.g., window
- // updates, reset streams, and various settings) to the controller.
- controlBuf *recvBuffer
- fc *inFlow
- // sendQuotaPool provides flow control to outbound message.
- sendQuotaPool *quotaPool
- // streamsQuota limits the max number of concurrent streams.
- streamsQuota *quotaPool
-
- // The scheme used: https if TLS is on, http otherwise.
- scheme string
-
- authCreds []credentials.Credentials
-
- mu sync.Mutex // guard the following variables
- state transportState // the state of underlying connection
- activeStreams map[uint32]*Stream
- // The max number of concurrent streams
- maxStreams int
- // the per-stream outbound flow control window size set by the peer.
- streamSendQuota uint32
-}
-
-// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
-// and starts to receive messages on it. A non-nil error is returned if
-// construction fails.
-func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err error) {
- if opts.Dialer == nil {
- // Set the default Dialer.
- opts.Dialer = func(addr string, timeout time.Duration) (net.Conn, error) {
- return net.DialTimeout("tcp", addr, timeout)
- }
- }
- scheme := "http"
- startT := time.Now()
- timeout := opts.Timeout
- conn, connErr := opts.Dialer(addr, timeout)
- if connErr != nil {
- return nil, ConnectionErrorf("transport: %v", connErr)
- }
- var authInfo credentials.AuthInfo
- for _, c := range opts.AuthOptions {
- if ccreds, ok := c.(credentials.TransportAuthenticator); ok {
- scheme = "https"
- // TODO(zhaoq): Now the first TransportAuthenticator is used if there are
- // multiple ones provided. Revisit this if it is not appropriate. Probably
- // place the ClientTransport construction into a separate function to make
- // things clear.
- if timeout > 0 {
- timeout -= time.Since(startT)
- }
- conn, authInfo, connErr = ccreds.ClientHandshake(addr, conn, timeout)
- break
- }
- }
- if connErr != nil {
- return nil, ConnectionErrorf("transport: %v", connErr)
- }
- defer func() {
- if err != nil {
- conn.Close()
- }
- }()
- // Send connection preface to server.
- n, err := conn.Write(clientPreface)
- if err != nil {
- return nil, ConnectionErrorf("transport: %v", err)
- }
- if n != len(clientPreface) {
- return nil, ConnectionErrorf("transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
- }
- framer := newFramer(conn)
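- // Per the HTTP2 spec, the client preface is the magic string followed by a
- // SETTINGS frame; the settings written below complete it.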
- if initialWindowSize != defaultWindowSize {
- err = framer.writeSettings(true, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)})
- } else {
- err = framer.writeSettings(true)
- }
- if err != nil {
- return nil, ConnectionErrorf("transport: %v", err)
- }
- // Adjust the connection flow control window if needed.
- if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
- if err := framer.writeWindowUpdate(true, 0, delta); err != nil {
- return nil, ConnectionErrorf("transport: %v", err)
- }
- }
- ua := primaryUA
- if opts.UserAgent != "" {
- ua = opts.UserAgent + " " + ua
- }
- var buf bytes.Buffer
- t := &http2Client{
- target: addr,
- userAgent: ua,
- conn: conn,
- authInfo: authInfo,
- // The client initiated stream id is odd starting from 1.
- nextID: 1,
- writableChan: make(chan int, 1),
- shutdownChan: make(chan struct{}),
- errorChan: make(chan struct{}),
- framer: framer,
- hBuf: &buf,
- hEnc: hpack.NewEncoder(&buf),
- controlBuf: newRecvBuffer(),
- fc: &inFlow{limit: initialConnWindowSize},
- sendQuotaPool: newQuotaPool(defaultWindowSize),
- scheme: scheme,
- state: reachable,
- activeStreams: make(map[uint32]*Stream),
- authCreds: opts.AuthOptions,
- maxStreams: math.MaxInt32,
- streamSendQuota: defaultWindowSize,
- }
- go t.controller()
- t.writableChan <- 0
- // Start the reader goroutine for incoming messages. The threading model
- // on receiving is that each transport has a dedicated goroutine which
- // reads HTTP2 frames from the network and dispatches each frame to the
- // corresponding stream entity.
- go t.reader()
- return t, nil
-}
-
-func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
- fc := &inFlow{
- limit: initialWindowSize,
- conn: t.fc,
- }
- // TODO(zhaoq): Handle uint32 overflow of Stream.id.
- s := &Stream{
- id: t.nextID,
- method: callHdr.Method,
- sendCompress: callHdr.SendCompress,
- buf: newRecvBuffer(),
- fc: fc,
- sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
- headerChan: make(chan struct{}),
- }
- t.nextID += 2
- s.windowHandler = func(n int) {
- t.updateWindow(s, uint32(n))
- }
- // Make a stream be able to cancel the pending operations by itself.
- s.ctx, s.cancel = context.WithCancel(ctx)
- s.dec = &recvBufferReader{
- ctx: s.ctx,
- recv: s.buf,
- }
- return s
-}
-
-// NewStream creates a stream and registers it in the transport as an
-// "active" stream.
-func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
- // Record the timeout value on the context.
- var timeout time.Duration
- if dl, ok := ctx.Deadline(); ok {
- timeout = dl.Sub(time.Now())
- if timeout <= 0 {
- return nil, ContextErr(context.DeadlineExceeded)
- }
- }
- pr := &peer.Peer{
- Addr: t.conn.RemoteAddr(),
- }
- // Attach Auth info if there is any.
- if t.authInfo != nil {
- pr.AuthInfo = t.authInfo
- }
- ctx = peer.NewContext(ctx, pr)
- authData := make(map[string]string)
- for _, c := range t.authCreds {
- // Construct URI required to get auth request metadata.
- var port string
- if pos := strings.LastIndex(t.target, ":"); pos != -1 {
- // Omit port if it is the default one.
- if t.target[pos+1:] != "443" {
- port = ":" + t.target[pos+1:]
- }
- }
- pos := strings.LastIndex(callHdr.Method, "/")
- if pos == -1 {
- return nil, StreamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method)
- }
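- // The audience is the service URL: scheme, host, optional non-default port
- // and the method path with the trailing RPC name stripped.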
- audience := "https://" + callHdr.Host + port + callHdr.Method[:pos]
- data, err := c.GetRequestMetadata(ctx, audience)
- if err != nil {
- return nil, StreamErrorf(codes.InvalidArgument, "transport: %v", err)
- }
- for k, v := range data {
- authData[k] = v
- }
- }
- t.mu.Lock()
- if t.state != reachable {
- t.mu.Unlock()
- return nil, ErrConnClosing
- }
- checkStreamsQuota := t.streamsQuota != nil
- t.mu.Unlock()
- if checkStreamsQuota {
- sq, err := wait(ctx, t.shutdownChan, t.streamsQuota.acquire())
- if err != nil {
- return nil, err
- }
- // Returns the quota balance back.
- if sq > 1 {
- t.streamsQuota.add(sq - 1)
- }
- }
- if _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil {
- // t.streamsQuota will be updated when t.CloseStream is invoked.
- return nil, err
- }
- t.mu.Lock()
- if t.state != reachable {
- t.mu.Unlock()
- return nil, ErrConnClosing
- }
- s := t.newStream(ctx, callHdr)
- t.activeStreams[s.id] = s
-
- // This stream is not counted when applySettings(...) initializes t.streamsQuota.
- // Reset t.streamsQuota to the right value.
- var reset bool
- if !checkStreamsQuota && t.streamsQuota != nil {
- reset = true
- }
- t.mu.Unlock()
- if reset {
- t.streamsQuota.reset(-1)
- }
-
- // HPACK encodes various headers. Note that once WriteField(...) is
- // called, the corresponding headers/continuation frame has to be sent
- // because hpack.Encoder is stateful.
- t.hBuf.Reset()
- t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"})
- t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme})
- t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method})
- t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
- t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
- t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
- t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"})
-
- if callHdr.SendCompress != "" {
- t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
- }
- if timeout > 0 {
- t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)})
- }
- for k, v := range authData {
- t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
- }
- var (
- hasMD bool
- endHeaders bool
- )
- if md, ok := metadata.FromContext(ctx); ok {
- hasMD = true
- for k, v := range md {
- for _, entry := range v {
- t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
- }
- }
- }
- first := true
- // Sends the headers in a single batch even when they span multiple frames.
- for !endHeaders {
- size := t.hBuf.Len()
- if size > http2MaxFrameLen {
- size = http2MaxFrameLen
- } else {
- endHeaders = true
- }
- var flush bool
- if endHeaders && (hasMD || callHdr.Flush) {
- flush = true
- }
- if first {
- // Sends a HeadersFrame to server to start a new stream.
- p := http2.HeadersFrameParam{
- StreamID: s.id,
- BlockFragment: t.hBuf.Next(size),
- EndStream: false,
- EndHeaders: endHeaders,
- }
- // Do a force flush for the buffered frames iff it is the last headers frame
- // and there is header metadata to be sent. Otherwise, there is no flushing
- // until the corresponding data frame is written.
- err = t.framer.writeHeaders(flush, p)
- first = false
- } else {
- // Sends Continuation frames for the leftover headers.
- err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size))
- }
- if err != nil {
- t.notifyError(err)
- return nil, ConnectionErrorf("transport: %v", err)
- }
- }
- t.writableChan <- 0
- return s, nil
-}
-
-// CloseStream clears the footprint of a stream when the stream is not needed any more.
-// This must not be executed in reader's goroutine.
-func (t *http2Client) CloseStream(s *Stream, err error) {
- var updateStreams bool
- t.mu.Lock()
- if t.streamsQuota != nil {
- updateStreams = true
- }
- delete(t.activeStreams, s.id)
- t.mu.Unlock()
- if updateStreams {
- t.streamsQuota.add(1)
- }
- // In case stream sending and receiving are invoked in separate
- // goroutines (e.g., bi-directional streaming), the caller needs
- // to call cancel on the stream to interrupt the blocking on
- // other goroutines.
- s.cancel()
- s.mu.Lock()
- if q := s.fc.restoreConn(); q > 0 {
- t.controlBuf.put(&windowUpdate{0, q})
- }
- if s.state == streamDone {
- s.mu.Unlock()
- return
- }
- if !s.headerDone {
- close(s.headerChan)
- s.headerDone = true
- }
- s.state = streamDone
- s.mu.Unlock()
- if _, ok := err.(StreamError); ok {
- t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel})
- }
-}
-
-// Close kicks off the shutdown process of the transport. This should be called
-// only once on a transport. Once it is called, the transport should not be
-// accessed any more.
-func (t *http2Client) Close() (err error) {
- t.mu.Lock()
- if t.state == closing {
- t.mu.Unlock()
- return errors.New("transport: Close() was already called")
- }
- t.state = closing
- t.mu.Unlock()
- close(t.shutdownChan)
- err = t.conn.Close()
- t.mu.Lock()
- streams := t.activeStreams
- t.activeStreams = nil
- t.mu.Unlock()
- // Notify all active streams.
- for _, s := range streams {
- s.mu.Lock()
- if !s.headerDone {
- close(s.headerChan)
- s.headerDone = true
- }
- s.mu.Unlock()
- s.write(recvMsg{err: ErrConnClosing})
- }
- return
-}
-
-// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
-// should proceed only if Write returns nil.
-// TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later
-// if it improves the performance.
-func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error {
- r := bytes.NewBuffer(data)
- for {
- var p []byte
- if r.Len() > 0 {
- size := http2MaxFrameLen
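- // add(0) re-publishes any quota balance accumulated in the pool so that the
- // wait below can observe it.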
- s.sendQuotaPool.add(0)
- // Wait until the stream has some quota to send the data.
- sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire())
- if err != nil {
- return err
- }
- t.sendQuotaPool.add(0)
- // Wait until the transport has some quota to send the data.
- tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire())
- if err != nil {
- if _, ok := err.(StreamError); ok {
- t.sendQuotaPool.cancel()
- }
- return err
- }
- if sq < size {
- size = sq
- }
- if tq < size {
- size = tq
- }
- p = r.Next(size)
- ps := len(p)
- if ps < sq {
- // Overbooked stream quota. Return it back.
- s.sendQuotaPool.add(sq - ps)
- }
- if ps < tq {
- // Overbooked transport quota. Return it back.
- t.sendQuotaPool.add(tq - ps)
- }
- }
- var (
- endStream bool
- forceFlush bool
- )
- if opts.Last && r.Len() == 0 {
- endStream = true
- }
- // Indicate there is a writer who is about to write a data frame.
- t.framer.adjustNumWriters(1)
- // Got some quota. Try to acquire writing privilege on the transport.
- if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
- if t.framer.adjustNumWriters(-1) == 0 {
- // This writer is the last one in this batch and has the
- // responsibility to flush the buffered frames. It queues
- // a flush request to controlBuf instead of flushing directly
- // in order to avoid the race with other writing or flushing.
- t.controlBuf.put(&flushIO{})
- }
- return err
- }
- if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 {
- // Do a force flush iff this is the last frame for the entire gRPC message
- // and the caller is the only writer at this moment.
- forceFlush = true
- }
- // If WriteData fails, all the pending streams will be handled
- // by http2Client.Close(). No explicit CloseStream() needs to be
- // invoked.
- if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil {
- t.notifyError(err)
- return ConnectionErrorf("transport: %v", err)
- }
- if t.framer.adjustNumWriters(-1) == 0 {
- t.framer.flushWrite()
- }
- t.writableChan <- 0
- if r.Len() == 0 {
- break
- }
- }
- if !opts.Last {
- return nil
- }
- s.mu.Lock()
- if s.state != streamDone {
- if s.state == streamReadDone {
- s.state = streamDone
- } else {
- s.state = streamWriteDone
- }
- }
- s.mu.Unlock()
- return nil
-}
-
-func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
- t.mu.Lock()
- defer t.mu.Unlock()
- s, ok := t.activeStreams[f.Header().StreamID]
- return s, ok
-}
-
-// updateWindow adjusts the inbound quota for the stream and the transport.
-// Window updates will deliver to the controller for sending when
-// the cumulative quota exceeds the corresponding threshold.
-func (t *http2Client) updateWindow(s *Stream, n uint32) {
- swu, cwu := s.fc.onRead(n)
- if swu > 0 {
- t.controlBuf.put(&windowUpdate{s.id, swu})
- }
- if cwu > 0 {
- t.controlBuf.put(&windowUpdate{0, cwu})
- }
-}
-
-func (t *http2Client) handleData(f *http2.DataFrame) {
- // Select the right stream to dispatch.
- s, ok := t.getStream(f)
- if !ok {
- return
- }
- size := len(f.Data())
- if size > 0 {
- if err := s.fc.onData(uint32(size)); err != nil {
- if _, ok := err.(ConnectionError); ok {
- t.notifyError(err)
- return
- }
- s.mu.Lock()
- if s.state == streamDone {
- s.mu.Unlock()
- return
- }
- s.state = streamDone
- s.statusCode = codes.Internal
- s.statusDesc = err.Error()
- s.mu.Unlock()
- s.write(recvMsg{err: io.EOF})
- t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
- return
- }
- // TODO(bradfitz, zhaoq): A copy is required here because there is no
- // guarantee f.Data() is consumed before the arrival of next frame.
- // Can this copy be eliminated?
- data := make([]byte, size)
- copy(data, f.Data())
- s.write(recvMsg{data: data})
- }
- // The server has closed the stream without sending trailers. Record that
- // the read direction is closed, and set the status appropriately.
- if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
- s.mu.Lock()
- if s.state == streamWriteDone {
- s.state = streamDone
- } else {
- s.state = streamReadDone
- }
- s.statusCode = codes.Internal
- s.statusDesc = "server closed the stream without sending trailers"
- s.mu.Unlock()
- s.write(recvMsg{err: io.EOF})
- }
-}
-
-func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
- s, ok := t.getStream(f)
- if !ok {
- return
- }
- s.mu.Lock()
- if s.state == streamDone {
- s.mu.Unlock()
- return
- }
- s.state = streamDone
- if !s.headerDone {
- close(s.headerChan)
- s.headerDone = true
- }
- s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)]
- if !ok {
- grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode)
- }
- s.mu.Unlock()
- s.write(recvMsg{err: io.EOF})
-}
-
-func (t *http2Client) handleSettings(f *http2.SettingsFrame) {
- if f.IsAck() {
- return
- }
- var ss []http2.Setting
- f.ForeachSetting(func(s http2.Setting) error {
- ss = append(ss, s)
- return nil
- })
- // The settings will be applied once the ack is sent.
- t.controlBuf.put(&settings{ack: true, ss: ss})
-}
-
-func (t *http2Client) handlePing(f *http2.PingFrame) {
- pingAck := &ping{ack: true}
- copy(pingAck.data[:], f.Data[:])
- t.controlBuf.put(pingAck)
-}
-
-func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
- // TODO(zhaoq): GoAwayFrame handler to be implemented
-}
-
-func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
- id := f.Header().StreamID
- incr := f.Increment
- if id == 0 {
- t.sendQuotaPool.add(int(incr))
- return
- }
- if s, ok := t.getStream(f); ok {
- s.sendQuotaPool.add(int(incr))
- }
-}
-
-// operateHeaders takes action on the decoded headers.
-func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
- s, ok := t.getStream(frame)
- if !ok {
- return
- }
- var state decodeState
- for _, hf := range frame.Fields {
- state.processHeaderField(hf)
- }
- if state.err != nil {
- s.write(recvMsg{err: state.err})
- // Something went wrong. Stop reading even though data may remain.
- return
- }
-
- endStream := frame.StreamEnded()
-
- s.mu.Lock()
- if !endStream {
- s.recvCompress = state.encoding
- }
- if !s.headerDone {
- if !endStream && len(state.mdata) > 0 {
- s.header = state.mdata
- }
- close(s.headerChan)
- s.headerDone = true
- }
- if !endStream || s.state == streamDone {
- s.mu.Unlock()
- return
- }
-
- if len(state.mdata) > 0 {
- s.trailer = state.mdata
- }
- s.state = streamDone
- s.statusCode = state.statusCode
- s.statusDesc = state.statusDesc
- s.mu.Unlock()
-
- s.write(recvMsg{err: io.EOF})
-}
-
-func handleMalformedHTTP2(s *Stream, err http2.StreamError) {
- s.mu.Lock()
- if !s.headerDone {
- close(s.headerChan)
- s.headerDone = true
- }
- s.mu.Unlock()
- s.write(recvMsg{err: StreamErrorf(http2ErrConvTab[err.Code], "%v", err)})
-}
-
-// reader runs as a separate goroutine in charge of reading data from network
-// connection.
-//
-// TODO(zhaoq): currently one reader per transport. Investigate whether this is
-// optimal.
-// TODO(zhaoq): Check the validity of the incoming frame sequence.
-func (t *http2Client) reader() {
- // Check the validity of server preface.
- frame, err := t.framer.readFrame()
- if err != nil {
- t.notifyError(err)
- return
- }
- sf, ok := frame.(*http2.SettingsFrame)
- if !ok {
- t.notifyError(err)
- return
- }
- t.handleSettings(sf)
-
- // loop to keep reading incoming messages on this transport.
- for {
- frame, err := t.framer.readFrame()
- if err != nil {
- // Abort an active stream if the http2.Framer returns a
- // http2.StreamError. This can happen only if the server's response
- // is malformed http2.
- if se, ok := err.(http2.StreamError); ok {
- t.mu.Lock()
- s := t.activeStreams[se.StreamID]
- t.mu.Unlock()
- if s != nil {
- handleMalformedHTTP2(s, se)
- }
- continue
- } else {
- // Transport error.
- t.notifyError(err)
- return
- }
- }
- switch frame := frame.(type) {
- case *http2.MetaHeadersFrame:
- t.operateHeaders(frame)
- case *http2.DataFrame:
- t.handleData(frame)
- case *http2.RSTStreamFrame:
- t.handleRSTStream(frame)
- case *http2.SettingsFrame:
- t.handleSettings(frame)
- case *http2.PingFrame:
- t.handlePing(frame)
- case *http2.GoAwayFrame:
- t.handleGoAway(frame)
- case *http2.WindowUpdateFrame:
- t.handleWindowUpdate(frame)
- default:
- grpclog.Printf("transport: http2Client.reader got unhandled frame type %v.", frame)
- }
- }
-}
-
-func (t *http2Client) applySettings(ss []http2.Setting) {
- for _, s := range ss {
- switch s.ID {
- case http2.SettingMaxConcurrentStreams:
- // TODO(zhaoq): This is a hack to avoid significant refactoring of the
- // code to deal with the unrealistic int32 overflow. Probably will try
- // to find a better way to handle this later.
- if s.Val > math.MaxInt32 {
- s.Val = math.MaxInt32
- }
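- // The first MAX_CONCURRENT_STREAMS setting creates the streams quota pool;
- // subsequent ones adjust it by the delta from the previous limit.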
- t.mu.Lock()
- reset := t.streamsQuota != nil
- if !reset {
- t.streamsQuota = newQuotaPool(int(s.Val) - len(t.activeStreams))
- }
- ms := t.maxStreams
- t.maxStreams = int(s.Val)
- t.mu.Unlock()
- if reset {
- t.streamsQuota.reset(int(s.Val) - ms)
- }
- case http2.SettingInitialWindowSize:
- t.mu.Lock()
- for _, stream := range t.activeStreams {
- // Adjust the sending quota for each stream.
- stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota))
- }
- t.streamSendQuota = s.Val
- t.mu.Unlock()
- }
- }
-}
-
-// controller running in a separate goroutine takes charge of sending control
-// frames (e.g., window update, reset stream, setting, etc.) to the server.
-func (t *http2Client) controller() {
- for {
- select {
- case i := <-t.controlBuf.get():
- t.controlBuf.load()
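- // All frame writes on this transport are serialized through writableChan;
- // acquire it before touching the framer.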
- select {
- case <-t.writableChan:
- switch i := i.(type) {
- case *windowUpdate:
- t.framer.writeWindowUpdate(true, i.streamID, i.increment)
- case *settings:
- if i.ack {
- t.framer.writeSettingsAck(true)
- t.applySettings(i.ss)
- } else {
- t.framer.writeSettings(true, i.ss...)
- }
- case *resetStream:
- t.framer.writeRSTStream(true, i.streamID, i.code)
- case *flushIO:
- t.framer.flushWrite()
- case *ping:
- t.framer.writePing(true, i.ack, i.data)
- default:
- grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i)
- }
- t.writableChan <- 0
- continue
- case <-t.shutdownChan:
- return
- }
- case <-t.shutdownChan:
- return
- }
- }
-}
-
-func (t *http2Client) Error() <-chan struct{} {
- return t.errorChan
-}
-
-func (t *http2Client) notifyError(err error) {
- t.mu.Lock()
- defer t.mu.Unlock()
- // make sure t.errorChan is closed only once.
- if t.state == reachable {
- t.state = unreachable
- close(t.errorChan)
- grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err)
- }
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_server.go
deleted file mode 100644
index cec441cfc7..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_server.go
+++ /dev/null
@@ -1,691 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-package transport
-
-import (
- "bytes"
- "errors"
- "io"
- "math"
- "net"
- "strconv"
- "sync"
-
- "golang.org/x/net/context"
- "golang.org/x/net/http2"
- "golang.org/x/net/http2/hpack"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
-)
-
-// ErrIllegalHeaderWrite indicates that setting header is illegal because of
-// the stream's state.
-var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
-
-// http2Server implements the ServerTransport interface with HTTP2.
-type http2Server struct {
- conn net.Conn
- maxStreamID uint32 // max stream ID ever seen
- authInfo credentials.AuthInfo // auth info about the connection
- // writableChan synchronizes write access to the transport.
- // A writer acquires the write lock by receiving a value on writableChan
- // and releases it by sending on writableChan.
- writableChan chan int
- // shutdownChan is closed when Close is called.
- // Blocking operations should select on shutdownChan to avoid
- // blocking forever after Close.
- shutdownChan chan struct{}
- framer *framer
- hBuf *bytes.Buffer // the buffer for HPACK encoding
- hEnc *hpack.Encoder // HPACK encoder
-
- // The max number of concurrent streams.
- maxStreams uint32
- // controlBuf delivers all the control related tasks (e.g., window
- // updates, reset streams, and various settings) to the controller.
- controlBuf *recvBuffer
- fc *inFlow
- // sendQuotaPool provides flow control to outbound message.
- sendQuotaPool *quotaPool
-
- mu sync.Mutex // guard the following
- state transportState
- activeStreams map[uint32]*Stream
- // the per-stream outbound flow control window size set by the peer.
- streamSendQuota uint32
-}
-
-// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
-// returned if something goes wrong.
-func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (_ ServerTransport, err error) {
- framer := newFramer(conn)
- // Send initial settings as connection preface to client.
- var settings []http2.Setting
- // TODO(zhaoq): Have a better way to signal "no limit" because 0 is
- // permitted in the HTTP2 spec.
- if maxStreams == 0 {
- maxStreams = math.MaxUint32
- } else {
- settings = append(settings, http2.Setting{http2.SettingMaxConcurrentStreams, maxStreams})
- }
- if initialWindowSize != defaultWindowSize {
- settings = append(settings, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)})
- }
- if err := framer.writeSettings(true, settings...); err != nil {
- return nil, ConnectionErrorf("transport: %v", err)
- }
- // Adjust the connection flow control window if needed.
- if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
- if err := framer.writeWindowUpdate(true, 0, delta); err != nil {
- return nil, ConnectionErrorf("transport: %v", err)
- }
- }
- var buf bytes.Buffer
- t := &http2Server{
- conn: conn,
- authInfo: authInfo,
- framer: framer,
- hBuf: &buf,
- hEnc: hpack.NewEncoder(&buf),
- maxStreams: maxStreams,
- controlBuf: newRecvBuffer(),
- fc: &inFlow{limit: initialConnWindowSize},
- sendQuotaPool: newQuotaPool(defaultWindowSize),
- state: reachable,
- writableChan: make(chan int, 1),
- shutdownChan: make(chan struct{}),
- activeStreams: make(map[uint32]*Stream),
- streamSendQuota: defaultWindowSize,
- }
- go t.controller()
- t.writableChan <- 0
- return t, nil
-}
-
-// operateHeader takes action on the decoded headers.
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) {
- buf := newRecvBuffer()
- fc := &inFlow{
- limit: initialWindowSize,
- conn: t.fc,
- }
- s := &Stream{
- id: frame.Header().StreamID,
- st: t,
- buf: buf,
- fc: fc,
- }
-
- var state decodeState
- for _, hf := range frame.Fields {
- state.processHeaderField(hf)
- }
- if err := state.err; err != nil {
- if se, ok := err.(StreamError); ok {
- t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
- }
- return
- }
-
- if frame.StreamEnded() {
- // s is just created by the caller. No lock needed.
- s.state = streamReadDone
- }
- s.recvCompress = state.encoding
- if state.timeoutSet {
- s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout)
- } else {
- s.ctx, s.cancel = context.WithCancel(context.TODO())
- }
- pr := &peer.Peer{
- Addr: t.conn.RemoteAddr(),
- }
- // Attach Auth info if there is any.
- if t.authInfo != nil {
- pr.AuthInfo = t.authInfo
- }
- s.ctx = peer.NewContext(s.ctx, pr)
- // Cache the current stream in the context so that the server application
- // can retrieve it. Required when the server wants to send some metadata
- // back to the client (unary call only).
- s.ctx = newContextWithStream(s.ctx, s)
- // Attach the received metadata to the context.
- if len(state.mdata) > 0 {
- s.ctx = metadata.NewContext(s.ctx, state.mdata)
- }
-
- s.dec = &recvBufferReader{
- ctx: s.ctx,
- recv: s.buf,
- }
- s.recvCompress = state.encoding
- s.method = state.method
- t.mu.Lock()
- if t.state != reachable {
- t.mu.Unlock()
- return
- }
- if uint32(len(t.activeStreams)) >= t.maxStreams {
- t.mu.Unlock()
- t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
- return
- }
- s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
- t.activeStreams[s.id] = s
- t.mu.Unlock()
- s.windowHandler = func(n int) {
- t.updateWindow(s, uint32(n))
- }
- handle(s)
-}
-
-// HandleStreams receives incoming streams using the given handler. This is
-// typically run in a separate goroutine.
-func (t *http2Server) HandleStreams(handle func(*Stream)) {
- // Check the validity of client preface.
- preface := make([]byte, len(clientPreface))
- if _, err := io.ReadFull(t.conn, preface); err != nil {
- grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
- t.Close()
- return
- }
- if !bytes.Equal(preface, clientPreface) {
- grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
- t.Close()
- return
- }
-
- frame, err := t.framer.readFrame()
- if err != nil {
- grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err)
- t.Close()
- return
- }
- sf, ok := frame.(*http2.SettingsFrame)
- if !ok {
- grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
- t.Close()
- return
- }
- t.handleSettings(sf)
-
- for {
- frame, err := t.framer.readFrame()
- if err != nil {
- t.Close()
- return
- }
- switch frame := frame.(type) {
- case *http2.MetaHeadersFrame:
- id := frame.Header().StreamID
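- // Client-initiated stream IDs must be odd and strictly increasing.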
- if id%2 != 1 || id <= t.maxStreamID {
- // illegal gRPC stream id.
- grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", id)
- t.Close()
- break
- }
- t.maxStreamID = id
- t.operateHeaders(frame, handle)
- case *http2.DataFrame:
- t.handleData(frame)
- case *http2.RSTStreamFrame:
- t.handleRSTStream(frame)
- case *http2.SettingsFrame:
- t.handleSettings(frame)
- case *http2.PingFrame:
- t.handlePing(frame)
- case *http2.WindowUpdateFrame:
- t.handleWindowUpdate(frame)
- case *http2.GoAwayFrame:
- break
- default:
- grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
- }
- }
-}
-
-func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
- t.mu.Lock()
- defer t.mu.Unlock()
- if t.activeStreams == nil {
- // The transport is closing.
- return nil, false
- }
- s, ok := t.activeStreams[f.Header().StreamID]
- if !ok {
- // The stream is already done.
- return nil, false
- }
- return s, true
-}
-
-// updateWindow adjusts the inbound quota for the stream and the transport.
-// Window updates will be delivered to the controller for sending when
-// the cumulative quota exceeds the corresponding threshold.
-func (t *http2Server) updateWindow(s *Stream, n uint32) {
- swu, cwu := s.fc.onRead(n)
- if swu > 0 {
- t.controlBuf.put(&windowUpdate{s.id, swu})
- }
- if cwu > 0 {
- t.controlBuf.put(&windowUpdate{0, cwu})
- }
-}
-
-func (t *http2Server) handleData(f *http2.DataFrame) {
- // Select the right stream to dispatch.
- s, ok := t.getStream(f)
- if !ok {
- return
- }
- size := len(f.Data())
- if size > 0 {
- if err := s.fc.onData(uint32(size)); err != nil {
- if _, ok := err.(ConnectionError); ok {
- grpclog.Printf("transport: http2Server %v", err)
- t.Close()
- return
- }
- t.closeStream(s)
- t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
- return
- }
- // TODO(bradfitz, zhaoq): A copy is required here because there is no
- // guarantee f.Data() is consumed before the arrival of next frame.
- // Can this copy be eliminated?
- data := make([]byte, size)
- copy(data, f.Data())
- s.write(recvMsg{data: data})
- }
- if f.Header().Flags.Has(http2.FlagDataEndStream) {
- // Received the end of stream from the client.
- s.mu.Lock()
- if s.state != streamDone {
- if s.state == streamWriteDone {
- s.state = streamDone
- } else {
- s.state = streamReadDone
- }
- }
- s.mu.Unlock()
- s.write(recvMsg{err: io.EOF})
- }
-}
-
-func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
- s, ok := t.getStream(f)
- if !ok {
- return
- }
- t.closeStream(s)
-}
-
-func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
- if f.IsAck() {
- return
- }
- var ss []http2.Setting
- f.ForeachSetting(func(s http2.Setting) error {
- ss = append(ss, s)
- return nil
- })
- // The settings will be applied once the ack is sent.
- t.controlBuf.put(&settings{ack: true, ss: ss})
-}
-
-func (t *http2Server) handlePing(f *http2.PingFrame) {
- pingAck := &ping{ack: true}
- copy(pingAck.data[:], f.Data[:])
- t.controlBuf.put(pingAck)
-}
-
-func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
- id := f.Header().StreamID
- incr := f.Increment
- if id == 0 {
- t.sendQuotaPool.add(int(incr))
- return
- }
- if s, ok := t.getStream(f); ok {
- s.sendQuotaPool.add(int(incr))
- }
-}
-
-func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error {
- first := true
- endHeaders := false
- var err error
- // Sends the headers in a single batch.
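- // A header block larger than the max frame size is split into a HEADERS
- // frame followed by CONTINUATION frames.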
- for !endHeaders {
- size := t.hBuf.Len()
- if size > http2MaxFrameLen {
- size = http2MaxFrameLen
- } else {
- endHeaders = true
- }
- if first {
- p := http2.HeadersFrameParam{
- StreamID: s.id,
- BlockFragment: b.Next(size),
- EndStream: endStream,
- EndHeaders: endHeaders,
- }
- err = t.framer.writeHeaders(endHeaders, p)
- first = false
- } else {
- err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size))
- }
- if err != nil {
- t.Close()
- return ConnectionErrorf("transport: %v", err)
- }
- }
- return nil
-}
-
-// WriteHeader sends the header metadata md back to the client.
-func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
- s.mu.Lock()
- if s.headerOk || s.state == streamDone {
- s.mu.Unlock()
- return ErrIllegalHeaderWrite
- }
- s.headerOk = true
- s.mu.Unlock()
- if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
- return err
- }
- t.hBuf.Reset()
- t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
- if s.sendCompress != "" {
- t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
- }
- for k, v := range md {
- for _, entry := range v {
- t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
- }
- }
- if err := t.writeHeaders(s, t.hBuf, false); err != nil {
- return err
- }
- t.writableChan <- 0
- return nil
-}
-
-// WriteStatus sends stream status to the client and terminates the stream.
-// No further I/O operations can be performed on this stream.
-// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
-// OK is adopted.
-func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
- var headersSent bool
- s.mu.Lock()
- if s.state == streamDone {
- s.mu.Unlock()
- return nil
- }
- if s.headerOk {
- headersSent = true
- }
- s.mu.Unlock()
- if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
- return err
- }
- t.hBuf.Reset()
- if !headersSent {
- t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
- }
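- // grpc-status and grpc-message always ride in the final headers block; if no
- // header frame was written earlier, this becomes a trailers-only response.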
- t.hEnc.WriteField(
- hpack.HeaderField{
- Name: "grpc-status",
- Value: strconv.Itoa(int(statusCode)),
- })
- t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: statusDesc})
- // Attach the trailer metadata.
- for k, v := range s.trailer {
- for _, entry := range v {
- t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
- }
- }
- if err := t.writeHeaders(s, t.hBuf, true); err != nil {
- t.Close()
- return err
- }
- t.closeStream(s)
- t.writableChan <- 0
- return nil
-}
-
-// Write converts the data into HTTP2 data frame and sends it out. Non-nil error
-// is returned if it fails (e.g., framing error, transport error).
-func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
- // TODO(zhaoq): Support multi-writers for a single stream.
- var writeHeaderFrame bool
- s.mu.Lock()
- if !s.headerOk {
- writeHeaderFrame = true
- s.headerOk = true
- }
- s.mu.Unlock()
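- // Lazily send the response headers on the first Write if WriteHeader has
- // not been called yet.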
- if writeHeaderFrame {
- if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
- return err
- }
- t.hBuf.Reset()
- t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
- if s.sendCompress != "" {
- t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
- }
- p := http2.HeadersFrameParam{
- StreamID: s.id,
- BlockFragment: t.hBuf.Bytes(),
- EndHeaders: true,
- }
- if err := t.framer.writeHeaders(false, p); err != nil {
- t.Close()
- return ConnectionErrorf("transport: %v", err)
- }
- t.writableChan <- 0
- }
- r := bytes.NewBuffer(data)
- for {
- if r.Len() == 0 {
- return nil
- }
- size := http2MaxFrameLen
- s.sendQuotaPool.add(0)
- // Wait until the stream has some quota to send the data.
- sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire())
- if err != nil {
- return err
- }
- t.sendQuotaPool.add(0)
- // Wait until the transport has some quota to send the data.
- tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire())
- if err != nil {
- if _, ok := err.(StreamError); ok {
- t.sendQuotaPool.cancel()
- }
- return err
- }
- if sq < size {
- size = sq
- }
- if tq < size {
- size = tq
- }
- p := r.Next(size)
- ps := len(p)
- if ps < sq {
- // Overbooked stream quota. Return it back.
- s.sendQuotaPool.add(sq - ps)
- }
- if ps < tq {
- // Overbooked transport quota. Return it back.
- t.sendQuotaPool.add(tq - ps)
- }
- t.framer.adjustNumWriters(1)
- // Got some quota. Try to acquire writing privilege on the
- // transport.
- if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
- if t.framer.adjustNumWriters(-1) == 0 {
- // This writer is the last one in this batch and has the
- // responsibility to flush the buffered frames. It queues
- // a flush request to controlBuf instead of flushing directly
- // in order to avoid the race with other writing or flushing.
- t.controlBuf.put(&flushIO{})
- }
- return err
- }
- var forceFlush bool
- if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last {
- forceFlush = true
- }
- if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil {
- t.Close()
- return ConnectionErrorf("transport: %v", err)
- }
- if t.framer.adjustNumWriters(-1) == 0 {
- t.framer.flushWrite()
- }
- t.writableChan <- 0
- }
-
-}
-
-func (t *http2Server) applySettings(ss []http2.Setting) {
- for _, s := range ss {
- if s.ID == http2.SettingInitialWindowSize {
- t.mu.Lock()
- defer t.mu.Unlock()
- for _, stream := range t.activeStreams {
- stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota))
- }
- t.streamSendQuota = s.Val
- }
-
- }
-}
-
-// controller running in a separate goroutine takes charge of sending control
-// frames (e.g., window update, reset stream, setting, etc.) to the client.
-func (t *http2Server) controller() {
- for {
- select {
- case i := <-t.controlBuf.get():
- t.controlBuf.load()
- select {
- case <-t.writableChan:
- switch i := i.(type) {
- case *windowUpdate:
- t.framer.writeWindowUpdate(true, i.streamID, i.increment)
- case *settings:
- if i.ack {
- t.framer.writeSettingsAck(true)
- t.applySettings(i.ss)
- } else {
- t.framer.writeSettings(true, i.ss...)
- }
- case *resetStream:
- t.framer.writeRSTStream(true, i.streamID, i.code)
- case *flushIO:
- t.framer.flushWrite()
- case *ping:
- t.framer.writePing(true, i.ack, i.data)
- default:
- grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i)
- }
- t.writableChan <- 0
- continue
- case <-t.shutdownChan:
- return
- }
- case <-t.shutdownChan:
- return
- }
- }
-}
-
-// Close starts shutting down the http2Server transport.
-// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
-// could cause some resource issue. Revisit this later.
-func (t *http2Server) Close() (err error) {
- t.mu.Lock()
- if t.state == closing {
- t.mu.Unlock()
- return errors.New("transport: Close() was already called")
- }
- t.state = closing
- streams := t.activeStreams
- t.activeStreams = nil
- t.mu.Unlock()
- close(t.shutdownChan)
- err = t.conn.Close()
- // Cancel all active streams.
- for _, s := range streams {
- s.cancel()
- }
- return
-}
-
-// closeStream clears the footprint of a stream when the stream is not needed
-// any more.
-func (t *http2Server) closeStream(s *Stream) {
- t.mu.Lock()
- delete(t.activeStreams, s.id)
- t.mu.Unlock()
- if q := s.fc.restoreConn(); q > 0 {
- t.controlBuf.put(&windowUpdate{0, q})
- }
- s.mu.Lock()
- if s.state == streamDone {
- s.mu.Unlock()
- return
- }
- s.state = streamDone
- s.mu.Unlock()
- // In case stream sending and receiving are invoked in separate
- // goroutines (e.g., bi-directional streaming), cancel needs to be
- // called to interrupt the potential blocking on other goroutines.
- s.cancel()
-}
-
-func (t *http2Server) RemoteAddr() net.Addr {
- return t.conn.RemoteAddr()
-}
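
The Write path above interleaves two flow-control quotas (per-stream and per-transport), then returns whatever was over-acquired once the actual frame size is known. Below is a minimal standalone sketch of that pattern, using a deliberately simplified, hypothetical quotaPool (a one-slot channel standing in for the pool defined elsewhere in this package) and simulating the peer's WINDOW_UPDATE by re-adding the written size:

    package main

    import "fmt"

    // quotaPool is a simplified stand-in for the transport's flow-control pool:
    // a one-slot channel holding the currently available quota.
    type quotaPool struct{ c chan int }

    func newQuotaPool(q int) *quotaPool {
    	p := &quotaPool{c: make(chan int, 1)}
    	p.c <- q
    	return p
    }

    // acquire takes the whole available quota; the caller gives back any excess.
    func (p *quotaPool) acquire() int { return <-p.c }

    // add merges quota back into the pool.
    func (p *quotaPool) add(q int) {
    	select {
    	case cur := <-p.c:
    		p.c <- cur + q
    	default:
    		p.c <- q
    	}
    }

    func main() {
    	const maxFrame = 16384
    	stream, transport := newQuotaPool(4096), newQuotaPool(65536)
    	payload := make([]byte, 10000)

    	for sent := 0; sent < len(payload); {
    		sq, tq := stream.acquire(), transport.acquire()
    		size := maxFrame
    		if sq < size {
    			size = sq
    		}
    		if tq < size {
    			size = tq
    		}
    		if rem := len(payload) - sent; rem < size {
    			size = rem
    		}
    		// Return the over-acquired portion, mirroring Write above.
    		stream.add(sq - size)
    		transport.add(tq - size)
    		// Pretend the peer sent a WINDOW_UPDATE for the stream.
    		stream.add(size)
    		fmt.Printf("wrote %d bytes\n", size)
    		sent += size
    	}
    }
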
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http_util.go
deleted file mode 100644
index 6aabcd4a36..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http_util.go
+++ /dev/null
@@ -1,406 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-package transport
-
-import (
- "bufio"
- "fmt"
- "io"
- "net"
- "strconv"
- "strings"
- "sync/atomic"
- "time"
-
- "golang.org/x/net/http2"
- "golang.org/x/net/http2/hpack"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/metadata"
-)
-
-const (
- // The primary user agent
- primaryUA = "grpc-go/0.11"
- // http2MaxFrameLen specifies the max length of an HTTP2 frame.
- http2MaxFrameLen = 16384 // 16KB frame
- // http://http2.github.io/http2-spec/#SettingValues
- http2InitHeaderTableSize = 4096
- // http2IOBufSize specifies the buffer size for sending frames.
- http2IOBufSize = 32 * 1024
-)
-
-var (
- clientPreface = []byte(http2.ClientPreface)
- http2ErrConvTab = map[http2.ErrCode]codes.Code{
- http2.ErrCodeNo: codes.Internal,
- http2.ErrCodeProtocol: codes.Internal,
- http2.ErrCodeInternal: codes.Internal,
- http2.ErrCodeFlowControl: codes.ResourceExhausted,
- http2.ErrCodeSettingsTimeout: codes.Internal,
- http2.ErrCodeFrameSize: codes.Internal,
- http2.ErrCodeRefusedStream: codes.Unavailable,
- http2.ErrCodeCancel: codes.Canceled,
- http2.ErrCodeCompression: codes.Internal,
- http2.ErrCodeConnect: codes.Internal,
- http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
- http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
- http2.ErrCodeHTTP11Required: codes.FailedPrecondition,
- }
- statusCodeConvTab = map[codes.Code]http2.ErrCode{
- codes.Internal: http2.ErrCodeInternal,
- codes.Canceled: http2.ErrCodeCancel,
- codes.Unavailable: http2.ErrCodeRefusedStream,
- codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
- codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
- }
-)
-
-// Records the states during HPACK decoding. Must be reset once the
-// decoding of the entire headers is finished.
-type decodeState struct {
- err error // first error encountered decoding
-
- encoding string
- // statusCode caches the stream status received from the trailer
- // the server sent. Client side only.
- statusCode codes.Code
- statusDesc string
- // Server side only fields.
- timeoutSet bool
- timeout time.Duration
- method string
- // key-value metadata map from the peer.
- mdata map[string][]string
-}
-
-// isReservedHeader checks whether hdr belongs to HTTP2 headers
-// reserved by the gRPC protocol. Any other headers are classified as
-// user-specified metadata.
-func isReservedHeader(hdr string) bool {
- if hdr != "" && hdr[0] == ':' {
- return true
- }
- switch hdr {
- case "content-type",
- "grpc-message-type",
- "grpc-encoding",
- "grpc-message",
- "grpc-status",
- "grpc-timeout",
- "te":
- return true
- default:
- return false
- }
-}
-
-func (d *decodeState) setErr(err error) {
- if d.err == nil {
- d.err = err
- }
-}
-
-func (d *decodeState) processHeaderField(f hpack.HeaderField) {
- switch f.Name {
- case "content-type":
- if !strings.Contains(f.Value, "application/grpc") {
- d.setErr(StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value))
- return
- }
- case "grpc-encoding":
- d.encoding = f.Value
- case "grpc-status":
- code, err := strconv.Atoi(f.Value)
- if err != nil {
- d.setErr(StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err))
- return
- }
- d.statusCode = codes.Code(code)
- case "grpc-message":
- d.statusDesc = f.Value
- case "grpc-timeout":
- d.timeoutSet = true
- var err error
- d.timeout, err = timeoutDecode(f.Value)
- if err != nil {
- d.setErr(StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err))
- return
- }
- case ":path":
- d.method = f.Value
- default:
- if !isReservedHeader(f.Name) {
- if f.Name == "user-agent" {
- i := strings.LastIndex(f.Value, " ")
- if i == -1 {
- // There is no application user agent string being set.
- return
- }
- // Extract the application user agent string.
- f.Value = f.Value[:i]
- }
- if d.mdata == nil {
- d.mdata = make(map[string][]string)
- }
- k, v, err := metadata.DecodeKeyValue(f.Name, f.Value)
- if err != nil {
- grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err)
- return
- }
- d.mdata[k] = append(d.mdata[k], v)
- }
- }
-}
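
The user-agent branch above keeps only the application-supplied portion of the header, dropping the transport's own suffix after the last space. A tiny standalone illustration of that trimming rule (the example value is made up):

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// Wire value is "<application UA> <transport UA>"; keep the application
    	// part, mirroring the user-agent handling in processHeaderField above.
    	ua := "my-client/1.2 grpc-go/0.11"
    	if i := strings.LastIndex(ua, " "); i != -1 {
    		ua = ua[:i]
    	} else {
    		ua = "" // no application user agent was set
    	}
    	fmt.Println(ua) // my-client/1.2
    }
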
-
-type timeoutUnit uint8
-
-const (
- hour timeoutUnit = 'H'
- minute timeoutUnit = 'M'
- second timeoutUnit = 'S'
- millisecond timeoutUnit = 'm'
- microsecond timeoutUnit = 'u'
- nanosecond timeoutUnit = 'n'
-)
-
-func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
- switch u {
- case hour:
- return time.Hour, true
- case minute:
- return time.Minute, true
- case second:
- return time.Second, true
- case millisecond:
- return time.Millisecond, true
- case microsecond:
- return time.Microsecond, true
- case nanosecond:
- return time.Nanosecond, true
- default:
- }
- return
-}
-
-const maxTimeoutValue int64 = 100000000 - 1
-
-// div does integer division and rounds up the result. Note that this is
-// equivalent to (d+r-1)/r but is less likely to overflow.
-func div(d, r time.Duration) int64 {
- if m := d % r; m > 0 {
- return int64(d/r + 1)
- }
- return int64(d / r)
-}
-
-// TODO(zhaoq): This is simplistic and not bandwidth efficient. Improve it.
-func timeoutEncode(t time.Duration) string {
- if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "n"
- }
- if d := div(t, time.Microsecond); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "u"
- }
- if d := div(t, time.Millisecond); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "m"
- }
- if d := div(t, time.Second); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "S"
- }
- if d := div(t, time.Minute); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "M"
- }
- // Note that maxTimeoutValue * time.Hour > MaxInt64.
- return strconv.FormatInt(div(t, time.Hour), 10) + "H"
-}
-
-func timeoutDecode(s string) (time.Duration, error) {
- size := len(s)
- if size < 2 {
- return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
- }
- unit := timeoutUnit(s[size-1])
- d, ok := timeoutUnitToDuration(unit)
- if !ok {
- return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
- }
- t, err := strconv.ParseInt(s[:size-1], 10, 64)
- if err != nil {
- return 0, err
- }
- return d * time.Duration(t), nil
-}
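
The grpc-timeout header value is at most eight ASCII digits followed by a one-letter unit, which is why timeoutEncode walks up the units until the count fits. A small sketch of the round trip, written as if it lived in this package since both helpers are unexported; the expected values apply only to this particular input:

    // Illustrative only: timeoutEncode and timeoutDecode are unexported,
    // so this function would have to live in package transport.
    func exampleTimeoutRoundTrip() {
    	d := 100 * time.Millisecond
    	s := timeoutEncode(d) // "100000u": 1e8 nanoseconds exceeds the 8-digit limit, so microseconds are used
    	got, err := timeoutDecode(s)
    	if err != nil || got != d {
    		panic(fmt.Sprintf("round trip mismatch: %q -> %v, %v", s, got, err))
    	}
    }
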
-
-type framer struct {
- numWriters int32
- reader io.Reader
- writer *bufio.Writer
- fr *http2.Framer
-}
-
-func newFramer(conn net.Conn) *framer {
- f := &framer{
- reader: bufio.NewReaderSize(conn, http2IOBufSize),
- writer: bufio.NewWriterSize(conn, http2IOBufSize),
- }
- f.fr = http2.NewFramer(f.writer, f.reader)
- f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
- return f
-}
-
-func (f *framer) adjustNumWriters(i int32) int32 {
- return atomic.AddInt32(&f.numWriters, i)
-}
-
-// The following writeXXX functions can only be called when the caller gets
-// unblocked from the writableChan channel (i.e., owns the privilege to write).
-
-func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
- if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error {
- if err := f.fr.WriteData(streamID, endStream, data); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error {
- if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error {
- if err := f.fr.WriteHeaders(p); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error {
- if err := f.fr.WritePing(ack, data); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error {
- if err := f.fr.WritePriority(streamID, p); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error {
- if err := f.fr.WritePushPromise(p); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error {
- if err := f.fr.WriteRSTStream(streamID, code); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error {
- if err := f.fr.WriteSettings(settings...); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writeSettingsAck(forceFlush bool) error {
- if err := f.fr.WriteSettingsAck(); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error {
- if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) flushWrite() error {
- return f.writer.Flush()
-}
-
-func (f *framer) readFrame() (http2.Frame, error) {
- return f.fr.ReadFrame()
-}
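
Every writeXXX wrapper above has the same shape: emit the frame into the bufio.Writer and only touch the socket when forceFlush is set (or when flushWrite is called for the last writer in a batch). A standalone sketch of that buffering behaviour against the public golang.org/x/net/http2 framer, using a bytes.Buffer in place of the net.Conn:

    package main

    import (
    	"bufio"
    	"bytes"
    	"fmt"

    	"golang.org/x/net/http2"
    )

    func main() {
    	var conn bytes.Buffer // stands in for the net.Conn
    	bw := bufio.NewWriterSize(&conn, 32*1024)
    	fr := http2.NewFramer(bw, &conn)

    	// The frame sits in the bufio.Writer until a flush is forced,
    	// which is exactly what the forceFlush parameter controls above.
    	if err := fr.WriteSettings(http2.Setting{ID: http2.SettingInitialWindowSize, Val: 65535}); err != nil {
    		panic(err)
    	}
    	fmt.Println("on the wire before flush:", conn.Len()) // 0: still buffered
    	if err := bw.Flush(); err != nil {
    		panic(err)
    	}
    	fmt.Println("on the wire after flush:", conn.Len()) // 15: 9-byte header plus one 6-byte setting
    }
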
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/transport.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/transport.go
deleted file mode 100644
index f027cae558..0000000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/transport.go
+++ /dev/null
@@ -1,508 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-/*
-Package transport defines and implements a message-oriented communication channel
-to complete various transactions (e.g., an RPC).
-*/
-package transport
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "net"
- "sync"
- "time"
-
- "golang.org/x/net/context"
- "golang.org/x/net/trace"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/metadata"
-)
-
-// recvMsg represents the received msg from the transport. All transport
-// protocol specific info has been removed.
-type recvMsg struct {
- data []byte
- // nil: received some data
- // io.EOF: stream is completed. data is nil.
- // other non-nil error: transport failure. data is nil.
- err error
-}
-
-func (recvMsg) isItem() bool {
- return true
-}
-
-// All items in and out of a recvBuffer should be the same type.
-type item interface {
- isItem() bool
-}
-
-// recvBuffer is an unbounded channel of item.
-type recvBuffer struct {
- c chan item
- mu sync.Mutex
- backlog []item
-}
-
-func newRecvBuffer() *recvBuffer {
- b := &recvBuffer{
- c: make(chan item, 1),
- }
- return b
-}
-
-func (b *recvBuffer) put(r item) {
- b.mu.Lock()
- defer b.mu.Unlock()
- b.backlog = append(b.backlog, r)
- select {
- case b.c <- b.backlog[0]:
- b.backlog = b.backlog[1:]
- default:
- }
-}
-
-func (b *recvBuffer) load() {
- b.mu.Lock()
- defer b.mu.Unlock()
- if len(b.backlog) > 0 {
- select {
- case b.c <- b.backlog[0]:
- b.backlog = b.backlog[1:]
- default:
- }
- }
-}
-
-// get returns the channel that receives an item in the buffer.
-//
-// Upon receipt of an item, the caller should call load to send another
-// item onto the channel if there is any.
-func (b *recvBuffer) get() <-chan item {
- return b.c
-}
-
-// recvBufferReader implements io.Reader interface to read the data from
-// recvBuffer.
-type recvBufferReader struct {
- ctx context.Context
- recv *recvBuffer
- last *bytes.Reader // Stores the remaining data in the previous calls.
- err error
-}
-
-// Read reads the next len(p) bytes from last. If last is drained, it tries to
-// read additional data from recv. It blocks if there is no additional data available
-// in recv. If Read returns any non-nil error, it will continue to return that error.
-func (r *recvBufferReader) Read(p []byte) (n int, err error) {
- if r.err != nil {
- return 0, r.err
- }
- defer func() { r.err = err }()
- if r.last != nil && r.last.Len() > 0 {
- // Read remaining data left in last call.
- return r.last.Read(p)
- }
- select {
- case <-r.ctx.Done():
- return 0, ContextErr(r.ctx.Err())
- case i := <-r.recv.get():
- r.recv.load()
- m := i.(*recvMsg)
- if m.err != nil {
- return 0, m.err
- }
- r.last = bytes.NewReader(m.data)
- return r.last.Read(p)
- }
-}
-
-type streamState uint8
-
-const (
- streamActive streamState = iota
- streamWriteDone // EndStream sent
- streamReadDone // EndStream received
- streamDone // sendDone and recvDone or RSTStreamFrame is sent or received.
-)
-
-// Stream represents an RPC in the transport layer.
-type Stream struct {
- id uint32
- // nil for client side Stream.
- st ServerTransport
- // ctx is the associated context of the stream.
- ctx context.Context
- cancel context.CancelFunc
- // method records the associated RPC method of the stream.
- method string
- recvCompress string
- sendCompress string
- buf *recvBuffer
- dec io.Reader
- fc *inFlow
- recvQuota uint32
- // The accumulated inbound quota pending for window update.
- updateQuota uint32
- // The handler to control the window update procedure for both this
- // particular stream and the associated transport.
- windowHandler func(int)
-
- sendQuotaPool *quotaPool
- // Close headerChan to indicate the end of reception of header metadata.
- headerChan chan struct{}
- // header caches the received header metadata.
- header metadata.MD
- // The key-value map of trailer metadata.
- trailer metadata.MD
-
- mu sync.RWMutex // guard the following
- // headerOk becomes true when the first header is about to be sent.
- headerOk bool
- state streamState
- // true iff headerChan is closed. Used to avoid closing headerChan
- // multiple times.
- headerDone bool
- // the status received from the server.
- statusCode codes.Code
- statusDesc string
-}
-
-// RecvCompress returns the compression algorithm applied to the inbound
-// message. It is an empty string if no compression is applied.
-func (s *Stream) RecvCompress() string {
- return s.recvCompress
-}
-
-// SetSendCompress sets the compression algorithm for the stream.
-func (s *Stream) SetSendCompress(str string) {
- s.sendCompress = str
-}
-
-// Header acquires the key-value pairs of header metadata once they
-// are available. It blocks until i) the metadata is ready or ii) there is no
-// header metadata or iii) the stream is cancelled/expired.
-func (s *Stream) Header() (metadata.MD, error) {
- select {
- case <-s.ctx.Done():
- return nil, ContextErr(s.ctx.Err())
- case <-s.headerChan:
- return s.header.Copy(), nil
- }
-}
-
-// Trailer returns the cached trailer metadata. Note that if it is not called
-// after the entire stream is done, it could return an empty MD. Client
-// side only.
-func (s *Stream) Trailer() metadata.MD {
- s.mu.RLock()
- defer s.mu.RUnlock()
- return s.trailer.Copy()
-}
-
-// ServerTransport returns the underlying ServerTransport for the stream.
-// The client side stream always returns nil.
-func (s *Stream) ServerTransport() ServerTransport {
- return s.st
-}
-
-// Context returns the context of the stream.
-func (s *Stream) Context() context.Context {
- return s.ctx
-}
-
-// TraceContext recreates the context of s with a trace.Trace.
-func (s *Stream) TraceContext(tr trace.Trace) {
- s.ctx = trace.NewContext(s.ctx, tr)
-}
-
-// Method returns the method for the stream.
-func (s *Stream) Method() string {
- return s.method
-}
-
-// StatusCode returns statusCode received from the server.
-func (s *Stream) StatusCode() codes.Code {
- return s.statusCode
-}
-
-// StatusDesc returns statusDesc received from the server.
-func (s *Stream) StatusDesc() string {
- return s.statusDesc
-}
-
-// ErrIllegalTrailerSet indicates that the trailer has already been set or it
-// is too late to do so.
-var ErrIllegalTrailerSet = errors.New("transport: trailer has been set")
-
-// SetTrailer sets the trailer metadata which will be sent with the RPC status
-// by the server. This can only be called at most once. Server side only.
-func (s *Stream) SetTrailer(md metadata.MD) error {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.trailer != nil {
- return ErrIllegalTrailerSet
- }
- s.trailer = md.Copy()
- return nil
-}
-
-func (s *Stream) write(m recvMsg) {
- s.buf.put(&m)
-}
-
-// Read reads all the data available for this Stream from the transport and
-// passes them into the decoder, which converts them into a gRPC message stream.
-// The error is io.EOF when the stream is done or another non-nil error if
-// the stream broke.
-func (s *Stream) Read(p []byte) (n int, err error) {
- n, err = s.dec.Read(p)
- if err != nil {
- return
- }
- s.windowHandler(n)
- return
-}
-
-// The key to save transport.Stream in the context.
-type streamKey struct{}
-
-// newContextWithStream creates a new context from ctx and attaches stream
-// to it.
-func newContextWithStream(ctx context.Context, stream *Stream) context.Context {
- return context.WithValue(ctx, streamKey{}, stream)
-}
-
-// StreamFromContext returns the stream saved in ctx.
-func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
- s, ok = ctx.Value(streamKey{}).(*Stream)
- return
-}
-
-// state of transport
-type transportState int
-
-const (
- reachable transportState = iota
- unreachable
- closing
-)
-
-// NewServerTransport creates a ServerTransport with conn or non-nil error
-// if it fails.
-func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (ServerTransport, error) {
- return newHTTP2Server(conn, maxStreams, authInfo)
-}
-
-// ConnectOptions covers all relevant options for dialing a server.
-type ConnectOptions struct {
- // UserAgent is the application user agent.
- UserAgent string
- // Dialer specifies how to dial a network address.
- Dialer func(string, time.Duration) (net.Conn, error)
- // AuthOptions stores the credentials required to set up a client connection and/or issue RPCs.
- AuthOptions []credentials.Credentials
- // Timeout specifies the timeout for dialing a client connection.
- Timeout time.Duration
-}
-
-// NewClientTransport establishes the transport with the required ConnectOptions
-// and returns it to the caller.
-func NewClientTransport(target string, opts *ConnectOptions) (ClientTransport, error) {
- return newHTTP2Client(target, opts)
-}
-
-// Options provides additional hints and information for message
-// transmission.
-type Options struct {
- // Last indicates whether this write is the last piece for
- // this stream.
- Last bool
-
- // Delay is a hint to the transport implementation for whether
- // the data could be buffered for a batching write. The
- // Transport implementation may ignore the hint.
- Delay bool
-}
-
-// CallHdr carries the information of a particular RPC.
-type CallHdr struct {
- // Host specifies the peer's host.
- Host string
-
- // Method specifies the operation to perform.
- Method string
-
- // RecvCompress specifies the compression algorithm applied on
- // inbound messages.
- RecvCompress string
-
- // SendCompress specifies the compression algorithm applied on
- // outbound message.
- SendCompress string
-
- // Flush indicates whether a new stream command should be sent
- // to the peer without waiting for the first data. This is
- // only a hint. The transport may modify the flush decision
- // for performance purposes.
- Flush bool
-}
-
-// ClientTransport is the common interface for all gRPC client-side transport
-// implementations.
-type ClientTransport interface {
- // Close tears down this transport. Once it returns, the transport
- // should not be accessed any more. The caller must make sure this
- // is called only once.
- Close() error
-
- // Write sends the data for the given stream. A nil stream indicates
- // the write is to be performed on the transport as a whole.
- Write(s *Stream, data []byte, opts *Options) error
-
- // NewStream creates a Stream for an RPC.
- NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
-
- // CloseStream clears the footprint of a stream when the stream is
- // not needed any more. The err indicates the error incurred when
- // CloseStream is called. Must be called when a stream is finished
- // unless the associated transport is closing.
- CloseStream(stream *Stream, err error)
-
- // Error returns a channel that is closed when some I/O error
- // happens. Typically the caller should have a goroutine to monitor
- // this in order to take action (e.g., close the current transport
- // and create a new one) in error case. It should not return nil
- // once the transport is initiated.
- Error() <-chan struct{}
-}
-
-// ServerTransport is the common interface for all gRPC server-side transport
-// implementations.
-//
-// Methods may be called concurrently from multiple goroutines, but
-// Write methods for a given Stream will be called serially.
-type ServerTransport interface {
- // HandleStreams receives incoming streams using the given handler.
- HandleStreams(func(*Stream))
-
- // WriteHeader sends the header metadata for the given stream.
- // WriteHeader may not be called on all streams.
- WriteHeader(s *Stream, md metadata.MD) error
-
- // Write sends the data for the given stream.
- // Write may not be called on all streams.
- Write(s *Stream, data []byte, opts *Options) error
-
- // WriteStatus sends the status of a stream to the client.
- // WriteStatus is the final call made on a stream and always
- // occurs.
- WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error
-
- // Close tears down the transport. Once it is called, the transport
- // should not be accessed any more. All the pending streams and their
- // handlers will be terminated asynchronously.
- Close() error
-
- // RemoteAddr returns the remote network address.
- RemoteAddr() net.Addr
-}
-
-// StreamErrorf creates a StreamError with the specified error code and description.
-func StreamErrorf(c codes.Code, format string, a ...interface{}) StreamError {
- return StreamError{
- Code: c,
- Desc: fmt.Sprintf(format, a...),
- }
-}
-
-// ConnectionErrorf creates a ConnectionError with the specified error description.
-func ConnectionErrorf(format string, a ...interface{}) ConnectionError {
- return ConnectionError{
- Desc: fmt.Sprintf(format, a...),
- }
-}
-
-// ConnectionError is an error that results in the termination of the
-// entire connection and the retry of all the active streams.
-type ConnectionError struct {
- Desc string
-}
-
-func (e ConnectionError) Error() string {
- return fmt.Sprintf("connection error: desc = %q", e.Desc)
-}
-
-// Define some common ConnectionErrors.
-var ErrConnClosing = ConnectionError{Desc: "transport is closing"}
-
-// StreamError is an error that only affects one stream within a connection.
-type StreamError struct {
- Code codes.Code
- Desc string
-}
-
-func (e StreamError) Error() string {
- return fmt.Sprintf("stream error: code = %d desc = %q", e.Code, e.Desc)
-}
-
-// ContextErr converts the error from the context package into a StreamError.
-func ContextErr(err error) StreamError {
- switch err {
- case context.DeadlineExceeded:
- return StreamErrorf(codes.DeadlineExceeded, "%v", err)
- case context.Canceled:
- return StreamErrorf(codes.Canceled, "%v", err)
- }
- panic(fmt.Sprintf("Unexpected error from context packet: %v", err))
-}
-
-// wait blocks until it can receive from ctx.Done, closing, or proceed.
-// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
-// If it receives from closing, it returns 0, ErrConnClosing.
-// If it receives from proceed, it returns the received integer, nil.
-func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) {
- select {
- case <-ctx.Done():
- return 0, ContextErr(ctx.Err())
- case <-closing:
- return 0, ErrConnClosing
- case i := <-proceed:
- return i, nil
- }
-}
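
recvBuffer above is the transport's unbounded queue: put never blocks because items overflow into a mutex-guarded backlog, and the consumer must call load after every receive to promote the next backlog entry onto the one-slot channel. A condensed standalone version of the same pattern, specialised to strings for brevity:

    package main

    import (
    	"fmt"
    	"sync"
    )

    // unbounded mirrors recvBuffer: a one-slot channel fed from a backlog slice.
    type unbounded struct {
    	c       chan string
    	mu      sync.Mutex
    	backlog []string
    }

    func newUnbounded() *unbounded { return &unbounded{c: make(chan string, 1)} }

    func (b *unbounded) put(v string) {
    	b.mu.Lock()
    	defer b.mu.Unlock()
    	b.backlog = append(b.backlog, v)
    	select {
    	case b.c <- b.backlog[0]:
    		b.backlog = b.backlog[1:]
    	default: // channel already holds an item; keep v in the backlog
    	}
    }

    // load moves the next backlog entry onto the channel; call it after each receive.
    func (b *unbounded) load() {
    	b.mu.Lock()
    	defer b.mu.Unlock()
    	if len(b.backlog) > 0 {
    		select {
    		case b.c <- b.backlog[0]:
    			b.backlog = b.backlog[1:]
    		default:
    		}
    	}
    }

    func main() {
    	b := newUnbounded()
    	for i := 0; i < 3; i++ {
    		b.put(fmt.Sprintf("msg-%d", i)) // never blocks, even with no consumer
    	}
    	for i := 0; i < 3; i++ {
    		fmt.Println(<-b.c)
    		b.load()
    	}
    }
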
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/.gitignore b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/.gitignore
deleted file mode 100644
index 191a5360b7..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-_*
-*.swp
-*.[568]
-[568].out
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/LICENSE b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/LICENSE
deleted file mode 100644
index 545cf2d331..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/LICENSE
+++ /dev/null
@@ -1,25 +0,0 @@
-Gocheck - A rich testing framework for Go
-
-Copyright (c) 2010-2013 Gustavo Niemeyer
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/README.md b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/README.md
deleted file mode 100644
index 0ca9e57260..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/README.md
+++ /dev/null
@@ -1,20 +0,0 @@
-Instructions
-============
-
-Install the package with:
-
- go get gopkg.in/check.v1
-
-Import it with:
-
- import "gopkg.in/check.v1"
-
-and use _check_ as the package name inside the code.
-
-For more details, visit the project page:
-
-* http://labix.org/gocheck
-
-and the API documentation:
-
-* https://gopkg.in/check.v1
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/TODO b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/TODO
deleted file mode 100644
index 33498270ea..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/TODO
+++ /dev/null
@@ -1,2 +0,0 @@
-- Assert(slice, Contains, item)
-- Parallel test support
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/benchmark.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/benchmark.go
deleted file mode 100644
index 48cb8c8114..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/benchmark.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package check
-
-import (
- "fmt"
- "runtime"
- "time"
-)
-
-var memStats runtime.MemStats
-
-// timer manages benchmark timing and specifies the number of
-// iterations to run.
-type timer struct {
- start time.Time // Time test or benchmark started
- duration time.Duration
- N int
- bytes int64
- timerOn bool
- benchTime time.Duration
- // The initial states of memStats.Mallocs and memStats.TotalAlloc.
- startAllocs uint64
- startBytes uint64
- // The net total of this test after being run.
- netAllocs uint64
- netBytes uint64
-}
-
-// StartTimer starts timing a test. This function is called automatically
-// before a benchmark starts, but it can also be used to resume timing after
-// a call to StopTimer.
-func (c *C) StartTimer() {
- if !c.timerOn {
- c.start = time.Now()
- c.timerOn = true
-
- runtime.ReadMemStats(&memStats)
- c.startAllocs = memStats.Mallocs
- c.startBytes = memStats.TotalAlloc
- }
-}
-
-// StopTimer stops timing a test. This can be used to pause the timer
-// while performing complex initialization that you don't
-// want to measure.
-func (c *C) StopTimer() {
- if c.timerOn {
- c.duration += time.Now().Sub(c.start)
- c.timerOn = false
- runtime.ReadMemStats(&memStats)
- c.netAllocs += memStats.Mallocs - c.startAllocs
- c.netBytes += memStats.TotalAlloc - c.startBytes
- }
-}
-
-// ResetTimer sets the elapsed benchmark time to zero.
-// It does not affect whether the timer is running.
-func (c *C) ResetTimer() {
- if c.timerOn {
- c.start = time.Now()
- runtime.ReadMemStats(&memStats)
- c.startAllocs = memStats.Mallocs
- c.startBytes = memStats.TotalAlloc
- }
- c.duration = 0
- c.netAllocs = 0
- c.netBytes = 0
-}
-
-// SetBytes records the number of bytes that the benchmark processes
-// on each iteration. If this is called in a benchmark, it will also
-// report MB/s.
-func (c *C) SetBytes(n int64) {
- c.bytes = n
-}
-
-func (c *C) nsPerOp() int64 {
- if c.N <= 0 {
- return 0
- }
- return c.duration.Nanoseconds() / int64(c.N)
-}
-
-func (c *C) mbPerSec() float64 {
- if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 {
- return 0
- }
- return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds()
-}
-
-func (c *C) timerString() string {
- if c.N <= 0 {
- return fmt.Sprintf("%3.3fs", float64(c.duration.Nanoseconds())/1e9)
- }
- mbs := c.mbPerSec()
- mb := ""
- if mbs != 0 {
- mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
- }
- nsop := c.nsPerOp()
- ns := fmt.Sprintf("%10d ns/op", nsop)
- if c.N > 0 && nsop < 100 {
- // The format specifiers here make sure that
- // the ones digits line up for all three possible formats.
- if nsop < 10 {
- ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
- } else {
- ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
- }
- }
- memStats := ""
- if c.benchMem {
- allocedBytes := fmt.Sprintf("%8d B/op", int64(c.netBytes)/int64(c.N))
- allocs := fmt.Sprintf("%8d allocs/op", int64(c.netAllocs)/int64(c.N))
- memStats = fmt.Sprintf("\t%s\t%s", allocedBytes, allocs)
- }
- return fmt.Sprintf("%8d\t%s%s%s", c.N, ns, mb, memStats)
-}
-
-func min(x, y int) int {
- if x > y {
- return y
- }
- return x
-}
-
-func max(x, y int) int {
- if x < y {
- return y
- }
- return x
-}
-
-// roundDown10 rounds a number down to the nearest power of 10.
-func roundDown10(n int) int {
- var tens = 0
- // tens = floor(log_10(n))
- for n > 10 {
- n = n / 10
- tens++
- }
- // result = 10^tens
- result := 1
- for i := 0; i < tens; i++ {
- result *= 10
- }
- return result
-}
-
-// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX].
-func roundUp(n int) int {
- base := roundDown10(n)
- if n < (2 * base) {
- return 2 * base
- }
- if n < (5 * base) {
- return 5 * base
- }
- return 10 * base
-}
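
roundUp drives how the benchmark iteration count grows: the runner keeps increasing N and then snaps it to the nearest 1eX, 2eX or 5eX. A few concrete values, written as if inside package check since roundUp is unexported:

    // Illustrative only: roundUp is unexported, so this would have to live in
    // package check (for example in a _test.go file there).
    func exampleRoundUp() {
    	fmt.Println(roundUp(1200))  // 2000
    	fmt.Println(roundUp(4800))  // 5000
    	fmt.Println(roundUp(60000)) // 100000
    }
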
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/check.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/check.go
deleted file mode 100644
index ca8c0f92de..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/check.go
+++ /dev/null
@@ -1,945 +0,0 @@
-// Package check is a rich testing extension for Go's testing package.
-//
-// For details about the project, see:
-//
-// http://labix.org/gocheck
-//
-package check
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "math/rand"
- "os"
- "path"
- "path/filepath"
- "reflect"
- "regexp"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "time"
-)
-
-// -----------------------------------------------------------------------
-// Internal type which deals with suite method calling.
-
-const (
- fixtureKd = iota
- testKd
-)
-
-type funcKind int
-
-const (
- succeededSt = iota
- failedSt
- skippedSt
- panickedSt
- fixturePanickedSt
- missedSt
-)
-
-type funcStatus int
-
-// A method value can't reach its own Method structure.
-type methodType struct {
- reflect.Value
- Info reflect.Method
-}
-
-func newMethod(receiver reflect.Value, i int) *methodType {
- return &methodType{receiver.Method(i), receiver.Type().Method(i)}
-}
-
-func (method *methodType) PC() uintptr {
- return method.Info.Func.Pointer()
-}
-
-func (method *methodType) suiteName() string {
- t := method.Info.Type.In(0)
- if t.Kind() == reflect.Ptr {
- t = t.Elem()
- }
- return t.Name()
-}
-
-func (method *methodType) String() string {
- return method.suiteName() + "." + method.Info.Name
-}
-
-func (method *methodType) matches(re *regexp.Regexp) bool {
- return (re.MatchString(method.Info.Name) ||
- re.MatchString(method.suiteName()) ||
- re.MatchString(method.String()))
-}
-
-type C struct {
- method *methodType
- kind funcKind
- testName string
- status funcStatus
- logb *logger
- logw io.Writer
- done chan *C
- reason string
- mustFail bool
- tempDir *tempDir
- benchMem bool
- startTime time.Time
- timer
-}
-
-func (c *C) stopNow() {
- runtime.Goexit()
-}
-
-// logger is a concurrency-safe bytes.Buffer
-type logger struct {
- sync.Mutex
- writer bytes.Buffer
-}
-
-func (l *logger) Write(buf []byte) (int, error) {
- l.Lock()
- defer l.Unlock()
- return l.writer.Write(buf)
-}
-
-func (l *logger) WriteTo(w io.Writer) (int64, error) {
- l.Lock()
- defer l.Unlock()
- return l.writer.WriteTo(w)
-}
-
-func (l *logger) String() string {
- l.Lock()
- defer l.Unlock()
- return l.writer.String()
-}
-
-// -----------------------------------------------------------------------
-// Handling of temporary files and directories.
-
-type tempDir struct {
- sync.Mutex
- path string
- counter int
-}
-
-func (td *tempDir) newPath() string {
- td.Lock()
- defer td.Unlock()
- if td.path == "" {
- var err error
- for i := 0; i != 100; i++ {
- path := fmt.Sprintf("%s%ccheck-%d", os.TempDir(), os.PathSeparator, rand.Int())
- if err = os.Mkdir(path, 0700); err == nil {
- td.path = path
- break
- }
- }
- if td.path == "" {
- panic("Couldn't create temporary directory: " + err.Error())
- }
- }
- result := filepath.Join(td.path, strconv.Itoa(td.counter))
- td.counter += 1
- return result
-}
-
-func (td *tempDir) removeAll() {
- td.Lock()
- defer td.Unlock()
- if td.path != "" {
- err := os.RemoveAll(td.path)
- if err != nil {
- fmt.Fprintf(os.Stderr, "WARNING: Error cleaning up temporaries: "+err.Error())
- }
- }
-}
-
-// Create a new temporary directory which is automatically removed after
-// the suite finishes running.
-func (c *C) MkDir() string {
- path := c.tempDir.newPath()
- if err := os.Mkdir(path, 0700); err != nil {
- panic(fmt.Sprintf("Couldn't create temporary directory %s: %s", path, err.Error()))
- }
- return path
-}
-
-// -----------------------------------------------------------------------
-// Low-level logging functions.
-
-func (c *C) log(args ...interface{}) {
- c.writeLog([]byte(fmt.Sprint(args...) + "\n"))
-}
-
-func (c *C) logf(format string, args ...interface{}) {
- c.writeLog([]byte(fmt.Sprintf(format+"\n", args...)))
-}
-
-func (c *C) logNewLine() {
- c.writeLog([]byte{'\n'})
-}
-
-func (c *C) writeLog(buf []byte) {
- c.logb.Write(buf)
- if c.logw != nil {
- c.logw.Write(buf)
- }
-}
-
-func hasStringOrError(x interface{}) (ok bool) {
- _, ok = x.(fmt.Stringer)
- if ok {
- return
- }
- _, ok = x.(error)
- return
-}
-
-func (c *C) logValue(label string, value interface{}) {
- if label == "" {
- if hasStringOrError(value) {
- c.logf("... %#v (%q)", value, value)
- } else {
- c.logf("... %#v", value)
- }
- } else if value == nil {
- c.logf("... %s = nil", label)
- } else {
- if hasStringOrError(value) {
- fv := fmt.Sprintf("%#v", value)
- qv := fmt.Sprintf("%q", value)
- if fv != qv {
- c.logf("... %s %s = %s (%s)", label, reflect.TypeOf(value), fv, qv)
- return
- }
- }
- if s, ok := value.(string); ok && isMultiLine(s) {
- c.logf(`... %s %s = "" +`, label, reflect.TypeOf(value))
- c.logMultiLine(s)
- } else {
- c.logf("... %s %s = %#v", label, reflect.TypeOf(value), value)
- }
- }
-}
-
-func (c *C) logMultiLine(s string) {
- b := make([]byte, 0, len(s)*2)
- i := 0
- n := len(s)
- for i < n {
- j := i + 1
- for j < n && s[j-1] != '\n' {
- j++
- }
- b = append(b, "... "...)
- b = strconv.AppendQuote(b, s[i:j])
- if j < n {
- b = append(b, " +"...)
- }
- b = append(b, '\n')
- i = j
- }
- c.writeLog(b)
-}
-
-func isMultiLine(s string) bool {
- for i := 0; i+1 < len(s); i++ {
- if s[i] == '\n' {
- return true
- }
- }
- return false
-}
-
-func (c *C) logString(issue string) {
- c.log("... ", issue)
-}
-
-func (c *C) logCaller(skip int) {
- // This is a bit heavier than it ought to be.
- skip += 1 // Our own frame.
- pc, callerFile, callerLine, ok := runtime.Caller(skip)
- if !ok {
- return
- }
- var testFile string
- var testLine int
- testFunc := runtime.FuncForPC(c.method.PC())
- if runtime.FuncForPC(pc) != testFunc {
- for {
- skip += 1
- if pc, file, line, ok := runtime.Caller(skip); ok {
- // Note that the test line may be different on
- // distinct calls for the same test. Showing
- // the "internal" line is helpful when debugging.
- if runtime.FuncForPC(pc) == testFunc {
- testFile, testLine = file, line
- break
- }
- } else {
- break
- }
- }
- }
- if testFile != "" && (testFile != callerFile || testLine != callerLine) {
- c.logCode(testFile, testLine)
- }
- c.logCode(callerFile, callerLine)
-}
-
-func (c *C) logCode(path string, line int) {
- c.logf("%s:%d:", nicePath(path), line)
- code, err := printLine(path, line)
- if code == "" {
- code = "..." // XXX Open the file and take the raw line.
- if err != nil {
- code += err.Error()
- }
- }
- c.log(indent(code, " "))
-}
-
-var valueGo = filepath.Join("reflect", "value.go")
-var asmGo = filepath.Join("runtime", "asm_")
-
-func (c *C) logPanic(skip int, value interface{}) {
- skip++ // Our own frame.
- initialSkip := skip
- for ; ; skip++ {
- if pc, file, line, ok := runtime.Caller(skip); ok {
- if skip == initialSkip {
- c.logf("... Panic: %s (PC=0x%X)\n", value, pc)
- }
- name := niceFuncName(pc)
- path := nicePath(file)
- if strings.Contains(path, "/gopkg.in/check.v") {
- continue
- }
- if name == "Value.call" && strings.HasSuffix(path, valueGo) {
- continue
- }
- if name == "call16" && strings.Contains(path, asmGo) {
- continue
- }
- c.logf("%s:%d\n in %s", nicePath(file), line, name)
- } else {
- break
- }
- }
-}
-
-func (c *C) logSoftPanic(issue string) {
- c.log("... Panic: ", issue)
-}
-
-func (c *C) logArgPanic(method *methodType, expectedType string) {
- c.logf("... Panic: %s argument should be %s",
- niceFuncName(method.PC()), expectedType)
-}
-
-// -----------------------------------------------------------------------
-// Some simple formatting helpers.
-
-var initWD, initWDErr = os.Getwd()
-
-func init() {
- if initWDErr == nil {
- initWD = strings.Replace(initWD, "\\", "/", -1) + "/"
- }
-}
-
-func nicePath(path string) string {
- if initWDErr == nil {
- if strings.HasPrefix(path, initWD) {
- return path[len(initWD):]
- }
- }
- return path
-}
-
-func niceFuncPath(pc uintptr) string {
- function := runtime.FuncForPC(pc)
- if function != nil {
- filename, line := function.FileLine(pc)
- return fmt.Sprintf("%s:%d", nicePath(filename), line)
- }
- return ""
-}
-
-func niceFuncName(pc uintptr) string {
- function := runtime.FuncForPC(pc)
- if function != nil {
- name := path.Base(function.Name())
- if i := strings.Index(name, "."); i > 0 {
- name = name[i+1:]
- }
- if strings.HasPrefix(name, "(*") {
- if i := strings.Index(name, ")"); i > 0 {
- name = name[2:i] + name[i+1:]
- }
- }
- if i := strings.LastIndex(name, ".*"); i != -1 {
- name = name[:i] + "." + name[i+2:]
- }
- if i := strings.LastIndex(name, "·"); i != -1 {
- name = name[:i] + "." + name[i+2:]
- }
- return name
- }
- return ""
-}
-
-// -----------------------------------------------------------------------
-// Result tracker to aggregate call results.
-
-type Result struct {
- Succeeded int
- Failed int
- Skipped int
- Panicked int
- FixturePanicked int
- ExpectedFailures int
- Missed int // Not even tried to run, related to a panic in the fixture.
- RunError error // Houston, we've got a problem.
- WorkDir string // If KeepWorkDir is true
-}
-
-type resultTracker struct {
- result Result
- _lastWasProblem bool
- _waiting int
- _missed int
- _expectChan chan *C
- _doneChan chan *C
- _stopChan chan bool
-}
-
-func newResultTracker() *resultTracker {
- return &resultTracker{_expectChan: make(chan *C), // Synchronous
- _doneChan: make(chan *C, 32), // Asynchronous
- _stopChan: make(chan bool)} // Synchronous
-}
-
-func (tracker *resultTracker) start() {
- go tracker._loopRoutine()
-}
-
-func (tracker *resultTracker) waitAndStop() {
- <-tracker._stopChan
-}
-
-func (tracker *resultTracker) expectCall(c *C) {
- tracker._expectChan <- c
-}
-
-func (tracker *resultTracker) callDone(c *C) {
- tracker._doneChan <- c
-}
-
-func (tracker *resultTracker) _loopRoutine() {
- for {
- var c *C
- if tracker._waiting > 0 {
- // Calls still running. Can't stop.
- select {
- // XXX Reindent this (not now to make diff clear)
- case c = <-tracker._expectChan:
- tracker._waiting += 1
- case c = <-tracker._doneChan:
- tracker._waiting -= 1
- switch c.status {
- case succeededSt:
- if c.kind == testKd {
- if c.mustFail {
- tracker.result.ExpectedFailures++
- } else {
- tracker.result.Succeeded++
- }
- }
- case failedSt:
- tracker.result.Failed++
- case panickedSt:
- if c.kind == fixtureKd {
- tracker.result.FixturePanicked++
- } else {
- tracker.result.Panicked++
- }
- case fixturePanickedSt:
- // Track it as missed, since the panic
- // was on the fixture, not on the test.
- tracker.result.Missed++
- case missedSt:
- tracker.result.Missed++
- case skippedSt:
- if c.kind == testKd {
- tracker.result.Skipped++
- }
- }
- }
- } else {
- // No calls. Can stop, but no done calls here.
- select {
- case tracker._stopChan <- true:
- return
- case c = <-tracker._expectChan:
- tracker._waiting += 1
- case c = <-tracker._doneChan:
- panic("Tracker got an unexpected done call.")
- }
- }
- }
-}
-
-// -----------------------------------------------------------------------
-// The underlying suite runner.
-
-type suiteRunner struct {
- suite interface{}
- setUpSuite, tearDownSuite *methodType
- setUpTest, tearDownTest *methodType
- tests []*methodType
- tracker *resultTracker
- tempDir *tempDir
- keepDir bool
- output *outputWriter
- reportedProblemLast bool
- benchTime time.Duration
- benchMem bool
-}
-
-type RunConf struct {
- Output io.Writer
- Stream bool
- Verbose bool
- Filter string
- Benchmark bool
- BenchmarkTime time.Duration // Defaults to 1 second
- BenchmarkMem bool
- KeepWorkDir bool
-}
-
-// Create a new suiteRunner able to run all methods in the given suite.
-func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner {
- var conf RunConf
- if runConf != nil {
- conf = *runConf
- }
- if conf.Output == nil {
- conf.Output = os.Stdout
- }
- if conf.Benchmark {
- conf.Verbose = true
- }
-
- suiteType := reflect.TypeOf(suite)
- suiteNumMethods := suiteType.NumMethod()
- suiteValue := reflect.ValueOf(suite)
-
- runner := &suiteRunner{
- suite: suite,
- output: newOutputWriter(conf.Output, conf.Stream, conf.Verbose),
- tracker: newResultTracker(),
- benchTime: conf.BenchmarkTime,
- benchMem: conf.BenchmarkMem,
- tempDir: &tempDir{},
- keepDir: conf.KeepWorkDir,
- tests: make([]*methodType, 0, suiteNumMethods),
- }
- if runner.benchTime == 0 {
- runner.benchTime = 1 * time.Second
- }
-
- var filterRegexp *regexp.Regexp
- if conf.Filter != "" {
- if regexp, err := regexp.Compile(conf.Filter); err != nil {
- msg := "Bad filter expression: " + err.Error()
- runner.tracker.result.RunError = errors.New(msg)
- return runner
- } else {
- filterRegexp = regexp
- }
- }
-
- for i := 0; i != suiteNumMethods; i++ {
- method := newMethod(suiteValue, i)
- switch method.Info.Name {
- case "SetUpSuite":
- runner.setUpSuite = method
- case "TearDownSuite":
- runner.tearDownSuite = method
- case "SetUpTest":
- runner.setUpTest = method
- case "TearDownTest":
- runner.tearDownTest = method
- default:
- prefix := "Test"
- if conf.Benchmark {
- prefix = "Benchmark"
- }
- if !strings.HasPrefix(method.Info.Name, prefix) {
- continue
- }
- if filterRegexp == nil || method.matches(filterRegexp) {
- runner.tests = append(runner.tests, method)
- }
- }
- }
- return runner
-}
-
-// Run all methods in the given suite.
-func (runner *suiteRunner) run() *Result {
- if runner.tracker.result.RunError == nil && len(runner.tests) > 0 {
- runner.tracker.start()
- if runner.checkFixtureArgs() {
- c := runner.runFixture(runner.setUpSuite, "", nil)
- if c == nil || c.status == succeededSt {
- for i := 0; i != len(runner.tests); i++ {
- c := runner.runTest(runner.tests[i])
- if c.status == fixturePanickedSt {
- runner.skipTests(missedSt, runner.tests[i+1:])
- break
- }
- }
- } else if c != nil && c.status == skippedSt {
- runner.skipTests(skippedSt, runner.tests)
- } else {
- runner.skipTests(missedSt, runner.tests)
- }
- runner.runFixture(runner.tearDownSuite, "", nil)
- } else {
- runner.skipTests(missedSt, runner.tests)
- }
- runner.tracker.waitAndStop()
- if runner.keepDir {
- runner.tracker.result.WorkDir = runner.tempDir.path
- } else {
- runner.tempDir.removeAll()
- }
- }
- return &runner.tracker.result
-}
-
-// Create a call object with the given suite method, and fork a
-// goroutine with the provided dispatcher for running it.
-func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C {
- var logw io.Writer
- if runner.output.Stream {
- logw = runner.output
- }
- if logb == nil {
- logb = new(logger)
- }
- c := &C{
- method: method,
- kind: kind,
- testName: testName,
- logb: logb,
- logw: logw,
- tempDir: runner.tempDir,
- done: make(chan *C, 1),
- timer: timer{benchTime: runner.benchTime},
- startTime: time.Now(),
- benchMem: runner.benchMem,
- }
- runner.tracker.expectCall(c)
- go (func() {
- runner.reportCallStarted(c)
- defer runner.callDone(c)
- dispatcher(c)
- })()
- return c
-}
-
-// Same as forkCall(), but wait for call to finish before returning.
-func (runner *suiteRunner) runFunc(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C {
- c := runner.forkCall(method, kind, testName, logb, dispatcher)
- <-c.done
- return c
-}
-
-// Handle a finished call. If there were any panics, update the call status
-// accordingly. Then, mark the call as done and report to the tracker.
-func (runner *suiteRunner) callDone(c *C) {
- value := recover()
- if value != nil {
- switch v := value.(type) {
- case *fixturePanic:
- if v.status == skippedSt {
- c.status = skippedSt
- } else {
- c.logSoftPanic("Fixture has panicked (see related PANIC)")
- c.status = fixturePanickedSt
- }
- default:
- c.logPanic(1, value)
- c.status = panickedSt
- }
- }
- if c.mustFail {
- switch c.status {
- case failedSt:
- c.status = succeededSt
- case succeededSt:
- c.status = failedSt
- c.logString("Error: Test succeeded, but was expected to fail")
- c.logString("Reason: " + c.reason)
- }
- }
-
- runner.reportCallDone(c)
- c.done <- c
-}
-
-// Runs a fixture call synchronously. The fixture will still be run in a
-// goroutine like all suite methods, but this method will not return
-// until the fixture goroutine has finished, because fixtures must be
-// run in a specific order.
-func (runner *suiteRunner) runFixture(method *methodType, testName string, logb *logger) *C {
- if method != nil {
- c := runner.runFunc(method, fixtureKd, testName, logb, func(c *C) {
- c.ResetTimer()
- c.StartTimer()
- defer c.StopTimer()
- c.method.Call([]reflect.Value{reflect.ValueOf(c)})
- })
- return c
- }
- return nil
-}
-
-// Run the fixture method with runFixture(), but panic with a fixturePanic{}
-// in case the fixture method panics. This makes it easier to track the
-// fixture panic together with other call panics within forkTest().
-func (runner *suiteRunner) runFixtureWithPanic(method *methodType, testName string, logb *logger, skipped *bool) *C {
- if skipped != nil && *skipped {
- return nil
- }
- c := runner.runFixture(method, testName, logb)
- if c != nil && c.status != succeededSt {
- if skipped != nil {
- *skipped = c.status == skippedSt
- }
- panic(&fixturePanic{c.status, method})
- }
- return c
-}
-
-type fixturePanic struct {
- status funcStatus
- method *methodType
-}
-
-// Run the suite test method, together with the test-specific fixture,
-// asynchronously.
-func (runner *suiteRunner) forkTest(method *methodType) *C {
- testName := method.String()
- return runner.forkCall(method, testKd, testName, nil, func(c *C) {
- var skipped bool
- defer runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, &skipped)
- defer c.StopTimer()
- benchN := 1
- for {
- runner.runFixtureWithPanic(runner.setUpTest, testName, c.logb, &skipped)
- mt := c.method.Type()
- if mt.NumIn() != 1 || mt.In(0) != reflect.TypeOf(c) {
- // Rather than a plain panic, provide a more helpful message when
- // the argument type is incorrect.
- c.status = panickedSt
- c.logArgPanic(c.method, "*check.C")
- return
- }
- if strings.HasPrefix(c.method.Info.Name, "Test") {
- c.ResetTimer()
- c.StartTimer()
- c.method.Call([]reflect.Value{reflect.ValueOf(c)})
- return
- }
- if !strings.HasPrefix(c.method.Info.Name, "Benchmark") {
- panic("unexpected method prefix: " + c.method.Info.Name)
- }
-
- runtime.GC()
- c.N = benchN
- c.ResetTimer()
- c.StartTimer()
- c.method.Call([]reflect.Value{reflect.ValueOf(c)})
- c.StopTimer()
- if c.status != succeededSt || c.duration >= c.benchTime || benchN >= 1e9 {
- return
- }
- perOpN := int(1e9)
- if c.nsPerOp() != 0 {
- perOpN = int(c.benchTime.Nanoseconds() / c.nsPerOp())
- }
-
- // Logic taken from the stock testing package:
- // - Run more iterations than we think we'll need for a second (1.5x).
- // - Don't grow too fast in case we had timing errors previously.
- // - Be sure to run at least one more than last time.
- benchN = max(min(perOpN+perOpN/2, 100*benchN), benchN+1)
- benchN = roundUp(benchN)
-
- skipped = true // Don't run the deferred one if this panics.
- runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, nil)
- skipped = false
- }
- })
-}
-
-// Same as forkTest(), but wait for the test to finish before returning.
-func (runner *suiteRunner) runTest(method *methodType) *C {
- c := runner.forkTest(method)
- <-c.done
- return c
-}
-
-// Helper to mark tests as skipped or missed. A bit heavy for what
-// it does, but it enables homogeneous handling of tracking, including
-// nice verbose output.
-func (runner *suiteRunner) skipTests(status funcStatus, methods []*methodType) {
- for _, method := range methods {
- runner.runFunc(method, testKd, "", nil, func(c *C) {
- c.status = status
- })
- }
-}
-
-// Verify that the fixture arguments are of type *check.C. In case of errors,
-// log the error as a panic in the fixture method call, and return false.
-func (runner *suiteRunner) checkFixtureArgs() bool {
- succeeded := true
- argType := reflect.TypeOf(&C{})
- for _, method := range []*methodType{runner.setUpSuite, runner.tearDownSuite, runner.setUpTest, runner.tearDownTest} {
- if method != nil {
- mt := method.Type()
- if mt.NumIn() != 1 || mt.In(0) != argType {
- succeeded = false
- runner.runFunc(method, fixtureKd, "", nil, func(c *C) {
- c.logArgPanic(method, "*check.C")
- c.status = panickedSt
- })
- }
- }
- }
- return succeeded
-}
-
-func (runner *suiteRunner) reportCallStarted(c *C) {
- runner.output.WriteCallStarted("START", c)
-}
-
-func (runner *suiteRunner) reportCallDone(c *C) {
- runner.tracker.callDone(c)
- switch c.status {
- case succeededSt:
- if c.mustFail {
- runner.output.WriteCallSuccess("FAIL EXPECTED", c)
- } else {
- runner.output.WriteCallSuccess("PASS", c)
- }
- case skippedSt:
- runner.output.WriteCallSuccess("SKIP", c)
- case failedSt:
- runner.output.WriteCallProblem("FAIL", c)
- case panickedSt:
- runner.output.WriteCallProblem("PANIC", c)
- case fixturePanickedSt:
- // That's a testKd call reporting that its fixture
- // has panicked. The fixture call which caused the
- // panic itself was tracked above. We'll report to
- // aid debugging.
- runner.output.WriteCallProblem("PANIC", c)
- case missedSt:
- runner.output.WriteCallSuccess("MISS", c)
- }
-}
-
-// -----------------------------------------------------------------------
-// Output writer manages atomic output writing according to settings.
-
-type outputWriter struct {
- m sync.Mutex
- writer io.Writer
- wroteCallProblemLast bool
- Stream bool
- Verbose bool
-}
-
-func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter {
- return &outputWriter{writer: writer, Stream: stream, Verbose: verbose}
-}
-
-func (ow *outputWriter) Write(content []byte) (n int, err error) {
- ow.m.Lock()
- n, err = ow.writer.Write(content)
- ow.m.Unlock()
- return
-}
-
-func (ow *outputWriter) WriteCallStarted(label string, c *C) {
- if ow.Stream {
- header := renderCallHeader(label, c, "", "\n")
- ow.m.Lock()
- ow.writer.Write([]byte(header))
- ow.m.Unlock()
- }
-}
-
-func (ow *outputWriter) WriteCallProblem(label string, c *C) {
- var prefix string
- if !ow.Stream {
- prefix = "\n-----------------------------------" +
- "-----------------------------------\n"
- }
- header := renderCallHeader(label, c, prefix, "\n\n")
- ow.m.Lock()
- ow.wroteCallProblemLast = true
- ow.writer.Write([]byte(header))
- if !ow.Stream {
- c.logb.WriteTo(ow.writer)
- }
- ow.m.Unlock()
-}
-
-func (ow *outputWriter) WriteCallSuccess(label string, c *C) {
- if ow.Stream || (ow.Verbose && c.kind == testKd) {
- // TODO Use a buffer here.
- var suffix string
- if c.reason != "" {
- suffix = " (" + c.reason + ")"
- }
- if c.status == succeededSt {
- suffix += "\t" + c.timerString()
- }
- suffix += "\n"
- if ow.Stream {
- suffix += "\n"
- }
- header := renderCallHeader(label, c, "", suffix)
- ow.m.Lock()
- // Resist temptation of using line as prefix above due to race.
- if !ow.Stream && ow.wroteCallProblemLast {
- header = "\n-----------------------------------" +
- "-----------------------------------\n" +
- header
- }
- ow.wroteCallProblemLast = false
- ow.writer.Write([]byte(header))
- ow.m.Unlock()
- }
-}
-
-func renderCallHeader(label string, c *C, prefix, suffix string) string {
- pc := c.method.PC()
- return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc),
- niceFuncName(pc), suffix)
-}
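
The benchmark loop in forkTest above grows c.N between runs using the heuristic commented inline: aim for roughly 1.5x the iteration count predicted from the last run's ns/op, never grow more than 100x at once, always run at least one more iteration than last time, then round up to a round number. The max, min, and roundUp helpers live elsewhere in the vendored package; the sketch below is a self-contained, hypothetical reconstruction of that arithmetic (the 1/2/5 x power-of-ten rounding mirrors the stock testing package), not the vendored code itself.

```Go
package main

import "fmt"

// roundDown10 rounds n down to the nearest power of 10.
func roundDown10(n int) int {
	result := 1
	for n >= 10 {
		n /= 10
		result *= 10
	}
	return result
}

// roundUp rounds n up to 1x, 2x, or 5x the nearest lower power of 10,
// mirroring the stock testing package.
func roundUp(n int) int {
	base := roundDown10(n)
	switch {
	case n <= base:
		return base
	case n <= 2*base:
		return 2 * base
	case n <= 5*base:
		return 5 * base
	default:
		return 10 * base
	}
}

// nextN applies the growth rule used by the benchmark loop above:
// ~1.5x the predicted iteration count, capped at 100x the previous
// count, and always at least one more iteration than last time.
func nextN(perOpN, benchN int) int {
	n := perOpN + perOpN/2 // run more than we think we'll need (1.5x)
	if n > 100*benchN {    // don't grow too fast
		n = 100 * benchN
	}
	if n <= benchN { // be sure to run at least one more than last time
		n = benchN + 1
	}
	return roundUp(n)
}

func main() {
	fmt.Println(nextN(300, 100)) // prints 500
}
```
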
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/checkers.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/checkers.go
deleted file mode 100644
index bac338729c..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/checkers.go
+++ /dev/null
@@ -1,458 +0,0 @@
-package check
-
-import (
- "fmt"
- "reflect"
- "regexp"
-)
-
-// -----------------------------------------------------------------------
-// CommentInterface and Commentf helper, to attach extra information to checks.
-
-type comment struct {
- format string
- args []interface{}
-}
-
-// Commentf returns an informational value to use with Assert or Check calls.
-// If the checker test fails, the provided arguments will be passed to
-// fmt.Sprintf, and will be presented next to the logged failure.
-//
-// For example:
-//
-// c.Assert(v, Equals, 42, Commentf("Iteration #%d failed.", i))
-//
-// Note that if the comment is constant, a better option is to
-// simply use a normal comment right above or next to the line, as
-// it will also get printed with any errors:
-//
-// c.Assert(l, Equals, 8192) // Ensure buffer size is correct (bug #123)
-//
-func Commentf(format string, args ...interface{}) CommentInterface {
- return &comment{format, args}
-}
-
-// CommentInterface must be implemented by types that attach extra
-// information to failed checks. See the Commentf function for details.
-type CommentInterface interface {
- CheckCommentString() string
-}
-
-func (c *comment) CheckCommentString() string {
- return fmt.Sprintf(c.format, c.args...)
-}
-
-// -----------------------------------------------------------------------
-// The Checker interface.
-
-// The Checker interface must be provided by checkers used with
-// the Assert and Check verification methods.
-type Checker interface {
- Info() *CheckerInfo
- Check(params []interface{}, names []string) (result bool, error string)
-}
-
-// See the Checker interface.
-type CheckerInfo struct {
- Name string
- Params []string
-}
-
-func (info *CheckerInfo) Info() *CheckerInfo {
- return info
-}
-
-// -----------------------------------------------------------------------
-// Not checker logic inverter.
-
-// The Not checker inverts the logic of the provided checker. The
-// resulting checker will succeed where the original one failed, and
-// vice-versa.
-//
-// For example:
-//
-// c.Assert(a, Not(Equals), b)
-//
-func Not(checker Checker) Checker {
- return ¬Checker{checker}
-}
-
-type notChecker struct {
- sub Checker
-}
-
-func (checker *notChecker) Info() *CheckerInfo {
- info := *checker.sub.Info()
- info.Name = "Not(" + info.Name + ")"
- return &info
-}
-
-func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) {
- result, error = checker.sub.Check(params, names)
- result = !result
- return
-}
-
-// -----------------------------------------------------------------------
-// IsNil checker.
-
-type isNilChecker struct {
- *CheckerInfo
-}
-
-// The IsNil checker tests whether the obtained value is nil.
-//
-// For example:
-//
-// c.Assert(err, IsNil)
-//
-var IsNil Checker = &isNilChecker{
- &CheckerInfo{Name: "IsNil", Params: []string{"value"}},
-}
-
-func (checker *isNilChecker) Check(params []interface{}, names []string) (result bool, error string) {
- return isNil(params[0]), ""
-}
-
-func isNil(obtained interface{}) (result bool) {
- if obtained == nil {
- result = true
- } else {
- switch v := reflect.ValueOf(obtained); v.Kind() {
- case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return v.IsNil()
- }
- }
- return
-}
-
-// -----------------------------------------------------------------------
-// NotNil checker. Alias for Not(IsNil), since it's so common.
-
-type notNilChecker struct {
- *CheckerInfo
-}
-
-// The NotNil checker verifies that the obtained value is not nil.
-//
-// For example:
-//
-// c.Assert(iface, NotNil)
-//
-// This is an alias for Not(IsNil), made available since it's a
-// fairly common check.
-//
-var NotNil Checker = ¬NilChecker{
- &CheckerInfo{Name: "NotNil", Params: []string{"value"}},
-}
-
-func (checker *notNilChecker) Check(params []interface{}, names []string) (result bool, error string) {
- return !isNil(params[0]), ""
-}
-
-// -----------------------------------------------------------------------
-// Equals checker.
-
-type equalsChecker struct {
- *CheckerInfo
-}
-
-// The Equals checker verifies that the obtained value is equal to
-// the expected value, according to usual Go semantics for ==.
-//
-// For example:
-//
-// c.Assert(value, Equals, 42)
-//
-var Equals Checker = &equalsChecker{
- &CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}},
-}
-
-func (checker *equalsChecker) Check(params []interface{}, names []string) (result bool, error string) {
- defer func() {
- if v := recover(); v != nil {
- result = false
- error = fmt.Sprint(v)
- }
- }()
- return params[0] == params[1], ""
-}
-
-// -----------------------------------------------------------------------
-// DeepEquals checker.
-
-type deepEqualsChecker struct {
- *CheckerInfo
-}
-
-// The DeepEquals checker verifies that the obtained value is deep-equal to
-// the expected value. The check will work correctly even when facing
-// slices, interfaces, and values of different types (which always fail
-// the test).
-//
-// For example:
-//
-// c.Assert(value, DeepEquals, 42)
-// c.Assert(array, DeepEquals, []string{"hi", "there"})
-//
-var DeepEquals Checker = &deepEqualsChecker{
- &CheckerInfo{Name: "DeepEquals", Params: []string{"obtained", "expected"}},
-}
-
-func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) {
- return reflect.DeepEqual(params[0], params[1]), ""
-}
-
-// -----------------------------------------------------------------------
-// HasLen checker.
-
-type hasLenChecker struct {
- *CheckerInfo
-}
-
-// The HasLen checker verifies that the obtained value has the
-// provided length. In many cases this is superior to using Equals
-// in conjunction with the len function because when the check
-// fails the value itself will be printed, instead of its length,
-// providing more details for figuring out the problem.
-//
-// For example:
-//
-// c.Assert(list, HasLen, 5)
-//
-var HasLen Checker = &hasLenChecker{
- &CheckerInfo{Name: "HasLen", Params: []string{"obtained", "n"}},
-}
-
-func (checker *hasLenChecker) Check(params []interface{}, names []string) (result bool, error string) {
- n, ok := params[1].(int)
- if !ok {
- return false, "n must be an int"
- }
- value := reflect.ValueOf(params[0])
- switch value.Kind() {
- case reflect.Map, reflect.Array, reflect.Slice, reflect.Chan, reflect.String:
- default:
- return false, "obtained value type has no length"
- }
- return value.Len() == n, ""
-}
-
-// -----------------------------------------------------------------------
-// ErrorMatches checker.
-
-type errorMatchesChecker struct {
- *CheckerInfo
-}
-
-// The ErrorMatches checker verifies that the error value
-// is non-nil and matches the regular expression provided.
-//
-// For example:
-//
-// c.Assert(err, ErrorMatches, "perm.*denied")
-//
-var ErrorMatches Checker = errorMatchesChecker{
- &CheckerInfo{Name: "ErrorMatches", Params: []string{"value", "regex"}},
-}
-
-func (checker errorMatchesChecker) Check(params []interface{}, names []string) (result bool, errStr string) {
- if params[0] == nil {
- return false, "Error value is nil"
- }
- err, ok := params[0].(error)
- if !ok {
- return false, "Value is not an error"
- }
- params[0] = err.Error()
- names[0] = "error"
- return matches(params[0], params[1])
-}
-
-// -----------------------------------------------------------------------
-// Matches checker.
-
-type matchesChecker struct {
- *CheckerInfo
-}
-
-// The Matches checker verifies that the string provided as the obtained
-// value (or the string resulting from obtained.String()) matches the
-// regular expression provided.
-//
-// For example:
-//
-// c.Assert(err, Matches, "perm.*denied")
-//
-var Matches Checker = &matchesChecker{
- &CheckerInfo{Name: "Matches", Params: []string{"value", "regex"}},
-}
-
-func (checker *matchesChecker) Check(params []interface{}, names []string) (result bool, error string) {
- return matches(params[0], params[1])
-}
-
-func matches(value, regex interface{}) (result bool, error string) {
- reStr, ok := regex.(string)
- if !ok {
- return false, "Regex must be a string"
- }
- valueStr, valueIsStr := value.(string)
- if !valueIsStr {
- if valueWithStr, valueHasStr := value.(fmt.Stringer); valueHasStr {
- valueStr, valueIsStr = valueWithStr.String(), true
- }
- }
- if valueIsStr {
- matches, err := regexp.MatchString("^"+reStr+"$", valueStr)
- if err != nil {
- return false, "Can't compile regex: " + err.Error()
- }
- return matches, ""
- }
- return false, "Obtained value is not a string and has no .String()"
-}
-
-// -----------------------------------------------------------------------
-// Panics checker.
-
-type panicsChecker struct {
- *CheckerInfo
-}
-
-// The Panics checker verifies that calling the provided zero-argument
-// function will cause a panic which is deep-equal to the provided value.
-//
-// For example:
-//
-// c.Assert(func() { f(1, 2) }, Panics, &SomeErrorType{"BOOM"}).
-//
-//
-var Panics Checker = &panicsChecker{
- &CheckerInfo{Name: "Panics", Params: []string{"function", "expected"}},
-}
-
-func (checker *panicsChecker) Check(params []interface{}, names []string) (result bool, error string) {
- f := reflect.ValueOf(params[0])
- if f.Kind() != reflect.Func || f.Type().NumIn() != 0 {
- return false, "Function must take zero arguments"
- }
- defer func() {
- // If the function has not panicked, then don't do the check.
- if error != "" {
- return
- }
- params[0] = recover()
- names[0] = "panic"
- result = reflect.DeepEqual(params[0], params[1])
- }()
- f.Call(nil)
- return false, "Function has not panicked"
-}
-
-type panicMatchesChecker struct {
- *CheckerInfo
-}
-
-// The PanicMatches checker verifies that calling the provided zero-argument
-// function will cause a panic with an error value matching
-// the regular expression provided.
-//
-// For example:
-//
-// c.Assert(func() { f(1, 2) }, PanicMatches, `open.*: no such file or directory`).
-//
-//
-var PanicMatches Checker = &panicMatchesChecker{
- &CheckerInfo{Name: "PanicMatches", Params: []string{"function", "expected"}},
-}
-
-func (checker *panicMatchesChecker) Check(params []interface{}, names []string) (result bool, errmsg string) {
- f := reflect.ValueOf(params[0])
- if f.Kind() != reflect.Func || f.Type().NumIn() != 0 {
- return false, "Function must take zero arguments"
- }
- defer func() {
- // If the function has not panicked, then don't do the check.
- if errmsg != "" {
- return
- }
- obtained := recover()
- names[0] = "panic"
- if e, ok := obtained.(error); ok {
- params[0] = e.Error()
- } else if _, ok := obtained.(string); ok {
- params[0] = obtained
- } else {
- errmsg = "Panic value is not a string or an error"
- return
- }
- result, errmsg = matches(params[0], params[1])
- }()
- f.Call(nil)
- return false, "Function has not panicked"
-}
-
-// -----------------------------------------------------------------------
-// FitsTypeOf checker.
-
-type fitsTypeChecker struct {
- *CheckerInfo
-}
-
-// The FitsTypeOf checker verifies that the obtained value is
-// assignable to a variable with the same type as the provided
-// sample value.
-//
-// For example:
-//
-// c.Assert(value, FitsTypeOf, int64(0))
-// c.Assert(value, FitsTypeOf, os.Error(nil))
-//
-var FitsTypeOf Checker = &fitsTypeChecker{
- &CheckerInfo{Name: "FitsTypeOf", Params: []string{"obtained", "sample"}},
-}
-
-func (checker *fitsTypeChecker) Check(params []interface{}, names []string) (result bool, error string) {
- obtained := reflect.ValueOf(params[0])
- sample := reflect.ValueOf(params[1])
- if !obtained.IsValid() {
- return false, ""
- }
- if !sample.IsValid() {
- return false, "Invalid sample value"
- }
- return obtained.Type().AssignableTo(sample.Type()), ""
-}
-
-// -----------------------------------------------------------------------
-// Implements checker.
-
-type implementsChecker struct {
- *CheckerInfo
-}
-
-// The Implements checker verifies that the obtained value
-// implements the interface specified via a pointer to an interface
-// variable.
-//
-// For example:
-//
-// var e os.Error
-// c.Assert(err, Implements, &e)
-//
-var Implements Checker = &implementsChecker{
- &CheckerInfo{Name: "Implements", Params: []string{"obtained", "ifaceptr"}},
-}
-
-func (checker *implementsChecker) Check(params []interface{}, names []string) (result bool, error string) {
- obtained := reflect.ValueOf(params[0])
- ifaceptr := reflect.ValueOf(params[1])
- if !obtained.IsValid() {
- return false, ""
- }
- if !ifaceptr.IsValid() || ifaceptr.Kind() != reflect.Ptr || ifaceptr.Elem().Kind() != reflect.Interface {
- return false, "ifaceptr should be a pointer to an interface variable"
- }
- return obtained.Type().Implements(ifaceptr.Elem().Type()), ""
-}
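
Every built-in checker above follows the same shape: a struct embedding *CheckerInfo (which supplies Info()) plus a Check method. A user-defined checker is written the same way. The sketch below is hypothetical and repeats the two small type definitions only so it compiles on its own; with the real package you would implement check.Checker directly and use the value as c.Assert(v, IsTrue).

```Go
package main

import (
	"fmt"
	"reflect"
)

// Minimal mirrors of the deleted checkers.go types, included only so this
// example is self-contained; in real code they come from gopkg.in/check.v1.
type CheckerInfo struct {
	Name   string
	Params []string
}

func (info *CheckerInfo) Info() *CheckerInfo { return info }

type Checker interface {
	Info() *CheckerInfo
	Check(params []interface{}, names []string) (result bool, error string)
}

// isTrueChecker is a hypothetical custom checker: it succeeds when the
// obtained value is a bool with the value true.
type isTrueChecker struct {
	*CheckerInfo
}

var IsTrue Checker = &isTrueChecker{
	&CheckerInfo{Name: "IsTrue", Params: []string{"obtained"}},
}

func (checker *isTrueChecker) Check(params []interface{}, names []string) (result bool, error string) {
	v := reflect.ValueOf(params[0])
	if v.Kind() != reflect.Bool {
		return false, "obtained value is not a bool"
	}
	return v.Bool(), ""
}

func main() {
	ok, msg := IsTrue.Check([]interface{}{true}, []string{"obtained"})
	fmt.Println(ok, msg) // true
}
```
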
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/helpers.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/helpers.go
deleted file mode 100644
index 4b6c26da45..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/helpers.go
+++ /dev/null
@@ -1,231 +0,0 @@
-package check
-
-import (
- "fmt"
- "strings"
- "time"
-)
-
-// TestName returns the current test name in the form "SuiteName.TestName"
-func (c *C) TestName() string {
- return c.testName
-}
-
-// -----------------------------------------------------------------------
-// Basic succeeding/failing logic.
-
-// Failed returns whether the currently running test has already failed.
-func (c *C) Failed() bool {
- return c.status == failedSt
-}
-
-// Fail marks the currently running test as failed.
-//
-// Something ought to have been previously logged so the developer can tell
-// what went wrong. The higher level helper functions will fail the test
-// and do the logging properly.
-func (c *C) Fail() {
- c.status = failedSt
-}
-
-// FailNow marks the currently running test as failed and stops running it.
-// Something ought to have been previously logged so the developer can tell
-// what went wrong. The higher level helper functions will fail the test
-// and do the logging properly.
-func (c *C) FailNow() {
- c.Fail()
- c.stopNow()
-}
-
-// Succeed marks the currently running test as succeeded, undoing any
-// previous failures.
-func (c *C) Succeed() {
- c.status = succeededSt
-}
-
-// SucceedNow marks the currently running test as succeeded, undoing any
-// previous failures, and stops running the test.
-func (c *C) SucceedNow() {
- c.Succeed()
- c.stopNow()
-}
-
-// ExpectFailure informs that the running test is knowingly broken for
-// the provided reason. If the test does not fail, an error will be reported
-// to raise attention to this fact. This method is useful to temporarily
-// disable tests which cover well-known problems until a better time to
-// fix the problem is found, without forgetting about the fact that a
-// failure still exists.
-func (c *C) ExpectFailure(reason string) {
- if reason == "" {
- panic("Missing reason why the test is expected to fail")
- }
- c.mustFail = true
- c.reason = reason
-}
-
-// Skip skips the running test for the provided reason. If run from within
-// SetUpTest, the individual test being set up will be skipped, and if run
-// from within SetUpSuite, the whole suite is skipped.
-func (c *C) Skip(reason string) {
- if reason == "" {
- panic("Missing reason why the test is being skipped")
- }
- c.reason = reason
- c.status = skippedSt
- c.stopNow()
-}
-
-// -----------------------------------------------------------------------
-// Basic logging.
-
-// GetTestLog returns the current test error output.
-func (c *C) GetTestLog() string {
- return c.logb.String()
-}
-
-// Log logs some information into the test error output.
-// The provided arguments are assembled together into a string with fmt.Sprint.
-func (c *C) Log(args ...interface{}) {
- c.log(args...)
-}
-
-// Logf logs some information into the test error output.
-// The provided arguments are assembled together into a string with fmt.Sprintf.
-func (c *C) Logf(format string, args ...interface{}) {
- c.logf(format, args...)
-}
-
-// Output enables *C to be used as a logger in functions that require only
-// the minimum interface of *log.Logger.
-func (c *C) Output(calldepth int, s string) error {
- d := time.Now().Sub(c.startTime)
- msec := d / time.Millisecond
- sec := d / time.Second
- min := d / time.Minute
-
- c.Logf("[LOG] %d:%02d.%03d %s", min, sec%60, msec%1000, s)
- return nil
-}
-
-// Error logs an error into the test error output and marks the test as failed.
-// The provided arguments are assembled together into a string with fmt.Sprint.
-func (c *C) Error(args ...interface{}) {
- c.logCaller(1)
- c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
- c.logNewLine()
- c.Fail()
-}
-
-// Errorf logs an error into the test error output and marks the test as failed.
-// The provided arguments are assembled together into a string with fmt.Sprintf.
-func (c *C) Errorf(format string, args ...interface{}) {
- c.logCaller(1)
- c.logString(fmt.Sprintf("Error: "+format, args...))
- c.logNewLine()
- c.Fail()
-}
-
-// Fatal logs an error into the test error output, marks the test as failed, and
-// stops the test execution. The provided arguments are assembled together into
-// a string with fmt.Sprint.
-func (c *C) Fatal(args ...interface{}) {
- c.logCaller(1)
- c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
- c.logNewLine()
- c.FailNow()
-}
-
-// Fatalf logs an error into the test error output, marks the test as failed, and
-// stops the test execution. The provided arguments are assembled together into
-// a string with fmt.Sprintf.
-func (c *C) Fatalf(format string, args ...interface{}) {
- c.logCaller(1)
- c.logString(fmt.Sprint("Error: ", fmt.Sprintf(format, args...)))
- c.logNewLine()
- c.FailNow()
-}
-
-// -----------------------------------------------------------------------
-// Generic checks and assertions based on checkers.
-
-// Check verifies if the first value matches the expected value according
-// to the provided checker. If they do not match, an error is logged, the
-// test is marked as failed, and the test execution continues.
-//
-// Some checkers may not need the expected argument (e.g. IsNil).
-//
-// Extra arguments provided to the function are logged next to the reported
-// problem when the matching fails.
-func (c *C) Check(obtained interface{}, checker Checker, args ...interface{}) bool {
- return c.internalCheck("Check", obtained, checker, args...)
-}
-
-// Assert ensures that the first value matches the expected value according
-// to the provided checker. If they do not match, an error is logged, the
-// test is marked as failed, and the test execution stops.
-//
-// Some checkers may not need the expected argument (e.g. IsNil).
-//
-// Extra arguments provided to the function are logged next to the reported
-// problem when the matching fails.
-func (c *C) Assert(obtained interface{}, checker Checker, args ...interface{}) {
- if !c.internalCheck("Assert", obtained, checker, args...) {
- c.stopNow()
- }
-}
-
-func (c *C) internalCheck(funcName string, obtained interface{}, checker Checker, args ...interface{}) bool {
- if checker == nil {
- c.logCaller(2)
- c.logString(fmt.Sprintf("%s(obtained, nil!?, ...):", funcName))
- c.logString("Oops.. you've provided a nil checker!")
- c.logNewLine()
- c.Fail()
- return false
- }
-
- // If the last argument is a bug info, extract it out.
- var comment CommentInterface
- if len(args) > 0 {
- if c, ok := args[len(args)-1].(CommentInterface); ok {
- comment = c
- args = args[:len(args)-1]
- }
- }
-
- params := append([]interface{}{obtained}, args...)
- info := checker.Info()
-
- if len(params) != len(info.Params) {
- names := append([]string{info.Params[0], info.Name}, info.Params[1:]...)
- c.logCaller(2)
- c.logString(fmt.Sprintf("%s(%s):", funcName, strings.Join(names, ", ")))
- c.logString(fmt.Sprintf("Wrong number of parameters for %s: want %d, got %d", info.Name, len(names), len(params)+1))
- c.logNewLine()
- c.Fail()
- return false
- }
-
- // Copy since it may be mutated by Check.
- names := append([]string{}, info.Params...)
-
- // Do the actual check.
- result, error := checker.Check(params, names)
- if !result || error != "" {
- c.logCaller(2)
- for i := 0; i != len(params); i++ {
- c.logValue(names[i], params[i])
- }
- if comment != nil {
- c.logString(comment.CheckCommentString())
- }
- if error != "" {
- c.logString(error)
- }
- c.logNewLine()
- c.Fail()
- return false
- }
- return true
-}
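
A hypothetical test method using the helpers above: Assert stops the test on failure, Check logs the failure and keeps going, Commentf attaches extra context, and Skip bails out early. The snippet assumes the vendored gopkg.in/check.v1 package is importable and that the suite is registered and hooked into go test as sketched after run.go below; splitWords is a stand-in function under test.

```Go
package mypkg_test

import (
	"os"
	"strings"

	. "gopkg.in/check.v1"
)

type helperSuite struct{}

// splitWords is a stand-in function under test, defined here only to keep
// the example self-contained.
func splitWords(s string) []string {
	return strings.Fields(s)
}

func (s *helperSuite) TestSplitWords(c *C) {
	if os.Getenv("SKIP_SLOW") != "" {
		c.Skip("SKIP_SLOW is set") // skips this test and stops running it
	}

	words := splitWords("hello gocheck")
	c.Assert(words, HasLen, 2) // Assert stops the test here on failure

	for i, w := range words {
		// Check keeps going on failure, so every bad element gets reported.
		c.Check(w != "", Equals, true, Commentf("word #%d is empty", i))
	}
}
```
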
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/printer.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/printer.go
deleted file mode 100644
index e0f7557b5c..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/printer.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package check
-
-import (
- "bytes"
- "go/ast"
- "go/parser"
- "go/printer"
- "go/token"
- "os"
-)
-
-func indent(s, with string) (r string) {
- eol := true
- for i := 0; i != len(s); i++ {
- c := s[i]
- switch {
- case eol && c == '\n' || c == '\r':
- case c == '\n' || c == '\r':
- eol = true
- case eol:
- eol = false
- s = s[:i] + with + s[i:]
- i += len(with)
- }
- }
- return s
-}
-
-func printLine(filename string, line int) (string, error) {
- fset := token.NewFileSet()
- file, err := os.Open(filename)
- if err != nil {
- return "", err
- }
- fnode, err := parser.ParseFile(fset, filename, file, parser.ParseComments)
- if err != nil {
- return "", err
- }
- config := &printer.Config{Mode: printer.UseSpaces, Tabwidth: 4}
- lp := &linePrinter{fset: fset, fnode: fnode, line: line, config: config}
- ast.Walk(lp, fnode)
- result := lp.output.Bytes()
- // Comments leave \n at the end.
- n := len(result)
- for n > 0 && result[n-1] == '\n' {
- n--
- }
- return string(result[:n]), nil
-}
-
-type linePrinter struct {
- config *printer.Config
- fset *token.FileSet
- fnode *ast.File
- line int
- output bytes.Buffer
- stmt ast.Stmt
-}
-
-func (lp *linePrinter) emit() bool {
- if lp.stmt != nil {
- lp.trim(lp.stmt)
- lp.printWithComments(lp.stmt)
- lp.stmt = nil
- return true
- }
- return false
-}
-
-func (lp *linePrinter) printWithComments(n ast.Node) {
- nfirst := lp.fset.Position(n.Pos()).Line
- nlast := lp.fset.Position(n.End()).Line
- for _, g := range lp.fnode.Comments {
- cfirst := lp.fset.Position(g.Pos()).Line
- clast := lp.fset.Position(g.End()).Line
- if clast == nfirst-1 && lp.fset.Position(n.Pos()).Column == lp.fset.Position(g.Pos()).Column {
- for _, c := range g.List {
- lp.output.WriteString(c.Text)
- lp.output.WriteByte('\n')
- }
- }
- if cfirst >= nfirst && cfirst <= nlast && n.End() <= g.List[0].Slash {
- // The printer will not include the comment if it starts past
- // the node itself. Trick it into printing by overlapping the
- // slash with the end of the statement.
- g.List[0].Slash = n.End() - 1
- }
- }
- node := &printer.CommentedNode{n, lp.fnode.Comments}
- lp.config.Fprint(&lp.output, lp.fset, node)
-}
-
-func (lp *linePrinter) Visit(n ast.Node) (w ast.Visitor) {
- if n == nil {
- if lp.output.Len() == 0 {
- lp.emit()
- }
- return nil
- }
- first := lp.fset.Position(n.Pos()).Line
- last := lp.fset.Position(n.End()).Line
- if first <= lp.line && last >= lp.line {
- // Print the innermost statement containing the line.
- if stmt, ok := n.(ast.Stmt); ok {
- if _, ok := n.(*ast.BlockStmt); !ok {
- lp.stmt = stmt
- }
- }
- if first == lp.line && lp.emit() {
- return nil
- }
- return lp
- }
- return nil
-}
-
-func (lp *linePrinter) trim(n ast.Node) bool {
- stmt, ok := n.(ast.Stmt)
- if !ok {
- return true
- }
- line := lp.fset.Position(n.Pos()).Line
- if line != lp.line {
- return false
- }
- switch stmt := stmt.(type) {
- case *ast.IfStmt:
- stmt.Body = lp.trimBlock(stmt.Body)
- case *ast.SwitchStmt:
- stmt.Body = lp.trimBlock(stmt.Body)
- case *ast.TypeSwitchStmt:
- stmt.Body = lp.trimBlock(stmt.Body)
- case *ast.CaseClause:
- stmt.Body = lp.trimList(stmt.Body)
- case *ast.CommClause:
- stmt.Body = lp.trimList(stmt.Body)
- case *ast.BlockStmt:
- stmt.List = lp.trimList(stmt.List)
- }
- return true
-}
-
-func (lp *linePrinter) trimBlock(stmt *ast.BlockStmt) *ast.BlockStmt {
- if !lp.trim(stmt) {
- return lp.emptyBlock(stmt)
- }
- stmt.Rbrace = stmt.Lbrace
- return stmt
-}
-
-func (lp *linePrinter) trimList(stmts []ast.Stmt) []ast.Stmt {
- for i := 0; i != len(stmts); i++ {
- if !lp.trim(stmts[i]) {
- stmts[i] = lp.emptyStmt(stmts[i])
- break
- }
- }
- return stmts
-}
-
-func (lp *linePrinter) emptyStmt(n ast.Node) *ast.ExprStmt {
- return &ast.ExprStmt{&ast.Ellipsis{n.Pos(), nil}}
-}
-
-func (lp *linePrinter) emptyBlock(n ast.Node) *ast.BlockStmt {
- p := n.Pos()
- return &ast.BlockStmt{p, []ast.Stmt{lp.emptyStmt(n)}, p}
-}
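
printLine above uses go/parser and go/printer to show the innermost statement containing the line where a check failed, trimming blocks so only the relevant statement is printed. The simplified, hypothetical sketch below captures the core idea (find the innermost non-block statement whose span covers the line and pretty-print it with the same printer configuration); it skips the comment-attachment and block-trimming refinements of the vendored version.

```Go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/printer"
	"go/token"
	"os"
	"strconv"
)

func main() {
	// Usage: printline <file.go> <line>
	filename := os.Args[1]
	line, err := strconv.Atoi(os.Args[2])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	fset := token.NewFileSet()
	fnode, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Depth-first walk: the last matching non-block statement we see is the
	// innermost one whose source span contains the requested line.
	var found ast.Stmt
	ast.Inspect(fnode, func(n ast.Node) bool {
		stmt, ok := n.(ast.Stmt)
		if !ok {
			return true
		}
		first := fset.Position(n.Pos()).Line
		last := fset.Position(n.End()).Line
		if first <= line && line <= last {
			if _, isBlock := n.(*ast.BlockStmt); !isBlock {
				found = stmt
			}
		}
		return true
	})
	if found == nil {
		fmt.Println("no statement found on that line")
		return
	}

	config := &printer.Config{Mode: printer.UseSpaces, Tabwidth: 4}
	config.Fprint(os.Stdout, fset, found)
	fmt.Println()
}
```
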
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/run.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/run.go
deleted file mode 100644
index da8fd79872..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/run.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package check
-
-import (
- "bufio"
- "flag"
- "fmt"
- "os"
- "testing"
- "time"
-)
-
-// -----------------------------------------------------------------------
-// Test suite registry.
-
-var allSuites []interface{}
-
-// Suite registers the given value as a test suite to be run. Any methods
-// starting with the Test prefix in the given value will be considered as
-// a test method.
-func Suite(suite interface{}) interface{} {
- allSuites = append(allSuites, suite)
- return suite
-}
-
-// -----------------------------------------------------------------------
-// Public running interface.
-
-var (
- oldFilterFlag = flag.String("gocheck.f", "", "Regular expression selecting which tests and/or suites to run")
- oldVerboseFlag = flag.Bool("gocheck.v", false, "Verbose mode")
- oldStreamFlag = flag.Bool("gocheck.vv", false, "Super verbose mode (disables output caching)")
- oldBenchFlag = flag.Bool("gocheck.b", false, "Run benchmarks")
- oldBenchTime = flag.Duration("gocheck.btime", 1*time.Second, "approximate run time for each benchmark")
- oldListFlag = flag.Bool("gocheck.list", false, "List the names of all tests that will be run")
- oldWorkFlag = flag.Bool("gocheck.work", false, "Display and do not remove the test working directory")
-
- newFilterFlag = flag.String("check.f", "", "Regular expression selecting which tests and/or suites to run")
- newVerboseFlag = flag.Bool("check.v", false, "Verbose mode")
- newStreamFlag = flag.Bool("check.vv", false, "Super verbose mode (disables output caching)")
- newBenchFlag = flag.Bool("check.b", false, "Run benchmarks")
- newBenchTime = flag.Duration("check.btime", 1*time.Second, "approximate run time for each benchmark")
- newBenchMem = flag.Bool("check.bmem", false, "Report memory benchmarks")
- newListFlag = flag.Bool("check.list", false, "List the names of all tests that will be run")
- newWorkFlag = flag.Bool("check.work", false, "Display and do not remove the test working directory")
-)
-
-// TestingT runs all test suites registered with the Suite function,
-// printing results to stdout, and reporting any failures back to
-// the "testing" package.
-func TestingT(testingT *testing.T) {
- benchTime := *newBenchTime
- if benchTime == 1*time.Second {
- benchTime = *oldBenchTime
- }
- conf := &RunConf{
- Filter: *oldFilterFlag + *newFilterFlag,
- Verbose: *oldVerboseFlag || *newVerboseFlag,
- Stream: *oldStreamFlag || *newStreamFlag,
- Benchmark: *oldBenchFlag || *newBenchFlag,
- BenchmarkTime: benchTime,
- BenchmarkMem: *newBenchMem,
- KeepWorkDir: *oldWorkFlag || *newWorkFlag,
- }
- if *oldListFlag || *newListFlag {
- w := bufio.NewWriter(os.Stdout)
- for _, name := range ListAll(conf) {
- fmt.Fprintln(w, name)
- }
- w.Flush()
- return
- }
- result := RunAll(conf)
- println(result.String())
- if !result.Passed() {
- testingT.Fail()
- }
-}
-
-// RunAll runs all test suites registered with the Suite function, using the
-// provided run configuration.
-func RunAll(runConf *RunConf) *Result {
- result := Result{}
- for _, suite := range allSuites {
- result.Add(Run(suite, runConf))
- }
- return &result
-}
-
-// Run runs the provided test suite using the provided run configuration.
-func Run(suite interface{}, runConf *RunConf) *Result {
- runner := newSuiteRunner(suite, runConf)
- return runner.run()
-}
-
-// ListAll returns the names of all the test functions registered with the
-// Suite function that will be run with the provided run configuration.
-func ListAll(runConf *RunConf) []string {
- var names []string
- for _, suite := range allSuites {
- names = append(names, List(suite, runConf)...)
- }
- return names
-}
-
-// List returns the names of the test functions in the given
-// suite that will be run with the provided run configuration.
-func List(suite interface{}, runConf *RunConf) []string {
- var names []string
- runner := newSuiteRunner(suite, runConf)
- for _, t := range runner.tests {
- names = append(names, t.String())
- }
- return names
-}
-
-// -----------------------------------------------------------------------
-// Result methods.
-
-func (r *Result) Add(other *Result) {
- r.Succeeded += other.Succeeded
- r.Skipped += other.Skipped
- r.Failed += other.Failed
- r.Panicked += other.Panicked
- r.FixturePanicked += other.FixturePanicked
- r.ExpectedFailures += other.ExpectedFailures
- r.Missed += other.Missed
- if r.WorkDir != "" && other.WorkDir != "" {
- r.WorkDir += ":" + other.WorkDir
- } else if other.WorkDir != "" {
- r.WorkDir = other.WorkDir
- }
-}
-
-func (r *Result) Passed() bool {
- return (r.Failed == 0 && r.Panicked == 0 &&
- r.FixturePanicked == 0 && r.Missed == 0 &&
- r.RunError == nil)
-}
-
-func (r *Result) String() string {
- if r.RunError != nil {
- return "ERROR: " + r.RunError.Error()
- }
-
- var value string
- if r.Failed == 0 && r.Panicked == 0 && r.FixturePanicked == 0 &&
- r.Missed == 0 {
- value = "OK: "
- } else {
- value = "OOPS: "
- }
- value += fmt.Sprintf("%d passed", r.Succeeded)
- if r.Skipped != 0 {
- value += fmt.Sprintf(", %d skipped", r.Skipped)
- }
- if r.ExpectedFailures != 0 {
- value += fmt.Sprintf(", %d expected failures", r.ExpectedFailures)
- }
- if r.Failed != 0 {
- value += fmt.Sprintf(", %d FAILED", r.Failed)
- }
- if r.Panicked != 0 {
- value += fmt.Sprintf(", %d PANICKED", r.Panicked)
- }
- if r.FixturePanicked != 0 {
- value += fmt.Sprintf(", %d FIXTURE-PANICKED", r.FixturePanicked)
- }
- if r.Missed != 0 {
- value += fmt.Sprintf(", %d MISSED", r.Missed)
- }
- if r.WorkDir != "" {
- value += "\nWORK=" + r.WorkDir
- }
- return value
-}
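
run.go above supplies the bridge between registered suites and the standard testing package. A hypothetical minimal test file wiring it up looks like the sketch below: Suite registers the value, TestingT runs everything under go test, and the -check.* flags declared above (for example -check.f to filter or -check.b to benchmark) control the run.

```Go
package mypkg_test

import (
	"testing"

	. "gopkg.in/check.v1"
)

// Hook gocheck into "go test": TestingT runs every suite registered with Suite.
func Test(t *testing.T) { TestingT(t) }

// runSuite is a hypothetical suite with a per-test fixture and one test method.
type runSuite struct {
	scratch string
}

var _ = Suite(&runSuite{})

func (s *runSuite) SetUpTest(c *C) {
	s.scratch = c.MkDir() // a fresh temporary directory for each test
}

func (s *runSuite) TestScratchDirIsSet(c *C) {
	c.Assert(s.scratch, Not(Equals), "")
}
```

Typical invocations would then be `go test -check.v`, `go test -check.f 'runSuite.*'`, or `go test -check.b -check.btime 2s`.
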
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/LICENSE
deleted file mode 100644
index a68e67f01b..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/LICENSE
+++ /dev/null
@@ -1,188 +0,0 @@
-
-Copyright (c) 2011-2014 - Canonical Inc.
-
-This software is licensed under the LGPLv3, included below.
-
-As a special exception to the GNU Lesser General Public License version 3
-("LGPL3"), the copyright holders of this Library give you permission to
-convey to a third party a Combined Work that links statically or dynamically
-to this Library without providing any Minimal Corresponding Source or
-Minimal Application Code as set out in 4d or providing the installation
-information set out in section 4e, provided that you comply with the other
-provisions of LGPL3 and provided that you meet, for the Application the
-terms and conditions of the license(s) which apply to the Application.
-
-Except as stated in this special exception, the provisions of LGPL3 will
-continue to comply in full to this Library. If you modify this Library, you
-may apply this exception to your version of this Library, but you are not
-obliged to do so. If you do not wish to do so, delete this exception
-statement from your version. This exception does not (and cannot) modify any
-license terms which apply to the Application, with which you must still
-comply.
-
-
- GNU LESSER GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
- This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
- 0. Additional Definitions.
-
- As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
- "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
- An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
- A "Combined Work" is a work produced by combining or linking an
-Application with the Library. The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
- The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
- The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
- 1. Exception to Section 3 of the GNU GPL.
-
- You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
- 2. Conveying Modified Versions.
-
- If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
- a) under this License, provided that you make a good faith effort to
- ensure that, in the event an Application does not supply the
- function or data, the facility still operates, and performs
- whatever part of its purpose remains meaningful, or
-
- b) under the GNU GPL, with none of the additional permissions of
- this License applicable to that copy.
-
- 3. Object Code Incorporating Material from Library Header Files.
-
- The object code form of an Application may incorporate material from
-a header file that is part of the Library. You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
- a) Give prominent notice with each copy of the object code that the
- Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the object code with a copy of the GNU GPL and this license
- document.
-
- 4. Combined Works.
-
- You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
- a) Give prominent notice with each copy of the Combined Work that
- the Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the Combined Work with a copy of the GNU GPL and this license
- document.
-
- c) For a Combined Work that displays copyright notices during
- execution, include the copyright notice for the Library among
- these notices, as well as a reference directing the user to the
- copies of the GNU GPL and this license document.
-
- d) Do one of the following:
-
- 0) Convey the Minimal Corresponding Source under the terms of this
- License, and the Corresponding Application Code in a form
- suitable for, and under terms that permit, the user to
- recombine or relink the Application with a modified version of
- the Linked Version to produce a modified Combined Work, in the
- manner specified by section 6 of the GNU GPL for conveying
- Corresponding Source.
-
- 1) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (a) uses at run time
- a copy of the Library already present on the user's computer
- system, and (b) will operate properly with a modified version
- of the Library that is interface-compatible with the Linked
- Version.
-
- e) Provide Installation Information, but only if you would otherwise
- be required to provide such information under section 6 of the
- GNU GPL, and only to the extent that such information is
- necessary to install and execute a modified version of the
- Combined Work produced by recombining or relinking the
- Application with a modified version of the Linked Version. (If
- you use option 4d0, the Installation Information must accompany
- the Minimal Corresponding Source and Corresponding Application
- Code. If you use option 4d1, you must provide the Installation
- Information in the manner specified by section 6 of the GNU GPL
- for conveying Corresponding Source.)
-
- 5. Combined Libraries.
-
- You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
- a) Accompany the combined library with a copy of the same work based
- on the Library, uncombined with any other library facilities,
- conveyed under the terms of this License.
-
- b) Give prominent notice with the combined library that part of it
- is a work based on the Library, and explaining where to find the
- accompanying uncombined form of the same work.
-
- 6. Revised Versions of the GNU Lesser General Public License.
-
- The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
- If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
deleted file mode 100644
index 8da58fbf6f..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
+++ /dev/null
@@ -1,31 +0,0 @@
-The following files were ported to Go from C files of libyaml, and thus
-are still covered by their original copyright and license:
-
- apic.go
- emitterc.go
- parserc.go
- readerc.go
- scannerc.go
- writerc.go
- yamlh.go
- yamlprivateh.go
-
-Copyright (c) 2006 Kirill Simonov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/README.md b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/README.md
deleted file mode 100644
index d6c919e607..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/README.md
+++ /dev/null
@@ -1,128 +0,0 @@
-# YAML support for the Go language
-
-Introduction
-------------
-
-The yaml package enables Go programs to comfortably encode and decode YAML
-values. It was developed within [Canonical](https://www.canonical.com) as
-part of the [juju](https://juju.ubuntu.com) project, and is based on a
-pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
-C library to parse and generate YAML data quickly and reliably.
-
-Compatibility
--------------
-
-The yaml package supports most of YAML 1.1 and 1.2, including support for
-anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
-implemented, and base-60 floats from YAML 1.1 are purposefully not
-supported since they're a poor design and are gone in YAML 1.2.
-
-Installation and usage
-----------------------
-
-The import path for the package is *gopkg.in/yaml.v2*.
-
-To install it, run:
-
- go get gopkg.in/yaml.v2
-
-API documentation
------------------
-
-If opened in a browser, the import path itself leads to the API documentation:
-
- * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
-
-API stability
--------------
-
-The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
-
-
-License
--------
-
-The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
-
-
-Example
--------
-
-```Go
-package main
-
-import (
- "fmt"
- "log"
-
- "gopkg.in/yaml.v2"
-)
-
-var data = `
-a: Easy!
-b:
- c: 2
- d: [3, 4]
-`
-
-type T struct {
- A string
- B struct{C int; D []int ",flow"}
-}
-
-func main() {
- t := T{}
-
- err := yaml.Unmarshal([]byte(data), &t)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- t:\n%v\n\n", t)
-
- d, err := yaml.Marshal(&t)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- t dump:\n%s\n\n", string(d))
-
- m := make(map[interface{}]interface{})
-
- err = yaml.Unmarshal([]byte(data), &m)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- m:\n%v\n\n", m)
-
- d, err = yaml.Marshal(&m)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- m dump:\n%s\n\n", string(d))
-}
-```
-
-This example will generate the following output:
-
-```
---- t:
-{Easy! {2 [3 4]}}
-
---- t dump:
-a: Easy!
-b:
- c: 2
- d: [3, 4]
-
-
---- m:
-map[a:Easy! b:map[c:2 d:[3 4]]]
-
---- m dump:
-a: Easy!
-b:
- c: 2
- d:
- - 3
- - 4
-```
-
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/apic.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/apic.go
deleted file mode 100644
index 95ec014e8c..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/apic.go
+++ /dev/null
@@ -1,742 +0,0 @@
-package yaml
-
-import (
- "io"
- "os"
-)
-
-func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
- //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
-
- // Check if we can move the queue at the beginning of the buffer.
- if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
- if parser.tokens_head != len(parser.tokens) {
- copy(parser.tokens, parser.tokens[parser.tokens_head:])
- }
- parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
- parser.tokens_head = 0
- }
- parser.tokens = append(parser.tokens, *token)
- if pos < 0 {
- return
- }
- copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
- parser.tokens[parser.tokens_head+pos] = *token
-}
-
-// Create a new parser object.
-func yaml_parser_initialize(parser *yaml_parser_t) bool {
- *parser = yaml_parser_t{
- raw_buffer: make([]byte, 0, input_raw_buffer_size),
- buffer: make([]byte, 0, input_buffer_size),
- }
- return true
-}
-
-// Destroy a parser object.
-func yaml_parser_delete(parser *yaml_parser_t) {
- *parser = yaml_parser_t{}
-}
-
-// String read handler.
-func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
- if parser.input_pos == len(parser.input) {
- return 0, io.EOF
- }
- n = copy(buffer, parser.input[parser.input_pos:])
- parser.input_pos += n
- return n, nil
-}
-
-// File read handler.
-func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
- return parser.input_file.Read(buffer)
-}
-
-// Set a string input.
-func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
- if parser.read_handler != nil {
- panic("must set the input source only once")
- }
- parser.read_handler = yaml_string_read_handler
- parser.input = input
- parser.input_pos = 0
-}
-
-// Set a file input.
-func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
- if parser.read_handler != nil {
- panic("must set the input source only once")
- }
- parser.read_handler = yaml_file_read_handler
- parser.input_file = file
-}
-
-// Set the source encoding.
-func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
- if parser.encoding != yaml_ANY_ENCODING {
- panic("must set the encoding only once")
- }
- parser.encoding = encoding
-}
-
-// Create a new emitter object.
-func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
- *emitter = yaml_emitter_t{
- buffer: make([]byte, output_buffer_size),
- raw_buffer: make([]byte, 0, output_raw_buffer_size),
- states: make([]yaml_emitter_state_t, 0, initial_stack_size),
- events: make([]yaml_event_t, 0, initial_queue_size),
- }
- return true
-}
-
-// Destroy an emitter object.
-func yaml_emitter_delete(emitter *yaml_emitter_t) {
- *emitter = yaml_emitter_t{}
-}
-
-// String write handler.
-func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
- *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
- return nil
-}
-
-// File write handler.
-func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
- _, err := emitter.output_file.Write(buffer)
- return err
-}
-
-// Set a string output.
-func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
- if emitter.write_handler != nil {
- panic("must set the output target only once")
- }
- emitter.write_handler = yaml_string_write_handler
- emitter.output_buffer = output_buffer
-}
-
-// Set a file output.
-func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
- if emitter.write_handler != nil {
- panic("must set the output target only once")
- }
- emitter.write_handler = yaml_file_write_handler
- emitter.output_file = file
-}
-
-// Set the output encoding.
-func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
- if emitter.encoding != yaml_ANY_ENCODING {
- panic("must set the output encoding only once")
- }
- emitter.encoding = encoding
-}
-
-// Set the canonical output style.
-func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
- emitter.canonical = canonical
-}
-
-// Set the indentation increment.
-func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
- if indent < 2 || indent > 9 {
- indent = 2
- }
- emitter.best_indent = indent
-}
-
-// Set the preferred line width.
-func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
- if width < 0 {
- width = -1
- }
- emitter.best_width = width
-}
-
-// Set if unescaped non-ASCII characters are allowed.
-func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
- emitter.unicode = unicode
-}
-
-// Set the preferred line break character.
-func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
- emitter.line_break = line_break
-}
-
-///*
-// * Destroy a token object.
-// */
-//
-//YAML_DECLARE(void)
-//yaml_token_delete(yaml_token_t *token)
-//{
-// assert(token); // Non-NULL token object expected.
-//
-// switch (token.type)
-// {
-// case YAML_TAG_DIRECTIVE_TOKEN:
-// yaml_free(token.data.tag_directive.handle);
-// yaml_free(token.data.tag_directive.prefix);
-// break;
-//
-// case YAML_ALIAS_TOKEN:
-// yaml_free(token.data.alias.value);
-// break;
-//
-// case YAML_ANCHOR_TOKEN:
-// yaml_free(token.data.anchor.value);
-// break;
-//
-// case YAML_TAG_TOKEN:
-// yaml_free(token.data.tag.handle);
-// yaml_free(token.data.tag.suffix);
-// break;
-//
-// case YAML_SCALAR_TOKEN:
-// yaml_free(token.data.scalar.value);
-// break;
-//
-// default:
-// break;
-// }
-//
-// memset(token, 0, sizeof(yaml_token_t));
-//}
-//
-///*
-// * Check if a string is a valid UTF-8 sequence.
-// *
-// * Check 'reader.c' for more details on UTF-8 encoding.
-// */
-//
-//static int
-//yaml_check_utf8(yaml_char_t *start, size_t length)
-//{
-// yaml_char_t *end = start+length;
-// yaml_char_t *pointer = start;
-//
-// while (pointer < end) {
-// unsigned char octet;
-// unsigned int width;
-// unsigned int value;
-// size_t k;
-//
-// octet = pointer[0];
-// width = (octet & 0x80) == 0x00 ? 1 :
-// (octet & 0xE0) == 0xC0 ? 2 :
-// (octet & 0xF0) == 0xE0 ? 3 :
-// (octet & 0xF8) == 0xF0 ? 4 : 0;
-// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
-// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
-// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
-// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
-// if (!width) return 0;
-// if (pointer+width > end) return 0;
-// for (k = 1; k < width; k ++) {
-// octet = pointer[k];
-// if ((octet & 0xC0) != 0x80) return 0;
-// value = (value << 6) + (octet & 0x3F);
-// }
-// if (!((width == 1) ||
-// (width == 2 && value >= 0x80) ||
-// (width == 3 && value >= 0x800) ||
-// (width == 4 && value >= 0x10000))) return 0;
-//
-// pointer += width;
-// }
-//
-// return 1;
-//}
-//
-
-// Create STREAM-START.
-func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
- *event = yaml_event_t{
- typ: yaml_STREAM_START_EVENT,
- encoding: encoding,
- }
- return true
-}
-
-// Create STREAM-END.
-func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
- *event = yaml_event_t{
- typ: yaml_STREAM_END_EVENT,
- }
- return true
-}
-
-// Create DOCUMENT-START.
-func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
- tag_directives []yaml_tag_directive_t, implicit bool) bool {
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_START_EVENT,
- version_directive: version_directive,
- tag_directives: tag_directives,
- implicit: implicit,
- }
- return true
-}
-
-// Create DOCUMENT-END.
-func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_END_EVENT,
- implicit: implicit,
- }
- return true
-}
-
-///*
-// * Create ALIAS.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
-//{
-// mark yaml_mark_t = { 0, 0, 0 }
-// anchor_copy *yaml_char_t = NULL
-//
-// assert(event) // Non-NULL event object is expected.
-// assert(anchor) // Non-NULL anchor is expected.
-//
-// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
-//
-// anchor_copy = yaml_strdup(anchor)
-// if (!anchor_copy)
-// return 0
-//
-// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
-//
-// return 1
-//}
-
-// Create SCALAR.
-func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
- *event = yaml_event_t{
- typ: yaml_SCALAR_EVENT,
- anchor: anchor,
- tag: tag,
- value: value,
- implicit: plain_implicit,
- quoted_implicit: quoted_implicit,
- style: yaml_style_t(style),
- }
- return true
-}
-
-// Create SEQUENCE-START.
-func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_START_EVENT,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(style),
- }
- return true
-}
-
-// Create SEQUENCE-END.
-func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_END_EVENT,
- }
- return true
-}
-
-// Create MAPPING-START.
-func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
- *event = yaml_event_t{
- typ: yaml_MAPPING_START_EVENT,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(style),
- }
- return true
-}
-
-// Create MAPPING-END.
-func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
- *event = yaml_event_t{
- typ: yaml_MAPPING_END_EVENT,
- }
- return true
-}
-
-// Destroy an event object.
-func yaml_event_delete(event *yaml_event_t) {
- *event = yaml_event_t{}
-}
-
-///*
-// * Create a document object.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_initialize(document *yaml_document_t,
-// version_directive *yaml_version_directive_t,
-// tag_directives_start *yaml_tag_directive_t,
-// tag_directives_end *yaml_tag_directive_t,
-// start_implicit int, end_implicit int)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// struct {
-// start *yaml_node_t
-// end *yaml_node_t
-// top *yaml_node_t
-// } nodes = { NULL, NULL, NULL }
-// version_directive_copy *yaml_version_directive_t = NULL
-// struct {
-// start *yaml_tag_directive_t
-// end *yaml_tag_directive_t
-// top *yaml_tag_directive_t
-// } tag_directives_copy = { NULL, NULL, NULL }
-// value yaml_tag_directive_t = { NULL, NULL }
-// mark yaml_mark_t = { 0, 0, 0 }
-//
-// assert(document) // Non-NULL document object is expected.
-// assert((tag_directives_start && tag_directives_end) ||
-// (tag_directives_start == tag_directives_end))
-// // Valid tag directives are expected.
-//
-// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
-//
-// if (version_directive) {
-// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
-// if (!version_directive_copy) goto error
-// version_directive_copy.major = version_directive.major
-// version_directive_copy.minor = version_directive.minor
-// }
-//
-// if (tag_directives_start != tag_directives_end) {
-// tag_directive *yaml_tag_directive_t
-// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
-// goto error
-// for (tag_directive = tag_directives_start
-// tag_directive != tag_directives_end; tag_directive ++) {
-// assert(tag_directive.handle)
-// assert(tag_directive.prefix)
-// if (!yaml_check_utf8(tag_directive.handle,
-// strlen((char *)tag_directive.handle)))
-// goto error
-// if (!yaml_check_utf8(tag_directive.prefix,
-// strlen((char *)tag_directive.prefix)))
-// goto error
-// value.handle = yaml_strdup(tag_directive.handle)
-// value.prefix = yaml_strdup(tag_directive.prefix)
-// if (!value.handle || !value.prefix) goto error
-// if (!PUSH(&context, tag_directives_copy, value))
-// goto error
-// value.handle = NULL
-// value.prefix = NULL
-// }
-// }
-//
-// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
-// tag_directives_copy.start, tag_directives_copy.top,
-// start_implicit, end_implicit, mark, mark)
-//
-// return 1
-//
-//error:
-// STACK_DEL(&context, nodes)
-// yaml_free(version_directive_copy)
-// while (!STACK_EMPTY(&context, tag_directives_copy)) {
-// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
-// yaml_free(value.handle)
-// yaml_free(value.prefix)
-// }
-// STACK_DEL(&context, tag_directives_copy)
-// yaml_free(value.handle)
-// yaml_free(value.prefix)
-//
-// return 0
-//}
-//
-///*
-// * Destroy a document object.
-// */
-//
-//YAML_DECLARE(void)
-//yaml_document_delete(document *yaml_document_t)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// tag_directive *yaml_tag_directive_t
-//
-// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
-//
-// assert(document) // Non-NULL document object is expected.
-//
-// while (!STACK_EMPTY(&context, document.nodes)) {
-// node yaml_node_t = POP(&context, document.nodes)
-// yaml_free(node.tag)
-// switch (node.type) {
-// case YAML_SCALAR_NODE:
-// yaml_free(node.data.scalar.value)
-// break
-// case YAML_SEQUENCE_NODE:
-// STACK_DEL(&context, node.data.sequence.items)
-// break
-// case YAML_MAPPING_NODE:
-// STACK_DEL(&context, node.data.mapping.pairs)
-// break
-// default:
-// assert(0) // Should not happen.
-// }
-// }
-// STACK_DEL(&context, document.nodes)
-//
-// yaml_free(document.version_directive)
-// for (tag_directive = document.tag_directives.start
-// tag_directive != document.tag_directives.end
-// tag_directive++) {
-// yaml_free(tag_directive.handle)
-// yaml_free(tag_directive.prefix)
-// }
-// yaml_free(document.tag_directives.start)
-//
-// memset(document, 0, sizeof(yaml_document_t))
-//}
-//
-///**
-// * Get a document node.
-// */
-//
-//YAML_DECLARE(yaml_node_t *)
-//yaml_document_get_node(document *yaml_document_t, index int)
-//{
-// assert(document) // Non-NULL document object is expected.
-//
-// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
-// return document.nodes.start + index - 1
-// }
-// return NULL
-//}
-//
-///**
-// * Get the root object.
-// */
-//
-//YAML_DECLARE(yaml_node_t *)
-//yaml_document_get_root_node(document *yaml_document_t)
-//{
-// assert(document) // Non-NULL document object is expected.
-//
-// if (document.nodes.top != document.nodes.start) {
-// return document.nodes.start
-// }
-// return NULL
-//}
-//
-///*
-// * Add a scalar node to a document.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_add_scalar(document *yaml_document_t,
-// tag *yaml_char_t, value *yaml_char_t, length int,
-// style yaml_scalar_style_t)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// mark yaml_mark_t = { 0, 0, 0 }
-// tag_copy *yaml_char_t = NULL
-// value_copy *yaml_char_t = NULL
-// node yaml_node_t
-//
-// assert(document) // Non-NULL document object is expected.
-// assert(value) // Non-NULL value is expected.
-//
-// if (!tag) {
-// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
-// }
-//
-// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
-// tag_copy = yaml_strdup(tag)
-// if (!tag_copy) goto error
-//
-// if (length < 0) {
-// length = strlen((char *)value)
-// }
-//
-// if (!yaml_check_utf8(value, length)) goto error
-// value_copy = yaml_malloc(length+1)
-// if (!value_copy) goto error
-// memcpy(value_copy, value, length)
-// value_copy[length] = '\0'
-//
-// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
-// if (!PUSH(&context, document.nodes, node)) goto error
-//
-// return document.nodes.top - document.nodes.start
-//
-//error:
-// yaml_free(tag_copy)
-// yaml_free(value_copy)
-//
-// return 0
-//}
-//
-///*
-// * Add a sequence node to a document.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_add_sequence(document *yaml_document_t,
-// tag *yaml_char_t, style yaml_sequence_style_t)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// mark yaml_mark_t = { 0, 0, 0 }
-// tag_copy *yaml_char_t = NULL
-// struct {
-// start *yaml_node_item_t
-// end *yaml_node_item_t
-// top *yaml_node_item_t
-// } items = { NULL, NULL, NULL }
-// node yaml_node_t
-//
-// assert(document) // Non-NULL document object is expected.
-//
-// if (!tag) {
-// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
-// }
-//
-// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
-// tag_copy = yaml_strdup(tag)
-// if (!tag_copy) goto error
-//
-// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
-//
-// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
-// style, mark, mark)
-// if (!PUSH(&context, document.nodes, node)) goto error
-//
-// return document.nodes.top - document.nodes.start
-//
-//error:
-// STACK_DEL(&context, items)
-// yaml_free(tag_copy)
-//
-// return 0
-//}
-//
-///*
-// * Add a mapping node to a document.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_add_mapping(document *yaml_document_t,
-// tag *yaml_char_t, style yaml_mapping_style_t)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// mark yaml_mark_t = { 0, 0, 0 }
-// tag_copy *yaml_char_t = NULL
-// struct {
-// start *yaml_node_pair_t
-// end *yaml_node_pair_t
-// top *yaml_node_pair_t
-// } pairs = { NULL, NULL, NULL }
-// node yaml_node_t
-//
-// assert(document) // Non-NULL document object is expected.
-//
-// if (!tag) {
-// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
-// }
-//
-// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
-// tag_copy = yaml_strdup(tag)
-// if (!tag_copy) goto error
-//
-// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
-//
-// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
-// style, mark, mark)
-// if (!PUSH(&context, document.nodes, node)) goto error
-//
-// return document.nodes.top - document.nodes.start
-//
-//error:
-// STACK_DEL(&context, pairs)
-// yaml_free(tag_copy)
-//
-// return 0
-//}
-//
-///*
-// * Append an item to a sequence node.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_append_sequence_item(document *yaml_document_t,
-// sequence int, item int)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-//
-// assert(document) // Non-NULL document is required.
-// assert(sequence > 0
-// && document.nodes.start + sequence <= document.nodes.top)
-// // Valid sequence id is required.
-// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
-// // A sequence node is required.
-// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
-// // Valid item id is required.
-//
-// if (!PUSH(&context,
-// document.nodes.start[sequence-1].data.sequence.items, item))
-// return 0
-//
-// return 1
-//}
-//
-///*
-// * Append a pair of a key and a value to a mapping node.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_append_mapping_pair(document *yaml_document_t,
-// mapping int, key int, value int)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-//
-// pair yaml_node_pair_t
-//
-// assert(document) // Non-NULL document is required.
-// assert(mapping > 0
-// && document.nodes.start + mapping <= document.nodes.top)
-// // Valid mapping id is required.
-// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
-// // A mapping node is required.
-// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
-// // Valid key id is required.
-// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
-// // Valid value id is required.
-//
-// pair.key = key
-// pair.value = value
-//
-// if (!PUSH(&context,
-// document.nodes.start[mapping-1].data.mapping.pairs, pair))
-// return 0
-//
-// return 1
-//}
-//
-//
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/decode.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/decode.go
deleted file mode 100644
index ec9d271017..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/decode.go
+++ /dev/null
@@ -1,667 +0,0 @@
-package yaml
-
-import (
- "encoding"
- "encoding/base64"
- "fmt"
- "math"
- "reflect"
- "strconv"
- "time"
-)
-
-const (
- documentNode = 1 << iota
- mappingNode
- sequenceNode
- scalarNode
- aliasNode
-)
-
-type node struct {
- kind int
- line, column int
- tag string
- value string
- implicit bool
- children []*node
- anchors map[string]*node
-}
-
-// ----------------------------------------------------------------------------
-// Parser, produces a node tree out of a libyaml event stream.
-
-type parser struct {
- parser yaml_parser_t
- event yaml_event_t
- doc *node
-}
-
-func newParser(b []byte) *parser {
- p := parser{}
- if !yaml_parser_initialize(&p.parser) {
- panic("failed to initialize YAML emitter")
- }
-
- if len(b) == 0 {
- b = []byte{'\n'}
- }
-
- yaml_parser_set_input_string(&p.parser, b)
-
- p.skip()
- if p.event.typ != yaml_STREAM_START_EVENT {
- panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
- }
- p.skip()
- return &p
-}
-
-func (p *parser) destroy() {
- if p.event.typ != yaml_NO_EVENT {
- yaml_event_delete(&p.event)
- }
- yaml_parser_delete(&p.parser)
-}
-
-func (p *parser) skip() {
- if p.event.typ != yaml_NO_EVENT {
- if p.event.typ == yaml_STREAM_END_EVENT {
- failf("attempted to go past the end of stream; corrupted value?")
- }
- yaml_event_delete(&p.event)
- }
- if !yaml_parser_parse(&p.parser, &p.event) {
- p.fail()
- }
-}
-
-func (p *parser) fail() {
- var where string
- var line int
- if p.parser.problem_mark.line != 0 {
- line = p.parser.problem_mark.line
- } else if p.parser.context_mark.line != 0 {
- line = p.parser.context_mark.line
- }
- if line != 0 {
- where = "line " + strconv.Itoa(line) + ": "
- }
- var msg string
- if len(p.parser.problem) > 0 {
- msg = p.parser.problem
- } else {
- msg = "unknown problem parsing YAML content"
- }
- failf("%s%s", where, msg)
-}
-
-func (p *parser) anchor(n *node, anchor []byte) {
- if anchor != nil {
- p.doc.anchors[string(anchor)] = n
- }
-}
-
-func (p *parser) parse() *node {
- switch p.event.typ {
- case yaml_SCALAR_EVENT:
- return p.scalar()
- case yaml_ALIAS_EVENT:
- return p.alias()
- case yaml_MAPPING_START_EVENT:
- return p.mapping()
- case yaml_SEQUENCE_START_EVENT:
- return p.sequence()
- case yaml_DOCUMENT_START_EVENT:
- return p.document()
- case yaml_STREAM_END_EVENT:
- // Happens when attempting to decode an empty buffer.
- return nil
- default:
- panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
- }
- panic("unreachable")
-}
-
-func (p *parser) node(kind int) *node {
- return &node{
- kind: kind,
- line: p.event.start_mark.line,
- column: p.event.start_mark.column,
- }
-}
-
-func (p *parser) document() *node {
- n := p.node(documentNode)
- n.anchors = make(map[string]*node)
- p.doc = n
- p.skip()
- n.children = append(n.children, p.parse())
- if p.event.typ != yaml_DOCUMENT_END_EVENT {
- panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
- }
- p.skip()
- return n
-}
-
-func (p *parser) alias() *node {
- n := p.node(aliasNode)
- n.value = string(p.event.anchor)
- p.skip()
- return n
-}
-
-func (p *parser) scalar() *node {
- n := p.node(scalarNode)
- n.value = string(p.event.value)
- n.tag = string(p.event.tag)
- n.implicit = p.event.implicit
- p.anchor(n, p.event.anchor)
- p.skip()
- return n
-}
-
-func (p *parser) sequence() *node {
- n := p.node(sequenceNode)
- p.anchor(n, p.event.anchor)
- p.skip()
- for p.event.typ != yaml_SEQUENCE_END_EVENT {
- n.children = append(n.children, p.parse())
- }
- p.skip()
- return n
-}
-
-func (p *parser) mapping() *node {
- n := p.node(mappingNode)
- p.anchor(n, p.event.anchor)
- p.skip()
- for p.event.typ != yaml_MAPPING_END_EVENT {
- n.children = append(n.children, p.parse(), p.parse())
- }
- p.skip()
- return n
-}
-
-// ----------------------------------------------------------------------------
-// Decoder, unmarshals a node into a provided value.
-
-type decoder struct {
- doc *node
- aliases map[string]bool
- mapType reflect.Type
- terrors []string
-}
-
-var (
- mapItemType = reflect.TypeOf(MapItem{})
- durationType = reflect.TypeOf(time.Duration(0))
- defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
- ifaceType = defaultMapType.Elem()
-)
-
-func newDecoder() *decoder {
- d := &decoder{mapType: defaultMapType}
- d.aliases = make(map[string]bool)
- return d
-}
-
-func (d *decoder) terror(n *node, tag string, out reflect.Value) {
- if n.tag != "" {
- tag = n.tag
- }
- value := n.value
- if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
- if len(value) > 10 {
- value = " `" + value[:7] + "...`"
- } else {
- value = " `" + value + "`"
- }
- }
- d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
-}
-
-func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
- terrlen := len(d.terrors)
- err := u.UnmarshalYAML(func(v interface{}) (err error) {
- defer handleErr(&err)
- d.unmarshal(n, reflect.ValueOf(v))
- if len(d.terrors) > terrlen {
- issues := d.terrors[terrlen:]
- d.terrors = d.terrors[:terrlen]
- return &TypeError{issues}
- }
- return nil
- })
- if e, ok := err.(*TypeError); ok {
- d.terrors = append(d.terrors, e.Errors...)
- return false
- }
- if err != nil {
- fail(err)
- }
- return true
-}
-
-// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
-// if a value is found to implement it.
-// It returns the initialized and dereferenced out value, whether
-// unmarshalling was already done by UnmarshalYAML, and if so whether
-// its types unmarshalled appropriately.
-//
-// If n holds a null value, prepare returns before doing anything.
-func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
- if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
- return out, false, false
- }
- again := true
- for again {
- again = false
- if out.Kind() == reflect.Ptr {
- if out.IsNil() {
- out.Set(reflect.New(out.Type().Elem()))
- }
- out = out.Elem()
- again = true
- }
- if out.CanAddr() {
- if u, ok := out.Addr().Interface().(Unmarshaler); ok {
- good = d.callUnmarshaler(n, u)
- return out, true, good
- }
- }
- }
- return out, false, false
-}
-
-func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
- switch n.kind {
- case documentNode:
- return d.document(n, out)
- case aliasNode:
- return d.alias(n, out)
- }
- out, unmarshaled, good := d.prepare(n, out)
- if unmarshaled {
- return good
- }
- switch n.kind {
- case scalarNode:
- good = d.scalar(n, out)
- case mappingNode:
- good = d.mapping(n, out)
- case sequenceNode:
- good = d.sequence(n, out)
- default:
- panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
- }
- return good
-}
-
-func (d *decoder) document(n *node, out reflect.Value) (good bool) {
- if len(n.children) == 1 {
- d.doc = n
- d.unmarshal(n.children[0], out)
- return true
- }
- return false
-}
-
-func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
- an, ok := d.doc.anchors[n.value]
- if !ok {
- failf("unknown anchor '%s' referenced", n.value)
- }
- if d.aliases[n.value] {
- failf("anchor '%s' value contains itself", n.value)
- }
- d.aliases[n.value] = true
- good = d.unmarshal(an, out)
- delete(d.aliases, n.value)
- return good
-}
-
-var zeroValue reflect.Value
-
-func resetMap(out reflect.Value) {
- for _, k := range out.MapKeys() {
- out.SetMapIndex(k, zeroValue)
- }
-}
-
-func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
- var tag string
- var resolved interface{}
- if n.tag == "" && !n.implicit {
- tag = yaml_STR_TAG
- resolved = n.value
- } else {
- tag, resolved = resolve(n.tag, n.value)
- if tag == yaml_BINARY_TAG {
- data, err := base64.StdEncoding.DecodeString(resolved.(string))
- if err != nil {
- failf("!!binary value contains invalid base64 data")
- }
- resolved = string(data)
- }
- }
- if resolved == nil {
- if out.Kind() == reflect.Map && !out.CanAddr() {
- resetMap(out)
- } else {
- out.Set(reflect.Zero(out.Type()))
- }
- return true
- }
- if s, ok := resolved.(string); ok && out.CanAddr() {
- if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
- err := u.UnmarshalText([]byte(s))
- if err != nil {
- fail(err)
- }
- return true
- }
- }
- switch out.Kind() {
- case reflect.String:
- if tag == yaml_BINARY_TAG {
- out.SetString(resolved.(string))
- good = true
- } else if resolved != nil {
- out.SetString(n.value)
- good = true
- }
- case reflect.Interface:
- if resolved == nil {
- out.Set(reflect.Zero(out.Type()))
- } else {
- out.Set(reflect.ValueOf(resolved))
- }
- good = true
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- switch resolved := resolved.(type) {
- case int:
- if !out.OverflowInt(int64(resolved)) {
- out.SetInt(int64(resolved))
- good = true
- }
- case int64:
- if !out.OverflowInt(resolved) {
- out.SetInt(resolved)
- good = true
- }
- case uint64:
- if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
- out.SetInt(int64(resolved))
- good = true
- }
- case float64:
- if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
- out.SetInt(int64(resolved))
- good = true
- }
- case string:
- if out.Type() == durationType {
- d, err := time.ParseDuration(resolved)
- if err == nil {
- out.SetInt(int64(d))
- good = true
- }
- }
- }
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- switch resolved := resolved.(type) {
- case int:
- if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
- out.SetUint(uint64(resolved))
- good = true
- }
- case int64:
- if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
- out.SetUint(uint64(resolved))
- good = true
- }
- case uint64:
- if !out.OverflowUint(uint64(resolved)) {
- out.SetUint(uint64(resolved))
- good = true
- }
- case float64:
- if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
- out.SetUint(uint64(resolved))
- good = true
- }
- }
- case reflect.Bool:
- switch resolved := resolved.(type) {
- case bool:
- out.SetBool(resolved)
- good = true
- }
- case reflect.Float32, reflect.Float64:
- switch resolved := resolved.(type) {
- case int:
- out.SetFloat(float64(resolved))
- good = true
- case int64:
- out.SetFloat(float64(resolved))
- good = true
- case uint64:
- out.SetFloat(float64(resolved))
- good = true
- case float64:
- out.SetFloat(resolved)
- good = true
- }
- case reflect.Ptr:
- if out.Type().Elem() == reflect.TypeOf(resolved) {
- // TODO Does this make sense? When is out a Ptr except when decoding a nil value?
- elem := reflect.New(out.Type().Elem())
- elem.Elem().Set(reflect.ValueOf(resolved))
- out.Set(elem)
- good = true
- }
- }
- if !good {
- d.terror(n, tag, out)
- }
- return good
-}
-
-func settableValueOf(i interface{}) reflect.Value {
- v := reflect.ValueOf(i)
- sv := reflect.New(v.Type()).Elem()
- sv.Set(v)
- return sv
-}
-
-func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
- l := len(n.children)
-
- var iface reflect.Value
- switch out.Kind() {
- case reflect.Slice:
- out.Set(reflect.MakeSlice(out.Type(), l, l))
- case reflect.Interface:
- // No type hints. Will have to use a generic sequence.
- iface = out
- out = settableValueOf(make([]interface{}, l))
- default:
- d.terror(n, yaml_SEQ_TAG, out)
- return false
- }
- et := out.Type().Elem()
-
- j := 0
- for i := 0; i < l; i++ {
- e := reflect.New(et).Elem()
- if ok := d.unmarshal(n.children[i], e); ok {
- out.Index(j).Set(e)
- j++
- }
- }
- out.Set(out.Slice(0, j))
- if iface.IsValid() {
- iface.Set(out)
- }
- return true
-}
-
-func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
- switch out.Kind() {
- case reflect.Struct:
- return d.mappingStruct(n, out)
- case reflect.Slice:
- return d.mappingSlice(n, out)
- case reflect.Map:
- // okay
- case reflect.Interface:
- if d.mapType.Kind() == reflect.Map {
- iface := out
- out = reflect.MakeMap(d.mapType)
- iface.Set(out)
- } else {
- slicev := reflect.New(d.mapType).Elem()
- if !d.mappingSlice(n, slicev) {
- return false
- }
- out.Set(slicev)
- return true
- }
- default:
- d.terror(n, yaml_MAP_TAG, out)
- return false
- }
- outt := out.Type()
- kt := outt.Key()
- et := outt.Elem()
-
- mapType := d.mapType
- if outt.Key() == ifaceType && outt.Elem() == ifaceType {
- d.mapType = outt
- }
-
- if out.IsNil() {
- out.Set(reflect.MakeMap(outt))
- }
- l := len(n.children)
- for i := 0; i < l; i += 2 {
- if isMerge(n.children[i]) {
- d.merge(n.children[i+1], out)
- continue
- }
- k := reflect.New(kt).Elem()
- if d.unmarshal(n.children[i], k) {
- kkind := k.Kind()
- if kkind == reflect.Interface {
- kkind = k.Elem().Kind()
- }
- if kkind == reflect.Map || kkind == reflect.Slice {
- failf("invalid map key: %#v", k.Interface())
- }
- e := reflect.New(et).Elem()
- if d.unmarshal(n.children[i+1], e) {
- out.SetMapIndex(k, e)
- }
- }
- }
- d.mapType = mapType
- return true
-}
-
-func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
- outt := out.Type()
- if outt.Elem() != mapItemType {
- d.terror(n, yaml_MAP_TAG, out)
- return false
- }
-
- mapType := d.mapType
- d.mapType = outt
-
- var slice []MapItem
- var l = len(n.children)
- for i := 0; i < l; i += 2 {
- if isMerge(n.children[i]) {
- d.merge(n.children[i+1], out)
- continue
- }
- item := MapItem{}
- k := reflect.ValueOf(&item.Key).Elem()
- if d.unmarshal(n.children[i], k) {
- v := reflect.ValueOf(&item.Value).Elem()
- if d.unmarshal(n.children[i+1], v) {
- slice = append(slice, item)
- }
- }
- }
- out.Set(reflect.ValueOf(slice))
- d.mapType = mapType
- return true
-}
-
-func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
- sinfo, err := getStructInfo(out.Type())
- if err != nil {
- panic(err)
- }
- name := settableValueOf("")
- l := len(n.children)
- for i := 0; i < l; i += 2 {
- ni := n.children[i]
- if isMerge(ni) {
- d.merge(n.children[i+1], out)
- continue
- }
- if !d.unmarshal(ni, name) {
- continue
- }
- if info, ok := sinfo.FieldsMap[name.String()]; ok {
- var field reflect.Value
- if info.Inline == nil {
- field = out.Field(info.Num)
- } else {
- field = out.FieldByIndex(info.Inline)
- }
- d.unmarshal(n.children[i+1], field)
- }
- }
- return true
-}
-
-func failWantMap() {
- failf("map merge requires map or sequence of maps as the value")
-}
-
-func (d *decoder) merge(n *node, out reflect.Value) {
- switch n.kind {
- case mappingNode:
- d.unmarshal(n, out)
- case aliasNode:
- an, ok := d.doc.anchors[n.value]
- if ok && an.kind != mappingNode {
- failWantMap()
- }
- d.unmarshal(n, out)
- case sequenceNode:
- // Step backwards as earlier nodes take precedence.
- for i := len(n.children) - 1; i >= 0; i-- {
- ni := n.children[i]
- if ni.kind == aliasNode {
- an, ok := d.doc.anchors[ni.value]
- if ok && an.kind != mappingNode {
- failWantMap()
- }
- } else if ni.kind != mappingNode {
- failWantMap()
- }
- d.unmarshal(ni, out)
- }
- default:
- failWantMap()
- }
-}
-
-func isMerge(n *node) bool {
- return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
-}
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/emitterc.go
deleted file mode 100644
index 9b3dc4a437..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/emitterc.go
+++ /dev/null
@@ -1,1685 +0,0 @@
-package yaml
-
-import (
- "bytes"
-)
-
-// Flush the buffer if needed.
-func flush(emitter *yaml_emitter_t) bool {
- if emitter.buffer_pos+5 >= len(emitter.buffer) {
- return yaml_emitter_flush(emitter)
- }
- return true
-}
-
-// Put a character to the output buffer.
-func put(emitter *yaml_emitter_t, value byte) bool {
- if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
- return false
- }
- emitter.buffer[emitter.buffer_pos] = value
- emitter.buffer_pos++
- emitter.column++
- return true
-}
-
-// Put a line break to the output buffer.
-func put_break(emitter *yaml_emitter_t) bool {
- if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
- return false
- }
- switch emitter.line_break {
- case yaml_CR_BREAK:
- emitter.buffer[emitter.buffer_pos] = '\r'
- emitter.buffer_pos += 1
- case yaml_LN_BREAK:
- emitter.buffer[emitter.buffer_pos] = '\n'
- emitter.buffer_pos += 1
- case yaml_CRLN_BREAK:
- emitter.buffer[emitter.buffer_pos+0] = '\r'
- emitter.buffer[emitter.buffer_pos+1] = '\n'
- emitter.buffer_pos += 2
- default:
- panic("unknown line break setting")
- }
- emitter.column = 0
- emitter.line++
- return true
-}
-
-// Copy a character from a string into buffer.
-func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
- if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
- return false
- }
- p := emitter.buffer_pos
- w := width(s[*i])
- switch w {
- case 4:
- emitter.buffer[p+3] = s[*i+3]
- fallthrough
- case 3:
- emitter.buffer[p+2] = s[*i+2]
- fallthrough
- case 2:
- emitter.buffer[p+1] = s[*i+1]
- fallthrough
- case 1:
- emitter.buffer[p+0] = s[*i+0]
- default:
- panic("unknown character width")
- }
- emitter.column++
- emitter.buffer_pos += w
- *i += w
- return true
-}
-
-// Write a whole string into buffer.
-func write_all(emitter *yaml_emitter_t, s []byte) bool {
- for i := 0; i < len(s); {
- if !write(emitter, s, &i) {
- return false
- }
- }
- return true
-}
-
-// Copy a line break character from a string into buffer.
-func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
- if s[*i] == '\n' {
- if !put_break(emitter) {
- return false
- }
- *i++
- } else {
- if !write(emitter, s, i) {
- return false
- }
- emitter.column = 0
- emitter.line++
- }
- return true
-}
-
-// Set an emitter error and return false.
-func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
- emitter.error = yaml_EMITTER_ERROR
- emitter.problem = problem
- return false
-}
-
-// Emit an event.
-func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- emitter.events = append(emitter.events, *event)
- for !yaml_emitter_need_more_events(emitter) {
- event := &emitter.events[emitter.events_head]
- if !yaml_emitter_analyze_event(emitter, event) {
- return false
- }
- if !yaml_emitter_state_machine(emitter, event) {
- return false
- }
- yaml_event_delete(event)
- emitter.events_head++
- }
- return true
-}
-
-// Check if we need to accumulate more events before emitting.
-//
-// We accumulate extra
-// - 1 event for DOCUMENT-START
-// - 2 events for SEQUENCE-START
-// - 3 events for MAPPING-START
-//
-func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
- if emitter.events_head == len(emitter.events) {
- return true
- }
- var accumulate int
- switch emitter.events[emitter.events_head].typ {
- case yaml_DOCUMENT_START_EVENT:
- accumulate = 1
- case yaml_SEQUENCE_START_EVENT:
- accumulate = 2
- case yaml_MAPPING_START_EVENT:
- accumulate = 3
- default:
- return false
- }
- if len(emitter.events)-emitter.events_head > accumulate {
- return false
- }
- var level int
- for i := emitter.events_head; i < len(emitter.events); i++ {
- switch emitter.events[i].typ {
- case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
- level++
- case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
- level--
- }
- if level == 0 {
- return false
- }
- }
- return true
-}
-
-// Append a directive to the directives stack.
-func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
- for i := 0; i < len(emitter.tag_directives); i++ {
- if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
- if allow_duplicates {
- return true
- }
- return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
- }
- }
-
- // [Go] Do we actually need to copy this given garbage collection
- // and the lack of deallocating destructors?
- tag_copy := yaml_tag_directive_t{
- handle: make([]byte, len(value.handle)),
- prefix: make([]byte, len(value.prefix)),
- }
- copy(tag_copy.handle, value.handle)
- copy(tag_copy.prefix, value.prefix)
- emitter.tag_directives = append(emitter.tag_directives, tag_copy)
- return true
-}
-
-// Increase the indentation level.
-func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
- emitter.indents = append(emitter.indents, emitter.indent)
- if emitter.indent < 0 {
- if flow {
- emitter.indent = emitter.best_indent
- } else {
- emitter.indent = 0
- }
- } else if !indentless {
- emitter.indent += emitter.best_indent
- }
- return true
-}
-
-// State dispatcher.
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- switch emitter.state {
- default:
- case yaml_EMIT_STREAM_START_STATE:
- return yaml_emitter_emit_stream_start(emitter, event)
-
- case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
- return yaml_emitter_emit_document_start(emitter, event, true)
-
- case yaml_EMIT_DOCUMENT_START_STATE:
- return yaml_emitter_emit_document_start(emitter, event, false)
-
- case yaml_EMIT_DOCUMENT_CONTENT_STATE:
- return yaml_emitter_emit_document_content(emitter, event)
-
- case yaml_EMIT_DOCUMENT_END_STATE:
- return yaml_emitter_emit_document_end(emitter, event)
-
- case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
- return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
-
- case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
- return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
-
- case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
- return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
-
- case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
- return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
-
- case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
- return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
-
- case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
- return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
-
- case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
- return yaml_emitter_emit_block_sequence_item(emitter, event, true)
-
- case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
- return yaml_emitter_emit_block_sequence_item(emitter, event, false)
-
- case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
- return yaml_emitter_emit_block_mapping_key(emitter, event, true)
-
- case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
- return yaml_emitter_emit_block_mapping_key(emitter, event, false)
-
- case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
- return yaml_emitter_emit_block_mapping_value(emitter, event, true)
-
- case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
- return yaml_emitter_emit_block_mapping_value(emitter, event, false)
-
- case yaml_EMIT_END_STATE:
- return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
- }
- panic("invalid emitter state")
-}
-
-// Expect STREAM-START.
-func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if event.typ != yaml_STREAM_START_EVENT {
- return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
- }
- if emitter.encoding == yaml_ANY_ENCODING {
- emitter.encoding = event.encoding
- if emitter.encoding == yaml_ANY_ENCODING {
- emitter.encoding = yaml_UTF8_ENCODING
- }
- }
- if emitter.best_indent < 2 || emitter.best_indent > 9 {
- emitter.best_indent = 2
- }
- if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
- emitter.best_width = 80
- }
- if emitter.best_width < 0 {
- emitter.best_width = 1<<31 - 1
- }
- if emitter.line_break == yaml_ANY_BREAK {
- emitter.line_break = yaml_LN_BREAK
- }
-
- emitter.indent = -1
- emitter.line = 0
- emitter.column = 0
- emitter.whitespace = true
- emitter.indention = true
-
- if emitter.encoding != yaml_UTF8_ENCODING {
- if !yaml_emitter_write_bom(emitter) {
- return false
- }
- }
- emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
- return true
-}
-
-// Expect DOCUMENT-START or STREAM-END.
-func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
-
- if event.typ == yaml_DOCUMENT_START_EVENT {
-
- if event.version_directive != nil {
- if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
- return false
- }
- }
-
- for i := 0; i < len(event.tag_directives); i++ {
- tag_directive := &event.tag_directives[i]
- if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
- return false
- }
- if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
- return false
- }
- }
-
- for i := 0; i < len(default_tag_directives); i++ {
- tag_directive := &default_tag_directives[i]
- if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
- return false
- }
- }
-
- implicit := event.implicit
- if !first || emitter.canonical {
- implicit = false
- }
-
- if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
-
- if event.version_directive != nil {
- implicit = false
- if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
-
- if len(event.tag_directives) > 0 {
- implicit = false
- for i := 0; i < len(event.tag_directives); i++ {
- tag_directive := &event.tag_directives[i]
- if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
- return false
- }
- if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
- return false
- }
- if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- }
-
- if yaml_emitter_check_empty_document(emitter) {
- implicit = false
- }
- if !implicit {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
- return false
- }
- if emitter.canonical {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- }
-
- emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
- return true
- }
-
- if event.typ == yaml_STREAM_END_EVENT {
- if emitter.open_ended {
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_flush(emitter) {
- return false
- }
- emitter.state = yaml_EMIT_END_STATE
- return true
- }
-
- return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
-}
-
-// Expect the root node.
-func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
- return yaml_emitter_emit_node(emitter, event, true, false, false, false)
-}
-
-// Expect DOCUMENT-END.
-func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if event.typ != yaml_DOCUMENT_END_EVENT {
- return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !event.implicit {
- // [Go] Allocate the slice elsewhere.
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_flush(emitter) {
- return false
- }
- emitter.state = yaml_EMIT_DOCUMENT_START_STATE
- emitter.tag_directives = emitter.tag_directives[:0]
- return true
-}
-
-// Expect a flow item node.
-func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
- if first {
- if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
- return false
- }
- if !yaml_emitter_increase_indent(emitter, true, false) {
- return false
- }
- emitter.flow_level++
- }
-
- if event.typ == yaml_SEQUENCE_END_EVENT {
- emitter.flow_level--
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- if emitter.canonical && !first {
- if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
- return false
- }
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
-
- return true
- }
-
- if !first {
- if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
- return false
- }
- }
-
- if emitter.canonical || emitter.column > emitter.best_width {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
- return yaml_emitter_emit_node(emitter, event, false, true, false, false)
-}
-
-// Expect a flow key node.
-func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
- if first {
- if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
- return false
- }
- if !yaml_emitter_increase_indent(emitter, true, false) {
- return false
- }
- emitter.flow_level++
- }
-
- if event.typ == yaml_MAPPING_END_EVENT {
- emitter.flow_level--
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- if emitter.canonical && !first {
- if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
- return false
- }
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
- }
-
- if !first {
- if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
- return false
- }
- }
- if emitter.canonical || emitter.column > emitter.best_width {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
-
- if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
- emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, true)
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
- return false
- }
- emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, false)
-}
-
-// Expect a flow value node.
-func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
- if simple {
- if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
- return false
- }
- } else {
- if emitter.canonical || emitter.column > emitter.best_width {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
- return false
- }
- }
- emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, false)
-}
-
-// Expect a block item node.
-func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
- if first {
- if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
- return false
- }
- }
- if event.typ == yaml_SEQUENCE_END_EVENT {
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
- return false
- }
- emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
- return yaml_emitter_emit_node(emitter, event, false, true, false, false)
-}
-
-// Expect a block key node.
-func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
- if first {
- if !yaml_emitter_increase_indent(emitter, false, false) {
- return false
- }
- }
- if event.typ == yaml_MAPPING_END_EVENT {
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if yaml_emitter_check_simple_key(emitter) {
- emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, true)
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
- return false
- }
- emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, false)
-}
-
-// Expect a block value node.
-func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
- if simple {
- if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
- return false
- }
- } else {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
- return false
- }
- }
- emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, false)
-}
-
-// Expect a node.
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
- root bool, sequence bool, mapping bool, simple_key bool) bool {
-
- emitter.root_context = root
- emitter.sequence_context = sequence
- emitter.mapping_context = mapping
- emitter.simple_key_context = simple_key
-
- switch event.typ {
- case yaml_ALIAS_EVENT:
- return yaml_emitter_emit_alias(emitter, event)
- case yaml_SCALAR_EVENT:
- return yaml_emitter_emit_scalar(emitter, event)
- case yaml_SEQUENCE_START_EVENT:
- return yaml_emitter_emit_sequence_start(emitter, event)
- case yaml_MAPPING_START_EVENT:
- return yaml_emitter_emit_mapping_start(emitter, event)
- default:
- return yaml_emitter_set_emitter_error(emitter,
- "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
- }
- return false
-}
-
-// Expect ALIAS.
-func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if !yaml_emitter_process_anchor(emitter) {
- return false
- }
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
-}
-
-// Expect SCALAR.
-func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if !yaml_emitter_select_scalar_style(emitter, event) {
- return false
- }
- if !yaml_emitter_process_anchor(emitter) {
- return false
- }
- if !yaml_emitter_process_tag(emitter) {
- return false
- }
- if !yaml_emitter_increase_indent(emitter, true, false) {
- return false
- }
- if !yaml_emitter_process_scalar(emitter) {
- return false
- }
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
-}
-
-// Expect SEQUENCE-START.
-func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if !yaml_emitter_process_anchor(emitter) {
- return false
- }
- if !yaml_emitter_process_tag(emitter) {
- return false
- }
- if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
- yaml_emitter_check_empty_sequence(emitter) {
- emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
- } else {
- emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
- }
- return true
-}
-
-// Expect MAPPING-START.
-func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if !yaml_emitter_process_anchor(emitter) {
- return false
- }
- if !yaml_emitter_process_tag(emitter) {
- return false
- }
- if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
- yaml_emitter_check_empty_mapping(emitter) {
- emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
- } else {
- emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
- }
- return true
-}
-
-// Check if the document content is an empty scalar.
-func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
- return false // [Go] Huh?
-}
-
-// Check if the next events represent an empty sequence.
-func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
- if len(emitter.events)-emitter.events_head < 2 {
- return false
- }
- return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
- emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
-}
-
-// Check if the next events represent an empty mapping.
-func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
- if len(emitter.events)-emitter.events_head < 2 {
- return false
- }
- return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
- emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
-}
-
-// Check if the next node can be expressed as a simple key.
-func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
- length := 0
- switch emitter.events[emitter.events_head].typ {
- case yaml_ALIAS_EVENT:
- length += len(emitter.anchor_data.anchor)
- case yaml_SCALAR_EVENT:
- if emitter.scalar_data.multiline {
- return false
- }
- length += len(emitter.anchor_data.anchor) +
- len(emitter.tag_data.handle) +
- len(emitter.tag_data.suffix) +
- len(emitter.scalar_data.value)
- case yaml_SEQUENCE_START_EVENT:
- if !yaml_emitter_check_empty_sequence(emitter) {
- return false
- }
- length += len(emitter.anchor_data.anchor) +
- len(emitter.tag_data.handle) +
- len(emitter.tag_data.suffix)
- case yaml_MAPPING_START_EVENT:
- if !yaml_emitter_check_empty_mapping(emitter) {
- return false
- }
- length += len(emitter.anchor_data.anchor) +
- len(emitter.tag_data.handle) +
- len(emitter.tag_data.suffix)
- default:
- return false
- }
- return length <= 128
-}
-
-// Determine an acceptable scalar style.
-func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-
- no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
- if no_tag && !event.implicit && !event.quoted_implicit {
- return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
- }
-
- style := event.scalar_style()
- if style == yaml_ANY_SCALAR_STYLE {
- style = yaml_PLAIN_SCALAR_STYLE
- }
- if emitter.canonical {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
- if emitter.simple_key_context && emitter.scalar_data.multiline {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
-
- if style == yaml_PLAIN_SCALAR_STYLE {
- if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
- emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
- style = yaml_SINGLE_QUOTED_SCALAR_STYLE
- }
- if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
- style = yaml_SINGLE_QUOTED_SCALAR_STYLE
- }
- if no_tag && !event.implicit {
- style = yaml_SINGLE_QUOTED_SCALAR_STYLE
- }
- }
- if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
- if !emitter.scalar_data.single_quoted_allowed {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
- }
- if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
- if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
- }
-
- if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
- emitter.tag_data.handle = []byte{'!'}
- }
- emitter.scalar_data.style = style
- return true
-}
-
-// Write an anchor.
-func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
- if emitter.anchor_data.anchor == nil {
- return true
- }
- c := []byte{'&'}
- if emitter.anchor_data.alias {
- c[0] = '*'
- }
- if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
- return false
- }
- return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
-}
-
-// Write a tag.
-func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
- if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
- return true
- }
- if len(emitter.tag_data.handle) > 0 {
- if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
- return false
- }
- if len(emitter.tag_data.suffix) > 0 {
- if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
- return false
- }
- }
- } else {
- // [Go] Allocate these slices elsewhere.
- if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
- return false
- }
- if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
- return false
- }
- }
- return true
-}
-
-// Write a scalar.
-func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
- switch emitter.scalar_data.style {
- case yaml_PLAIN_SCALAR_STYLE:
- return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
-
- case yaml_SINGLE_QUOTED_SCALAR_STYLE:
- return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
-
- case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
- return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
-
- case yaml_LITERAL_SCALAR_STYLE:
- return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
-
- case yaml_FOLDED_SCALAR_STYLE:
- return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
- }
- panic("unknown scalar style")
-}
-
-// Check if a %YAML directive is valid.
-func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
- if version_directive.major != 1 || version_directive.minor != 1 {
- return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
- }
- return true
-}
-
-// Check if a %TAG directive is valid.
-func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
- handle := tag_directive.handle
- prefix := tag_directive.prefix
- if len(handle) == 0 {
- return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
- }
- if handle[0] != '!' {
- return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
- }
- if handle[len(handle)-1] != '!' {
- return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
- }
- for i := 1; i < len(handle)-1; i += width(handle[i]) {
- if !is_alpha(handle, i) {
- return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
- }
- }
- if len(prefix) == 0 {
- return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
- }
- return true
-}
-
-// Check if an anchor is valid.
-func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
- if len(anchor) == 0 {
- problem := "anchor value must not be empty"
- if alias {
- problem = "alias value must not be empty"
- }
- return yaml_emitter_set_emitter_error(emitter, problem)
- }
- for i := 0; i < len(anchor); i += width(anchor[i]) {
- if !is_alpha(anchor, i) {
- problem := "anchor value must contain alphanumerical characters only"
- if alias {
- problem = "alias value must contain alphanumerical characters only"
- }
- return yaml_emitter_set_emitter_error(emitter, problem)
- }
- }
- emitter.anchor_data.anchor = anchor
- emitter.anchor_data.alias = alias
- return true
-}
-
-// Check if a tag is valid.
-func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
- if len(tag) == 0 {
- return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
- }
- for i := 0; i < len(emitter.tag_directives); i++ {
- tag_directive := &emitter.tag_directives[i]
- if bytes.HasPrefix(tag, tag_directive.prefix) {
- emitter.tag_data.handle = tag_directive.handle
- emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
- return true
- }
- }
- emitter.tag_data.suffix = tag
- return true
-}
-
-// Check if a scalar is valid.
-func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
- var (
- block_indicators = false
- flow_indicators = false
- line_breaks = false
- special_characters = false
-
- leading_space = false
- leading_break = false
- trailing_space = false
- trailing_break = false
- break_space = false
- space_break = false
-
- preceded_by_whitespace = false
- followed_by_whitespace = false
- previous_space = false
- previous_break = false
- )
-
- emitter.scalar_data.value = value
-
- if len(value) == 0 {
- emitter.scalar_data.multiline = false
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = true
- emitter.scalar_data.single_quoted_allowed = true
- emitter.scalar_data.block_allowed = false
- return true
- }
-
- if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
- block_indicators = true
- flow_indicators = true
- }
-
- preceded_by_whitespace = true
- for i, w := 0, 0; i < len(value); i += w {
- w = width(value[i])
- followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
-
- if i == 0 {
- switch value[i] {
- case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
- flow_indicators = true
- block_indicators = true
- case '?', ':':
- flow_indicators = true
- if followed_by_whitespace {
- block_indicators = true
- }
- case '-':
- if followed_by_whitespace {
- flow_indicators = true
- block_indicators = true
- }
- }
- } else {
- switch value[i] {
- case ',', '?', '[', ']', '{', '}':
- flow_indicators = true
- case ':':
- flow_indicators = true
- if followed_by_whitespace {
- block_indicators = true
- }
- case '#':
- if preceded_by_whitespace {
- flow_indicators = true
- block_indicators = true
- }
- }
- }
-
- if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
- special_characters = true
- }
- if is_space(value, i) {
- if i == 0 {
- leading_space = true
- }
- if i+width(value[i]) == len(value) {
- trailing_space = true
- }
- if previous_break {
- break_space = true
- }
- previous_space = true
- previous_break = false
- } else if is_break(value, i) {
- line_breaks = true
- if i == 0 {
- leading_break = true
- }
- if i+width(value[i]) == len(value) {
- trailing_break = true
- }
- if previous_space {
- space_break = true
- }
- previous_space = false
- previous_break = true
- } else {
- previous_space = false
- previous_break = false
- }
-
- // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
- preceded_by_whitespace = is_blankz(value, i)
- }
-
- emitter.scalar_data.multiline = line_breaks
- emitter.scalar_data.flow_plain_allowed = true
- emitter.scalar_data.block_plain_allowed = true
- emitter.scalar_data.single_quoted_allowed = true
- emitter.scalar_data.block_allowed = true
-
- if leading_space || leading_break || trailing_space || trailing_break {
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = false
- }
- if trailing_space {
- emitter.scalar_data.block_allowed = false
- }
- if break_space {
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = false
- emitter.scalar_data.single_quoted_allowed = false
- }
- if space_break || special_characters {
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = false
- emitter.scalar_data.single_quoted_allowed = false
- emitter.scalar_data.block_allowed = false
- }
- if line_breaks {
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = false
- }
- if flow_indicators {
- emitter.scalar_data.flow_plain_allowed = false
- }
- if block_indicators {
- emitter.scalar_data.block_plain_allowed = false
- }
- return true
-}
-
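-// Worked example (added commentary, not part of the upstream file): for the
-// value "a: b" the analysis above sees ':' followed by a blank, so both
-// flow_indicators and block_indicators are set and plain style is ruled out
-// in either context; the emitter then falls back to a quoted style. A value
-// such as "plain" sets none of the flags and stays eligible for plain style.
-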
-// Check if the event data is valid.
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-
- emitter.anchor_data.anchor = nil
- emitter.tag_data.handle = nil
- emitter.tag_data.suffix = nil
- emitter.scalar_data.value = nil
-
- switch event.typ {
- case yaml_ALIAS_EVENT:
- if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
- return false
- }
-
- case yaml_SCALAR_EVENT:
- if len(event.anchor) > 0 {
- if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
- return false
- }
- }
- if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
- if !yaml_emitter_analyze_tag(emitter, event.tag) {
- return false
- }
- }
- if !yaml_emitter_analyze_scalar(emitter, event.value) {
- return false
- }
-
- case yaml_SEQUENCE_START_EVENT:
- if len(event.anchor) > 0 {
- if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
- return false
- }
- }
- if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
- if !yaml_emitter_analyze_tag(emitter, event.tag) {
- return false
- }
- }
-
- case yaml_MAPPING_START_EVENT:
- if len(event.anchor) > 0 {
- if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
- return false
- }
- }
- if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
- if !yaml_emitter_analyze_tag(emitter, event.tag) {
- return false
- }
- }
- }
- return true
-}
-
-// Write the BOM character.
-func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
- if !flush(emitter) {
- return false
- }
- pos := emitter.buffer_pos
- emitter.buffer[pos+0] = '\xEF'
- emitter.buffer[pos+1] = '\xBB'
- emitter.buffer[pos+2] = '\xBF'
- emitter.buffer_pos += 3
- return true
-}
-
-func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
- indent := emitter.indent
- if indent < 0 {
- indent = 0
- }
- if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
- if !put_break(emitter) {
- return false
- }
- }
- for emitter.column < indent {
- if !put(emitter, ' ') {
- return false
- }
- }
- emitter.whitespace = true
- emitter.indention = true
- return true
-}
-
-func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
- if need_whitespace && !emitter.whitespace {
- if !put(emitter, ' ') {
- return false
- }
- }
- if !write_all(emitter, indicator) {
- return false
- }
- emitter.whitespace = is_whitespace
- emitter.indention = (emitter.indention && is_indention)
- emitter.open_ended = false
- return true
-}
-
-func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
- if !write_all(emitter, value) {
- return false
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
-func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
- if !emitter.whitespace {
- if !put(emitter, ' ') {
- return false
- }
- }
- if !write_all(emitter, value) {
- return false
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
-func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
- if need_whitespace && !emitter.whitespace {
- if !put(emitter, ' ') {
- return false
- }
- }
- for i := 0; i < len(value); {
- var must_write bool
- switch value[i] {
- case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
- must_write = true
- default:
- must_write = is_alpha(value, i)
- }
- if must_write {
- if !write(emitter, value, &i) {
- return false
- }
- } else {
- w := width(value[i])
- for k := 0; k < w; k++ {
- octet := value[i]
- i++
- if !put(emitter, '%') {
- return false
- }
-
- c := octet >> 4
- if c < 10 {
- c += '0'
- } else {
- c += 'A' - 10
- }
- if !put(emitter, c) {
- return false
- }
-
- c = octet & 0x0f
- if c < 10 {
- c += '0'
- } else {
- c += 'A' - 10
- }
- if !put(emitter, c) {
- return false
- }
- }
- }
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
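-// Illustrative note (added commentary, not part of the upstream file): in
-// yaml_emitter_write_tag_content above, bytes outside the allowed URI set
-// are emitted as '%' plus two uppercase hex digits, one escape per byte of
-// the character's UTF-8 encoding. For example, a suffix containing 'é'
-// (UTF-8 0xC3 0xA9) would be written as "%C3%A9", while ASCII letters,
-// digits and the listed punctuation are written through unchanged.
-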
-func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
- if !emitter.whitespace {
- if !put(emitter, ' ') {
- return false
- }
- }
-
- spaces := false
- breaks := false
- for i := 0; i < len(value); {
- if is_space(value, i) {
- if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- i += width(value[i])
- } else {
- if !write(emitter, value, &i) {
- return false
- }
- }
- spaces = true
- } else if is_break(value, i) {
- if !breaks && value[i] == '\n' {
- if !put_break(emitter) {
- return false
- }
- }
- if !write_break(emitter, value, &i) {
- return false
- }
- emitter.indention = true
- breaks = true
- } else {
- if breaks {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !write(emitter, value, &i) {
- return false
- }
- emitter.indention = false
- spaces = false
- breaks = false
- }
- }
-
- emitter.whitespace = false
- emitter.indention = false
- if emitter.root_context {
- emitter.open_ended = true
- }
-
- return true
-}
-
-func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
-
- if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
- return false
- }
-
- spaces := false
- breaks := false
- for i := 0; i < len(value); {
- if is_space(value, i) {
- if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- i += width(value[i])
- } else {
- if !write(emitter, value, &i) {
- return false
- }
- }
- spaces = true
- } else if is_break(value, i) {
- if !breaks && value[i] == '\n' {
- if !put_break(emitter) {
- return false
- }
- }
- if !write_break(emitter, value, &i) {
- return false
- }
- emitter.indention = true
- breaks = true
- } else {
- if breaks {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if value[i] == '\'' {
- if !put(emitter, '\'') {
- return false
- }
- }
- if !write(emitter, value, &i) {
- return false
- }
- emitter.indention = false
- spaces = false
- breaks = false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
- return false
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
-func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
- spaces := false
- if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
- return false
- }
-
- for i := 0; i < len(value); {
- if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
- is_bom(value, i) || is_break(value, i) ||
- value[i] == '"' || value[i] == '\\' {
-
- octet := value[i]
-
- var w int
- var v rune
- switch {
- case octet&0x80 == 0x00:
- w, v = 1, rune(octet&0x7F)
- case octet&0xE0 == 0xC0:
- w, v = 2, rune(octet&0x1F)
- case octet&0xF0 == 0xE0:
- w, v = 3, rune(octet&0x0F)
- case octet&0xF8 == 0xF0:
- w, v = 4, rune(octet&0x07)
- }
- for k := 1; k < w; k++ {
- octet = value[i+k]
- v = (v << 6) + (rune(octet) & 0x3F)
- }
- i += w
-
- if !put(emitter, '\\') {
- return false
- }
-
- var ok bool
- switch v {
- case 0x00:
- ok = put(emitter, '0')
- case 0x07:
- ok = put(emitter, 'a')
- case 0x08:
- ok = put(emitter, 'b')
- case 0x09:
- ok = put(emitter, 't')
- case 0x0A:
- ok = put(emitter, 'n')
- case 0x0b:
- ok = put(emitter, 'v')
- case 0x0c:
- ok = put(emitter, 'f')
- case 0x0d:
- ok = put(emitter, 'r')
- case 0x1b:
- ok = put(emitter, 'e')
- case 0x22:
- ok = put(emitter, '"')
- case 0x5c:
- ok = put(emitter, '\\')
- case 0x85:
- ok = put(emitter, 'N')
- case 0xA0:
- ok = put(emitter, '_')
- case 0x2028:
- ok = put(emitter, 'L')
- case 0x2029:
- ok = put(emitter, 'P')
- default:
- if v <= 0xFF {
- ok = put(emitter, 'x')
- w = 2
- } else if v <= 0xFFFF {
- ok = put(emitter, 'u')
- w = 4
- } else {
- ok = put(emitter, 'U')
- w = 8
- }
- for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
- digit := byte((v >> uint(k)) & 0x0F)
- if digit < 10 {
- ok = put(emitter, digit+'0')
- } else {
- ok = put(emitter, digit+'A'-10)
- }
- }
- }
- if !ok {
- return false
- }
- spaces = false
- } else if is_space(value, i) {
- if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if is_space(value, i+1) {
- if !put(emitter, '\\') {
- return false
- }
- }
- i += width(value[i])
- } else if !write(emitter, value, &i) {
- return false
- }
- spaces = true
- } else {
- if !write(emitter, value, &i) {
- return false
- }
- spaces = false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
- return false
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
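-// Illustrative note (added commentary, not part of the upstream file): the
-// escape table above maps a handful of runes to short escapes and everything
-// else to numeric ones. For example, a BEL byte (0x07) is written as "\a",
-// the line separator U+2028 as "\L", and a rune with no short form, such as
-// U+00FF when unicode output is disabled, as "\xFF"; runes above 0xFFFF use
-// the eight-digit "\U" form.
-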
-func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
- if is_space(value, 0) || is_break(value, 0) {
- indent_hint := []byte{'0' + byte(emitter.best_indent)}
- if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
- return false
- }
- }
-
- emitter.open_ended = false
-
- var chomp_hint [1]byte
- if len(value) == 0 {
- chomp_hint[0] = '-'
- } else {
- i := len(value) - 1
- for value[i]&0xC0 == 0x80 {
- i--
- }
- if !is_break(value, i) {
- chomp_hint[0] = '-'
- } else if i == 0 {
- chomp_hint[0] = '+'
- emitter.open_ended = true
- } else {
- i--
- for value[i]&0xC0 == 0x80 {
- i--
- }
- if is_break(value, i) {
- chomp_hint[0] = '+'
- emitter.open_ended = true
- }
- }
- }
- if chomp_hint[0] != 0 {
- if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
- return false
- }
- }
- return true
-}
-
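-// Worked example (added commentary, not part of the upstream file): assuming
-// the default indent of 2, the hint writer above produces, for a literal
-// scalar,
-//
-//	"foo"     -> "|-"  (no trailing break, strip chomping)
-//	"foo\n"   -> "|"   (single trailing break, no chomping hint)
-//	"foo\n\n" -> "|+"  (extra trailing breaks, keep chomping)
-//	" foo\n"  -> "|2"  (leading space forces an explicit indent hint)
-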
-func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
- if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
- return false
- }
- if !yaml_emitter_write_block_scalar_hints(emitter, value) {
- return false
- }
- if !put_break(emitter) {
- return false
- }
- emitter.indention = true
- emitter.whitespace = true
- breaks := true
- for i := 0; i < len(value); {
- if is_break(value, i) {
- if !write_break(emitter, value, &i) {
- return false
- }
- emitter.indention = true
- breaks = true
- } else {
- if breaks {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !write(emitter, value, &i) {
- return false
- }
- emitter.indention = false
- breaks = false
- }
- }
-
- return true
-}
-
-func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
- if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
- return false
- }
- if !yaml_emitter_write_block_scalar_hints(emitter, value) {
- return false
- }
-
- if !put_break(emitter) {
- return false
- }
- emitter.indention = true
- emitter.whitespace = true
-
- breaks := true
- leading_spaces := true
- for i := 0; i < len(value); {
- if is_break(value, i) {
- if !breaks && !leading_spaces && value[i] == '\n' {
- k := 0
- for is_break(value, k) {
- k += width(value[k])
- }
- if !is_blankz(value, k) {
- if !put_break(emitter) {
- return false
- }
- }
- }
- if !write_break(emitter, value, &i) {
- return false
- }
- emitter.indention = true
- breaks = true
- } else {
- if breaks {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- leading_spaces = is_blank(value, i)
- }
- if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- i += width(value[i])
- } else {
- if !write(emitter, value, &i) {
- return false
- }
- }
- emitter.indention = false
- breaks = false
- }
- }
- return true
-}
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/encode.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/encode.go
deleted file mode 100644
index b7edc799d1..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/encode.go
+++ /dev/null
@@ -1,289 +0,0 @@
-package yaml
-
-import (
- "encoding"
- "reflect"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-type encoder struct {
- emitter yaml_emitter_t
- event yaml_event_t
- out []byte
- flow bool
-}
-
-func newEncoder() (e *encoder) {
- e = &encoder{}
- e.must(yaml_emitter_initialize(&e.emitter))
- yaml_emitter_set_output_string(&e.emitter, &e.out)
- yaml_emitter_set_unicode(&e.emitter, true)
- e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
- e.emit()
- e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
- e.emit()
- return e
-}
-
-func (e *encoder) finish() {
- e.must(yaml_document_end_event_initialize(&e.event, true))
- e.emit()
- e.emitter.open_ended = false
- e.must(yaml_stream_end_event_initialize(&e.event))
- e.emit()
-}
-
-func (e *encoder) destroy() {
- yaml_emitter_delete(&e.emitter)
-}
-
-func (e *encoder) emit() {
- // This will internally delete the e.event value.
- if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
- e.must(false)
- }
-}
-
-func (e *encoder) must(ok bool) {
- if !ok {
- msg := e.emitter.problem
- if msg == "" {
- msg = "unknown problem generating YAML content"
- }
- failf("%s", msg)
- }
-}
-
-func (e *encoder) marshal(tag string, in reflect.Value) {
- if !in.IsValid() {
- e.nilv()
- return
- }
- iface := in.Interface()
- if m, ok := iface.(Marshaler); ok {
- v, err := m.MarshalYAML()
- if err != nil {
- fail(err)
- }
- if v == nil {
- e.nilv()
- return
- }
- in = reflect.ValueOf(v)
- } else if m, ok := iface.(encoding.TextMarshaler); ok {
- text, err := m.MarshalText()
- if err != nil {
- fail(err)
- }
- in = reflect.ValueOf(string(text))
- }
- switch in.Kind() {
- case reflect.Interface:
- if in.IsNil() {
- e.nilv()
- } else {
- e.marshal(tag, in.Elem())
- }
- case reflect.Map:
- e.mapv(tag, in)
- case reflect.Ptr:
- if in.IsNil() {
- e.nilv()
- } else {
- e.marshal(tag, in.Elem())
- }
- case reflect.Struct:
- e.structv(tag, in)
- case reflect.Slice:
- if in.Type().Elem() == mapItemType {
- e.itemsv(tag, in)
- } else {
- e.slicev(tag, in)
- }
- case reflect.String:
- e.stringv(tag, in)
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- if in.Type() == durationType {
- e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
- } else {
- e.intv(tag, in)
- }
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- e.uintv(tag, in)
- case reflect.Float32, reflect.Float64:
- e.floatv(tag, in)
- case reflect.Bool:
- e.boolv(tag, in)
- default:
- panic("cannot marshal type: " + in.Type().String())
- }
-}
-
-func (e *encoder) mapv(tag string, in reflect.Value) {
- e.mappingv(tag, func() {
- keys := keyList(in.MapKeys())
- sort.Sort(keys)
- for _, k := range keys {
- e.marshal("", k)
- e.marshal("", in.MapIndex(k))
- }
- })
-}
-
-func (e *encoder) itemsv(tag string, in reflect.Value) {
- e.mappingv(tag, func() {
- slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
- for _, item := range slice {
- e.marshal("", reflect.ValueOf(item.Key))
- e.marshal("", reflect.ValueOf(item.Value))
- }
- })
-}
-
-func (e *encoder) structv(tag string, in reflect.Value) {
- sinfo, err := getStructInfo(in.Type())
- if err != nil {
- panic(err)
- }
- e.mappingv(tag, func() {
- for _, info := range sinfo.FieldsList {
- var value reflect.Value
- if info.Inline == nil {
- value = in.Field(info.Num)
- } else {
- value = in.FieldByIndex(info.Inline)
- }
- if info.OmitEmpty && isZero(value) {
- continue
- }
- e.marshal("", reflect.ValueOf(info.Key))
- e.flow = info.Flow
- e.marshal("", value)
- }
- })
-}
-
-func (e *encoder) mappingv(tag string, f func()) {
- implicit := tag == ""
- style := yaml_BLOCK_MAPPING_STYLE
- if e.flow {
- e.flow = false
- style = yaml_FLOW_MAPPING_STYLE
- }
- e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
- e.emit()
- f()
- e.must(yaml_mapping_end_event_initialize(&e.event))
- e.emit()
-}
-
-func (e *encoder) slicev(tag string, in reflect.Value) {
- implicit := tag == ""
- style := yaml_BLOCK_SEQUENCE_STYLE
- if e.flow {
- e.flow = false
- style = yaml_FLOW_SEQUENCE_STYLE
- }
- e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
- e.emit()
- n := in.Len()
- for i := 0; i < n; i++ {
- e.marshal("", in.Index(i))
- }
- e.must(yaml_sequence_end_event_initialize(&e.event))
- e.emit()
-}
-
-// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
-//
-// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
-// in YAML 1.2 and by this package, but these should be marshalled quoted for
-// the time being for compatibility with other parsers.
-func isBase60Float(s string) (result bool) {
- // Fast path.
- if s == "" {
- return false
- }
- c := s[0]
- if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
- return false
- }
- // Do the full match.
- return base60float.MatchString(s)
-}
-
-// From http://yaml.org/type/float.html, except the regular expression there
-// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
-var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
-
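-// Worked example (added commentary, not part of the upstream file): the
-// regexp above matches sexagesimal-looking strings such as "1:20" (which a
-// YAML 1.1 parser would read as the number 80) or "190:20:30", so stringv
-// below emits them double-quoted when no explicit tag is given; a plain
-// "120" or "1.5" contains no ':' and needs no quoting on this account.
-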
-func (e *encoder) stringv(tag string, in reflect.Value) {
- var style yaml_scalar_style_t
- s := in.String()
- rtag, rs := resolve("", s)
- if rtag == yaml_BINARY_TAG {
- if tag == "" || tag == yaml_STR_TAG {
- tag = rtag
- s = rs.(string)
- } else if tag == yaml_BINARY_TAG {
- failf("explicitly tagged !!binary data must be base64-encoded")
- } else {
- failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
- }
- }
- if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- } else if strings.Contains(s, "\n") {
- style = yaml_LITERAL_SCALAR_STYLE
- } else {
- style = yaml_PLAIN_SCALAR_STYLE
- }
- e.emitScalar(s, "", tag, style)
-}
-
-func (e *encoder) boolv(tag string, in reflect.Value) {
- var s string
- if in.Bool() {
- s = "true"
- } else {
- s = "false"
- }
- e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) intv(tag string, in reflect.Value) {
- s := strconv.FormatInt(in.Int(), 10)
- e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) uintv(tag string, in reflect.Value) {
- s := strconv.FormatUint(in.Uint(), 10)
- e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) floatv(tag string, in reflect.Value) {
- // FIXME: Handle 64 bits here.
- s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
- switch s {
- case "+Inf":
- s = ".inf"
- case "-Inf":
- s = "-.inf"
- case "NaN":
- s = ".nan"
- }
- e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) nilv() {
- e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
- implicit := tag == ""
- e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
- e.emit()
-}
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/parserc.go
deleted file mode 100644
index 0a7037ad1b..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/parserc.go
+++ /dev/null
@@ -1,1096 +0,0 @@
-package yaml
-
-import (
- "bytes"
-)
-
-// The parser implements the following grammar:
-//
-// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
-// implicit_document ::= block_node DOCUMENT-END*
-// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-// block_node_or_indentless_sequence ::=
-// ALIAS
-// | properties (block_content | indentless_block_sequence)?
-// | block_content
-// | indentless_block_sequence
-// block_node ::= ALIAS
-// | properties block_content?
-// | block_content
-// flow_node ::= ALIAS
-// | properties flow_content?
-// | flow_content
-// properties ::= TAG ANCHOR? | ANCHOR TAG?
-// block_content ::= block_collection | flow_collection | SCALAR
-// flow_content ::= flow_collection | SCALAR
-// block_collection ::= block_sequence | block_mapping
-// flow_collection ::= flow_sequence | flow_mapping
-// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-// block_mapping ::= BLOCK-MAPPING_START
-// ((KEY block_node_or_indentless_sequence?)?
-// (VALUE block_node_or_indentless_sequence?)?)*
-// BLOCK-END
-// flow_sequence ::= FLOW-SEQUENCE-START
-// (flow_sequence_entry FLOW-ENTRY)*
-// flow_sequence_entry?
-// FLOW-SEQUENCE-END
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// flow_mapping ::= FLOW-MAPPING-START
-// (flow_mapping_entry FLOW-ENTRY)*
-// flow_mapping_entry?
-// FLOW-MAPPING-END
-// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-
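-// Driver sketch (added commentary, not part of the upstream file): the
-// grammar above is consumed one event at a time. Assuming a yaml_parser_t
-// that has already been initialized and given an input source, a caller
-// loops until a STREAM-END event or an error is produced, roughly:
-//
-//	var event yaml_event_t
-//	for {
-//		if !yaml_parser_parse(&parser, &event) {
-//			// parser.problem and parser.problem_mark describe the failure.
-//			break
-//		}
-//		if event.typ == yaml_STREAM_END_EVENT {
-//			break
-//		}
-//		// ... handle the event ...
-//	}
-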
-// Peek the next token in the token queue.
-func peek_token(parser *yaml_parser_t) *yaml_token_t {
- if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
- return &parser.tokens[parser.tokens_head]
- }
- return nil
-}
-
-// Remove the next token from the queue (must be called after peek_token).
-func skip_token(parser *yaml_parser_t) {
- parser.token_available = false
- parser.tokens_parsed++
- parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
- parser.tokens_head++
-}
-
-// Get the next event.
-func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
- // Erase the event object.
- *event = yaml_event_t{}
-
- // No events after the end of the stream or error.
- if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
- return true
- }
-
- // Generate the next event.
- return yaml_parser_state_machine(parser, event)
-}
-
-// Set parser error.
-func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
- parser.error = yaml_PARSER_ERROR
- parser.problem = problem
- parser.problem_mark = problem_mark
- return false
-}
-
-func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
- parser.error = yaml_PARSER_ERROR
- parser.context = context
- parser.context_mark = context_mark
- parser.problem = problem
- parser.problem_mark = problem_mark
- return false
-}
-
-// State dispatcher.
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
- //trace("yaml_parser_state_machine", "state:", parser.state.String())
-
- switch parser.state {
- case yaml_PARSE_STREAM_START_STATE:
- return yaml_parser_parse_stream_start(parser, event)
-
- case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
- return yaml_parser_parse_document_start(parser, event, true)
-
- case yaml_PARSE_DOCUMENT_START_STATE:
- return yaml_parser_parse_document_start(parser, event, false)
-
- case yaml_PARSE_DOCUMENT_CONTENT_STATE:
- return yaml_parser_parse_document_content(parser, event)
-
- case yaml_PARSE_DOCUMENT_END_STATE:
- return yaml_parser_parse_document_end(parser, event)
-
- case yaml_PARSE_BLOCK_NODE_STATE:
- return yaml_parser_parse_node(parser, event, true, false)
-
- case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
- return yaml_parser_parse_node(parser, event, true, true)
-
- case yaml_PARSE_FLOW_NODE_STATE:
- return yaml_parser_parse_node(parser, event, false, false)
-
- case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
- return yaml_parser_parse_block_sequence_entry(parser, event, true)
-
- case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
- return yaml_parser_parse_block_sequence_entry(parser, event, false)
-
- case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
- return yaml_parser_parse_indentless_sequence_entry(parser, event)
-
- case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
- return yaml_parser_parse_block_mapping_key(parser, event, true)
-
- case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
- return yaml_parser_parse_block_mapping_key(parser, event, false)
-
- case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
- return yaml_parser_parse_block_mapping_value(parser, event)
-
- case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
- return yaml_parser_parse_flow_sequence_entry(parser, event, true)
-
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
- return yaml_parser_parse_flow_sequence_entry(parser, event, false)
-
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
- return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
-
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
- return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
-
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
- return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
-
- case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
- return yaml_parser_parse_flow_mapping_key(parser, event, true)
-
- case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
- return yaml_parser_parse_flow_mapping_key(parser, event, false)
-
- case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
- return yaml_parser_parse_flow_mapping_value(parser, event, false)
-
- case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
- return yaml_parser_parse_flow_mapping_value(parser, event, true)
-
- default:
- panic("invalid parser state")
- }
- return false
-}
-
-// Parse the production:
-// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
-// ************
-func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_STREAM_START_TOKEN {
- return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark)
- }
- parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
- *event = yaml_event_t{
- typ: yaml_STREAM_START_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- encoding: token.encoding,
- }
- skip_token(parser)
- return true
-}
-
-// Parse the productions:
-// implicit_document ::= block_node DOCUMENT-END*
-// *
-// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-// *************************
-func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- // Parse extra document end indicators.
- if !implicit {
- for token.typ == yaml_DOCUMENT_END_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- }
- }
-
- if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
- token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
- token.typ != yaml_DOCUMENT_START_TOKEN &&
- token.typ != yaml_STREAM_END_TOKEN {
- // Parse an implicit document.
- if !yaml_parser_process_directives(parser, nil, nil) {
- return false
- }
- parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
- parser.state = yaml_PARSE_BLOCK_NODE_STATE
-
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_START_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
-
- } else if token.typ != yaml_STREAM_END_TOKEN {
- // Parse an explicit document.
- var version_directive *yaml_version_directive_t
- var tag_directives []yaml_tag_directive_t
- start_mark := token.start_mark
- if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
- return false
- }
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_DOCUMENT_START_TOKEN {
- yaml_parser_set_parser_error(parser,
- "did not find expected ", token.start_mark)
- return false
- }
- parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
- parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
- end_mark := token.end_mark
-
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- version_directive: version_directive,
- tag_directives: tag_directives,
- implicit: false,
- }
- skip_token(parser)
-
- } else {
- // Parse the stream end.
- parser.state = yaml_PARSE_END_STATE
- *event = yaml_event_t{
- typ: yaml_STREAM_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
- skip_token(parser)
- }
-
- return true
-}
-
-// Parse the productions:
-// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-// ***********
-//
-func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
- token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
- token.typ == yaml_DOCUMENT_START_TOKEN ||
- token.typ == yaml_DOCUMENT_END_TOKEN ||
- token.typ == yaml_STREAM_END_TOKEN {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- return yaml_parser_process_empty_scalar(parser, event,
- token.start_mark)
- }
- return yaml_parser_parse_node(parser, event, true, false)
-}
-
-// Parse the productions:
-// implicit_document ::= block_node DOCUMENT-END*
-// *************
-// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-//
-func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- start_mark := token.start_mark
- end_mark := token.start_mark
-
- implicit := true
- if token.typ == yaml_DOCUMENT_END_TOKEN {
- end_mark = token.end_mark
- skip_token(parser)
- implicit = false
- }
-
- parser.tag_directives = parser.tag_directives[:0]
-
- parser.state = yaml_PARSE_DOCUMENT_START_STATE
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_END_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- implicit: implicit,
- }
- return true
-}
-
-// Parse the productions:
-// block_node_or_indentless_sequence ::=
-// ALIAS
-// *****
-// | properties (block_content | indentless_block_sequence)?
-// ********** *
-// | block_content | indentless_block_sequence
-// *
-// block_node ::= ALIAS
-// *****
-// | properties block_content?
-// ********** *
-// | block_content
-// *
-// flow_node ::= ALIAS
-// *****
-// | properties flow_content?
-// ********** *
-// | flow_content
-// *
-// properties ::= TAG ANCHOR? | ANCHOR TAG?
-// *************************
-// block_content ::= block_collection | flow_collection | SCALAR
-// ******
-// flow_content ::= flow_collection | SCALAR
-// ******
-func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
- //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ == yaml_ALIAS_TOKEN {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- *event = yaml_event_t{
- typ: yaml_ALIAS_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- anchor: token.value,
- }
- skip_token(parser)
- return true
- }
-
- start_mark := token.start_mark
- end_mark := token.start_mark
-
- var tag_token bool
- var tag_handle, tag_suffix, anchor []byte
- var tag_mark yaml_mark_t
- if token.typ == yaml_ANCHOR_TOKEN {
- anchor = token.value
- start_mark = token.start_mark
- end_mark = token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_TAG_TOKEN {
- tag_token = true
- tag_handle = token.value
- tag_suffix = token.suffix
- tag_mark = token.start_mark
- end_mark = token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- }
- } else if token.typ == yaml_TAG_TOKEN {
- tag_token = true
- tag_handle = token.value
- tag_suffix = token.suffix
- start_mark = token.start_mark
- tag_mark = token.start_mark
- end_mark = token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_ANCHOR_TOKEN {
- anchor = token.value
- end_mark = token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- }
- }
-
- var tag []byte
- if tag_token {
- if len(tag_handle) == 0 {
- tag = tag_suffix
- tag_suffix = nil
- } else {
- for i := range parser.tag_directives {
- if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
- tag = append([]byte(nil), parser.tag_directives[i].prefix...)
- tag = append(tag, tag_suffix...)
- break
- }
- }
- if len(tag) == 0 {
- yaml_parser_set_parser_error_context(parser,
- "while parsing a node", start_mark,
- "found undefined tag handle", tag_mark)
- return false
- }
- }
- }
-
- implicit := len(tag) == 0
- if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
- end_mark = token.end_mark
- parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
- }
- return true
- }
- if token.typ == yaml_SCALAR_TOKEN {
- var plain_implicit, quoted_implicit bool
- end_mark = token.end_mark
- if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
- plain_implicit = true
- } else if len(tag) == 0 {
- quoted_implicit = true
- }
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
-
- *event = yaml_event_t{
- typ: yaml_SCALAR_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- value: token.value,
- implicit: plain_implicit,
- quoted_implicit: quoted_implicit,
- style: yaml_style_t(token.style),
- }
- skip_token(parser)
- return true
- }
- if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
- // [Go] Some of the events below can be merged as they differ only on style.
- end_mark = token.end_mark
- parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
- }
- return true
- }
- if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
- end_mark = token.end_mark
- parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
- *event = yaml_event_t{
- typ: yaml_MAPPING_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
- }
- return true
- }
- if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
- end_mark = token.end_mark
- parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
- }
- return true
- }
- if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
- end_mark = token.end_mark
- parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
- *event = yaml_event_t{
- typ: yaml_MAPPING_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
- }
- return true
- }
- if len(anchor) > 0 || len(tag) > 0 {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
-
- *event = yaml_event_t{
- typ: yaml_SCALAR_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- quoted_implicit: false,
- style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
- }
- return true
- }
-
- context := "while parsing a flow node"
- if block {
- context = "while parsing a block node"
- }
- yaml_parser_set_parser_error_context(parser, context, start_mark,
- "did not find expected node content", token.start_mark)
- return false
-}
-
-// Parse the productions:
-// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-// ******************** *********** * *********
-//
-func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
- if first {
- token := peek_token(parser)
- parser.marks = append(parser.marks, token.start_mark)
- skip_token(parser)
- }
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ == yaml_BLOCK_ENTRY_TOKEN {
- mark := token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
- return yaml_parser_parse_node(parser, event, true, false)
- } else {
- parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
- }
- }
- if token.typ == yaml_BLOCK_END_TOKEN {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
-
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
-
- skip_token(parser)
- return true
- }
-
- context_mark := parser.marks[len(parser.marks)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- return yaml_parser_set_parser_error_context(parser,
- "while parsing a block collection", context_mark,
- "did not find expected '-' indicator", token.start_mark)
-}
-
-// Parse the productions:
-// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-// *********** *
-func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ == yaml_BLOCK_ENTRY_TOKEN {
- mark := token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
- token.typ != yaml_KEY_TOKEN &&
- token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_BLOCK_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
- return yaml_parser_parse_node(parser, event, true, false)
- }
- parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
- }
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
-
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
- }
- return true
-}
-
-// Parse the productions:
-// block_mapping ::= BLOCK-MAPPING_START
-// *******************
-// ((KEY block_node_or_indentless_sequence?)?
-// *** *
-// (VALUE block_node_or_indentless_sequence?)?)*
-//
-// BLOCK-END
-// *********
-//
-func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
- if first {
- token := peek_token(parser)
- parser.marks = append(parser.marks, token.start_mark)
- skip_token(parser)
- }
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ == yaml_KEY_TOKEN {
- mark := token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_KEY_TOKEN &&
- token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_BLOCK_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
- return yaml_parser_parse_node(parser, event, true, true)
- } else {
- parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
- }
- } else if token.typ == yaml_BLOCK_END_TOKEN {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- *event = yaml_event_t{
- typ: yaml_MAPPING_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
- skip_token(parser)
- return true
- }
-
- context_mark := parser.marks[len(parser.marks)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- return yaml_parser_set_parser_error_context(parser,
- "while parsing a block mapping", context_mark,
- "did not find expected key", token.start_mark)
-}
-
-// Parse the productions:
-// block_mapping ::= BLOCK-MAPPING_START
-//
-// ((KEY block_node_or_indentless_sequence?)?
-//
-// (VALUE block_node_or_indentless_sequence?)?)*
-// ***** *
-// BLOCK-END
-//
-//
-func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_VALUE_TOKEN {
- mark := token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_KEY_TOKEN &&
- token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_BLOCK_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
- return yaml_parser_parse_node(parser, event, true, true)
- }
- parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
- }
- parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
-}
-
-// Parse the productions:
-// flow_sequence ::= FLOW-SEQUENCE-START
-// *******************
-// (flow_sequence_entry FLOW-ENTRY)*
-// * **********
-// flow_sequence_entry?
-// *
-// FLOW-SEQUENCE-END
-// *****************
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// *
-//
-func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
- if first {
- token := peek_token(parser)
- parser.marks = append(parser.marks, token.start_mark)
- skip_token(parser)
- }
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
- if !first {
- if token.typ == yaml_FLOW_ENTRY_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- } else {
- context_mark := parser.marks[len(parser.marks)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- return yaml_parser_set_parser_error_context(parser,
- "while parsing a flow sequence", context_mark,
- "did not find expected ',' or ']'", token.start_mark)
- }
- }
-
- if token.typ == yaml_KEY_TOKEN {
- parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
- *event = yaml_event_t{
- typ: yaml_MAPPING_START_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- implicit: true,
- style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
- }
- skip_token(parser)
- return true
- } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- }
-
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
-
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
-
- skip_token(parser)
- return true
-}
-
-//
-// Parse the productions:
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// *** *
-//
-func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_FLOW_ENTRY_TOKEN &&
- token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- mark := token.end_mark
- skip_token(parser)
- parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
-}
-
-// Parse the productions:
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// ***** *
-//
-func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_VALUE_TOKEN {
- skip_token(parser)
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- }
- parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
-}
-
-// Parse the productions:
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// *
-//
-func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
- *event = yaml_event_t{
- typ: yaml_MAPPING_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
- }
- return true
-}
-
-// Parse the productions:
-// flow_mapping ::= FLOW-MAPPING-START
-// ******************
-// (flow_mapping_entry FLOW-ENTRY)*
-// * **********
-// flow_mapping_entry?
-// ******************
-// FLOW-MAPPING-END
-// ****************
-// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// * *** *
-//
-func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
- if first {
- token := peek_token(parser)
- parser.marks = append(parser.marks, token.start_mark)
- skip_token(parser)
- }
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
- if !first {
- if token.typ == yaml_FLOW_ENTRY_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- } else {
- context_mark := parser.marks[len(parser.marks)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- return yaml_parser_set_parser_error_context(parser,
- "while parsing a flow mapping", context_mark,
- "did not find expected ',' or '}'", token.start_mark)
- }
- }
-
- if token.typ == yaml_KEY_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_FLOW_ENTRY_TOKEN &&
- token.typ != yaml_FLOW_MAPPING_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- } else {
- parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
- }
- } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- }
-
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- *event = yaml_event_t{
- typ: yaml_MAPPING_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
- skip_token(parser)
- return true
-}
-
-// Parse the productions:
-// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// * ***** *
-//
-func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if empty {
- parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
- }
- if token.typ == yaml_VALUE_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- }
- parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
-}
-
-// Generate an empty scalar event.
-func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
- *event = yaml_event_t{
- typ: yaml_SCALAR_EVENT,
- start_mark: mark,
- end_mark: mark,
- value: nil, // Empty
- implicit: true,
- style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
- }
- return true
-}
-
-var default_tag_directives = []yaml_tag_directive_t{
- {[]byte("!"), []byte("!")},
- {[]byte("!!"), []byte("tag:yaml.org,2002:")},
-}
-
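-// Illustrative note (added commentary, not part of the upstream file): these
-// defaults mean that, absent %TAG directives in the document, a shorthand
-// such as "!!str" is split into handle "!!" and suffix "str" and resolved by
-// the prefix lookup in yaml_parser_parse_node to "tag:yaml.org,2002:str",
-// while a bare "!foo" resolves to "!foo".
-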
-// Parse directives.
-func yaml_parser_process_directives(parser *yaml_parser_t,
- version_directive_ref **yaml_version_directive_t,
- tag_directives_ref *[]yaml_tag_directive_t) bool {
-
- var version_directive *yaml_version_directive_t
- var tag_directives []yaml_tag_directive_t
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
- if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
- if version_directive != nil {
- yaml_parser_set_parser_error(parser,
- "found duplicate %YAML directive", token.start_mark)
- return false
- }
- if token.major != 1 || token.minor != 1 {
- yaml_parser_set_parser_error(parser,
- "found incompatible YAML document", token.start_mark)
- return false
- }
- version_directive = &yaml_version_directive_t{
- major: token.major,
- minor: token.minor,
- }
- } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
- value := yaml_tag_directive_t{
- handle: token.value,
- prefix: token.prefix,
- }
- if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
- return false
- }
- tag_directives = append(tag_directives, value)
- }
-
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- }
-
- for i := range default_tag_directives {
- if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
- return false
- }
- }
-
- if version_directive_ref != nil {
- *version_directive_ref = version_directive
- }
- if tag_directives_ref != nil {
- *tag_directives_ref = tag_directives
- }
- return true
-}
-
-// Append a tag directive to the directives stack.
-func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
- for i := range parser.tag_directives {
- if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
- if allow_duplicates {
- return true
- }
- return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
- }
- }
-
- // [Go] I suspect the copy is unnecessary. This was likely done
- // because there was no way to track ownership of the data.
- value_copy := yaml_tag_directive_t{
- handle: make([]byte, len(value.handle)),
- prefix: make([]byte, len(value.prefix)),
- }
- copy(value_copy.handle, value.handle)
- copy(value_copy.prefix, value.prefix)
- parser.tag_directives = append(parser.tag_directives, value_copy)
- return true
-}
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/readerc.go
deleted file mode 100644
index d5fb097277..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/readerc.go
+++ /dev/null
@@ -1,391 +0,0 @@
-package yaml
-
-import (
- "io"
-)
-
-// Set the reader error and return 0.
-func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
- parser.error = yaml_READER_ERROR
- parser.problem = problem
- parser.problem_offset = offset
- parser.problem_value = value
- return false
-}
-
-// Byte order marks.
-const (
- bom_UTF8 = "\xef\xbb\xbf"
- bom_UTF16LE = "\xff\xfe"
- bom_UTF16BE = "\xfe\xff"
-)
-
-// Determine the input stream encoding by checking the BOM symbol. If no BOM is
-// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
-func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
-	// Ensure that we have enough bytes in the raw buffer.
- for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
- if !yaml_parser_update_raw_buffer(parser) {
- return false
- }
- }
-
- // Determine the encoding.
- buf := parser.raw_buffer
- pos := parser.raw_buffer_pos
- avail := len(buf) - pos
- if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
- parser.encoding = yaml_UTF16LE_ENCODING
- parser.raw_buffer_pos += 2
- parser.offset += 2
- } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
- parser.encoding = yaml_UTF16BE_ENCODING
- parser.raw_buffer_pos += 2
- parser.offset += 2
- } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
- parser.encoding = yaml_UTF8_ENCODING
- parser.raw_buffer_pos += 3
- parser.offset += 3
- } else {
- parser.encoding = yaml_UTF8_ENCODING
- }
- return true
-}
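
The same check as a self-contained sketch (detectEncoding is a hypothetical helper, not the parser's API): UTF-16 BOMs are two bytes, the UTF-8 BOM is three, and with no BOM the input is treated as UTF-8 and nothing is consumed.

package main

import (
	"bytes"
	"fmt"
)

// detectEncoding reports the encoding implied by a leading BOM and how many
// BOM bytes should be skipped before decoding starts.
func detectEncoding(b []byte) (encoding string, bomLen int) {
	switch {
	case bytes.HasPrefix(b, []byte{0xFF, 0xFE}):
		return "UTF-16LE", 2
	case bytes.HasPrefix(b, []byte{0xFE, 0xFF}):
		return "UTF-16BE", 2
	case bytes.HasPrefix(b, []byte{0xEF, 0xBB, 0xBF}):
		return "UTF-8", 3
	default:
		return "UTF-8", 0 // no BOM: assume UTF-8
	}
}

func main() {
	enc, n := detectEncoding([]byte("\xef\xbb\xbfkey: value"))
	fmt.Println(enc, n) // UTF-8 3
}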
-
-// Update the raw buffer.
-func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
- size_read := 0
-
- // Return if the raw buffer is full.
- if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
- return true
- }
-
- // Return on EOF.
- if parser.eof {
- return true
- }
-
- // Move the remaining bytes in the raw buffer to the beginning.
- if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
- copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
- }
- parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
- parser.raw_buffer_pos = 0
-
- // Call the read handler to fill the buffer.
- size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
- parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
- if err == io.EOF {
- parser.eof = true
- } else if err != nil {
- return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
- }
- return true
-}
-
-// Ensure that the buffer contains at least `length` characters.
-// Return true on success, false on failure.
-//
-// The length is supposed to be significantly less than the buffer size.
-func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
- if parser.read_handler == nil {
- panic("read handler must be set")
- }
-
- // If the EOF flag is set and the raw buffer is empty, do nothing.
- if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
- return true
- }
-
- // Return if the buffer contains enough characters.
- if parser.unread >= length {
- return true
- }
-
- // Determine the input encoding if it is not known yet.
- if parser.encoding == yaml_ANY_ENCODING {
- if !yaml_parser_determine_encoding(parser) {
- return false
- }
- }
-
- // Move the unread characters to the beginning of the buffer.
- buffer_len := len(parser.buffer)
- if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
- copy(parser.buffer, parser.buffer[parser.buffer_pos:])
- buffer_len -= parser.buffer_pos
- parser.buffer_pos = 0
- } else if parser.buffer_pos == buffer_len {
- buffer_len = 0
- parser.buffer_pos = 0
- }
-
- // Open the whole buffer for writing, and cut it before returning.
- parser.buffer = parser.buffer[:cap(parser.buffer)]
-
- // Fill the buffer until it has enough characters.
- first := true
- for parser.unread < length {
-
- // Fill the raw buffer if necessary.
- if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
- if !yaml_parser_update_raw_buffer(parser) {
- parser.buffer = parser.buffer[:buffer_len]
- return false
- }
- }
- first = false
-
- // Decode the raw buffer.
- inner:
- for parser.raw_buffer_pos != len(parser.raw_buffer) {
- var value rune
- var width int
-
- raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
-
- // Decode the next character.
- switch parser.encoding {
- case yaml_UTF8_ENCODING:
- // Decode a UTF-8 character. Check RFC 3629
- // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
- //
- // The following table (taken from the RFC) is used for
- // decoding.
- //
- // Char. number range | UTF-8 octet sequence
- // (hexadecimal) | (binary)
- // --------------------+------------------------------------
- // 0000 0000-0000 007F | 0xxxxxxx
- // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
- // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
- // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
- //
- // Additionally, the characters in the range 0xD800-0xDFFF
- // are prohibited as they are reserved for use with UTF-16
- // surrogate pairs.
-
- // Determine the length of the UTF-8 sequence.
- octet := parser.raw_buffer[parser.raw_buffer_pos]
- switch {
- case octet&0x80 == 0x00:
- width = 1
- case octet&0xE0 == 0xC0:
- width = 2
- case octet&0xF0 == 0xE0:
- width = 3
- case octet&0xF8 == 0xF0:
- width = 4
- default:
- // The leading octet is invalid.
- return yaml_parser_set_reader_error(parser,
- "invalid leading UTF-8 octet",
- parser.offset, int(octet))
- }
-
- // Check if the raw buffer contains an incomplete character.
- if width > raw_unread {
- if parser.eof {
- return yaml_parser_set_reader_error(parser,
- "incomplete UTF-8 octet sequence",
- parser.offset, -1)
- }
- break inner
- }
-
- // Decode the leading octet.
- switch {
- case octet&0x80 == 0x00:
- value = rune(octet & 0x7F)
- case octet&0xE0 == 0xC0:
- value = rune(octet & 0x1F)
- case octet&0xF0 == 0xE0:
- value = rune(octet & 0x0F)
- case octet&0xF8 == 0xF0:
- value = rune(octet & 0x07)
- default:
- value = 0
- }
-
- // Check and decode the trailing octets.
- for k := 1; k < width; k++ {
- octet = parser.raw_buffer[parser.raw_buffer_pos+k]
-
- // Check if the octet is valid.
- if (octet & 0xC0) != 0x80 {
- return yaml_parser_set_reader_error(parser,
- "invalid trailing UTF-8 octet",
- parser.offset+k, int(octet))
- }
-
- // Decode the octet.
- value = (value << 6) + rune(octet&0x3F)
- }
-
- // Check the length of the sequence against the value.
- switch {
- case width == 1:
- case width == 2 && value >= 0x80:
- case width == 3 && value >= 0x800:
- case width == 4 && value >= 0x10000:
- default:
- return yaml_parser_set_reader_error(parser,
- "invalid length of a UTF-8 sequence",
- parser.offset, -1)
- }
-
- // Check the range of the value.
- if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
- return yaml_parser_set_reader_error(parser,
- "invalid Unicode character",
- parser.offset, int(value))
- }
-
- case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
- var low, high int
- if parser.encoding == yaml_UTF16LE_ENCODING {
- low, high = 0, 1
- } else {
- high, low = 1, 0
- }
-
- // The UTF-16 encoding is not as simple as one might
- // naively think. Check RFC 2781
- // (http://www.ietf.org/rfc/rfc2781.txt).
- //
- // Normally, two subsequent bytes describe a Unicode
- // character. However a special technique (called a
- // surrogate pair) is used for specifying character
- // values larger than 0xFFFF.
- //
- // A surrogate pair consists of two pseudo-characters:
- // high surrogate area (0xD800-0xDBFF)
- // low surrogate area (0xDC00-0xDFFF)
- //
- // The following formulas are used for decoding
- // and encoding characters using surrogate pairs:
- //
- // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
- // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
- // W1 = 110110yyyyyyyyyy
- // W2 = 110111xxxxxxxxxx
- //
- // where U is the character value, W1 is the high surrogate
- // area, W2 is the low surrogate area.
-
- // Check for incomplete UTF-16 character.
- if raw_unread < 2 {
- if parser.eof {
- return yaml_parser_set_reader_error(parser,
- "incomplete UTF-16 character",
- parser.offset, -1)
- }
- break inner
- }
-
- // Get the character.
- value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
- (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
-
- // Check for unexpected low surrogate area.
- if value&0xFC00 == 0xDC00 {
- return yaml_parser_set_reader_error(parser,
- "unexpected low surrogate area",
- parser.offset, int(value))
- }
-
- // Check for a high surrogate area.
- if value&0xFC00 == 0xD800 {
- width = 4
-
- // Check for incomplete surrogate pair.
- if raw_unread < 4 {
- if parser.eof {
- return yaml_parser_set_reader_error(parser,
- "incomplete UTF-16 surrogate pair",
- parser.offset, -1)
- }
- break inner
- }
-
- // Get the next character.
- value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
- (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
-
- // Check for a low surrogate area.
- if value2&0xFC00 != 0xDC00 {
- return yaml_parser_set_reader_error(parser,
- "expected low surrogate area",
- parser.offset+2, int(value2))
- }
-
- // Generate the value of the surrogate pair.
- value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
- } else {
- width = 2
- }
-
- default:
- panic("impossible")
- }
-
- // Check if the character is in the allowed range:
- // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
- // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
- // | [#x10000-#x10FFFF] (32 bit)
- switch {
- case value == 0x09:
- case value == 0x0A:
- case value == 0x0D:
- case value >= 0x20 && value <= 0x7E:
- case value == 0x85:
- case value >= 0xA0 && value <= 0xD7FF:
- case value >= 0xE000 && value <= 0xFFFD:
- case value >= 0x10000 && value <= 0x10FFFF:
- default:
- return yaml_parser_set_reader_error(parser,
- "control characters are not allowed",
- parser.offset, int(value))
- }
-
- // Move the raw pointers.
- parser.raw_buffer_pos += width
- parser.offset += width
-
- // Finally put the character into the buffer.
- if value <= 0x7F {
- // 0000 0000-0000 007F . 0xxxxxxx
- parser.buffer[buffer_len+0] = byte(value)
- } else if value <= 0x7FF {
- // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
- parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
- parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
- } else if value <= 0xFFFF {
- // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
- parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
- parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
- parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
- } else {
- // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
- parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
- parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
- parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
- parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
- }
- buffer_len += width
-
- parser.unread++
- }
-
- // On EOF, put NUL into the buffer and return.
- if parser.eof {
- parser.buffer[buffer_len] = 0
- buffer_len++
- parser.unread++
- break
- }
- }
- parser.buffer = parser.buffer[:buffer_len]
- return true
-}
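
As a worked example of the surrogate-pair formula quoted in the comments above; decodeSurrogatePair is a hypothetical helper, shown only to illustrate the arithmetic, not part of this package:

package main

import "fmt"

// decodeSurrogatePair applies the W1/W2 formula from the comment above:
// U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF).
func decodeSurrogatePair(w1, w2 uint16) (rune, bool) {
	if w1&0xFC00 != 0xD800 || w2&0xFC00 != 0xDC00 {
		return 0, false // not a valid high/low surrogate pair
	}
	return 0x10000 + (rune(w1&0x3FF) << 10) + rune(w2&0x3FF), true
}

func main() {
	// U+1F600 is encoded in UTF-16 as the surrogate pair D83D DE00.
	r, ok := decodeSurrogatePair(0xD83D, 0xDE00)
	fmt.Printf("%U %v\n", r, ok) // U+1F600 true
}
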
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/resolve.go
deleted file mode 100644
index 93a8632743..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/resolve.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package yaml
-
-import (
- "encoding/base64"
- "math"
- "strconv"
- "strings"
- "unicode/utf8"
-)
-
-type resolveMapItem struct {
- value interface{}
- tag string
-}
-
-var resolveTable = make([]byte, 256)
-var resolveMap = make(map[string]resolveMapItem)
-
-func init() {
- t := resolveTable
- t[int('+')] = 'S' // Sign
- t[int('-')] = 'S'
- for _, c := range "0123456789" {
- t[int(c)] = 'D' // Digit
- }
- for _, c := range "yYnNtTfFoO~" {
- t[int(c)] = 'M' // In map
- }
- t[int('.')] = '.' // Float (potentially in map)
-
- var resolveMapList = []struct {
- v interface{}
- tag string
- l []string
- }{
- {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
- {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
- {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
- {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
- {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
- {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
- {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
- {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
- {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
- {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
- {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
- {"<<", yaml_MERGE_TAG, []string{"<<"}},
- }
-
- m := resolveMap
- for _, item := range resolveMapList {
- for _, s := range item.l {
- m[s] = resolveMapItem{item.v, item.tag}
- }
- }
-}
-
-const longTagPrefix = "tag:yaml.org,2002:"
-
-func shortTag(tag string) string {
- // TODO This can easily be made faster and produce less garbage.
- if strings.HasPrefix(tag, longTagPrefix) {
- return "!!" + tag[len(longTagPrefix):]
- }
- return tag
-}
-
-func longTag(tag string) string {
- if strings.HasPrefix(tag, "!!") {
- return longTagPrefix + tag[2:]
- }
- return tag
-}
-
-func resolvableTag(tag string) bool {
- switch tag {
- case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
- return true
- }
- return false
-}
-
-func resolve(tag string, in string) (rtag string, out interface{}) {
- if !resolvableTag(tag) {
- return tag, in
- }
-
- defer func() {
- switch tag {
- case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
- return
- }
- failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
- }()
-
- // Any data is accepted as a !!str or !!binary.
- // Otherwise, the prefix is enough of a hint about what it might be.
- hint := byte('N')
- if in != "" {
- hint = resolveTable[in[0]]
- }
- if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
-		// Handle things we can look up in a map.
- if item, ok := resolveMap[in]; ok {
- return item.tag, item.value
- }
-
- // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
- // are purposefully unsupported here. They're still quoted on
-		// the way out for compatibility with other parsers, though.
-
- switch hint {
- case 'M':
- // We've already checked the map above.
-
- case '.':
- // Not in the map, so maybe a normal float.
- floatv, err := strconv.ParseFloat(in, 64)
- if err == nil {
- return yaml_FLOAT_TAG, floatv
- }
-
- case 'D', 'S':
- // Int, float, or timestamp.
- plain := strings.Replace(in, "_", "", -1)
- intv, err := strconv.ParseInt(plain, 0, 64)
- if err == nil {
- if intv == int64(int(intv)) {
- return yaml_INT_TAG, int(intv)
- } else {
- return yaml_INT_TAG, intv
- }
- }
- uintv, err := strconv.ParseUint(plain, 0, 64)
- if err == nil {
- return yaml_INT_TAG, uintv
- }
- floatv, err := strconv.ParseFloat(plain, 64)
- if err == nil {
- return yaml_FLOAT_TAG, floatv
- }
- if strings.HasPrefix(plain, "0b") {
- intv, err := strconv.ParseInt(plain[2:], 2, 64)
- if err == nil {
- if intv == int64(int(intv)) {
- return yaml_INT_TAG, int(intv)
- } else {
- return yaml_INT_TAG, intv
- }
- }
- uintv, err := strconv.ParseUint(plain[2:], 2, 64)
- if err == nil {
- return yaml_INT_TAG, uintv
- }
- } else if strings.HasPrefix(plain, "-0b") {
- intv, err := strconv.ParseInt(plain[3:], 2, 64)
- if err == nil {
- if intv == int64(int(intv)) {
- return yaml_INT_TAG, -int(intv)
- } else {
- return yaml_INT_TAG, -intv
- }
- }
- }
- // XXX Handle timestamps here.
-
- default:
- panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
- }
- }
- if tag == yaml_BINARY_TAG {
- return yaml_BINARY_TAG, in
- }
- if utf8.ValidString(in) {
- return yaml_STR_TAG, in
- }
- return yaml_BINARY_TAG, encodeBase64(in)
-}
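
A simplified, standalone sketch of the implicit-resolution idea above; resolvePlain is a hypothetical stand-in for the unexported resolve and ignores many details such as '_' separators, base-2 literals, and timestamps:

package main

import (
	"fmt"
	"strconv"
)

// resolvePlain maps a plain scalar to a short tag and a Go value:
// special words become bool/null, otherwise try int, then float, else string.
func resolvePlain(s string) (tag string, value interface{}) {
	switch s {
	case "true", "True", "TRUE", "y", "yes", "on":
		return "!!bool", true
	case "false", "False", "FALSE", "n", "no", "off":
		return "!!bool", false
	case "", "~", "null", "Null", "NULL":
		return "!!null", nil
	}
	if i, err := strconv.ParseInt(s, 0, 64); err == nil {
		return "!!int", i
	}
	if f, err := strconv.ParseFloat(s, 64); err == nil {
		return "!!float", f
	}
	return "!!str", s
}

func main() {
	for _, in := range []string{"yes", "0x1F", "3.14", "hello"} {
		tag, v := resolvePlain(in)
		fmt.Println(in, "=>", tag, v)
	}
}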
-
-// encodeBase64 encodes s as base64 that is broken up into multiple lines
-// as appropriate for the resulting length.
-func encodeBase64(s string) string {
- const lineLen = 70
- encLen := base64.StdEncoding.EncodedLen(len(s))
- lines := encLen/lineLen + 1
- buf := make([]byte, encLen*2+lines)
- in := buf[0:encLen]
- out := buf[encLen:]
- base64.StdEncoding.Encode(in, []byte(s))
- k := 0
- for i := 0; i < len(in); i += lineLen {
- j := i + lineLen
- if j > len(in) {
- j = len(in)
- }
- k += copy(out[k:], in[i:j])
- if lines > 1 {
- out[k] = '\n'
- k++
- }
- }
- return string(out[:k])
-}
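
For comparison, a standalone sketch of the same wrapping idea using only the standard library; wrapBase64 is a hypothetical helper and, unlike encodeBase64 above, does not emit a newline after the final line:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// wrapBase64 encodes s and breaks the output into lines of at most lineLen bytes.
func wrapBase64(s string, lineLen int) string {
	enc := base64.StdEncoding.EncodeToString([]byte(s))
	var b strings.Builder
	for i := 0; i < len(enc); i += lineLen {
		j := i + lineLen
		if j > len(enc) {
			j = len(enc)
		}
		b.WriteString(enc[i:j])
		if j < len(enc) {
			b.WriteByte('\n')
		}
	}
	return b.String()
}

func main() {
	fmt.Println(wrapBase64(strings.Repeat("binary\x00data", 10), 70))
}
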
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/scannerc.go
deleted file mode 100644
index fe93b190c2..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/scannerc.go
+++ /dev/null
@@ -1,2710 +0,0 @@
-package yaml
-
-import (
- "bytes"
- "fmt"
-)
-
-// Introduction
-// ************
-//
-// The following notes assume that you are familiar with the YAML specification
-// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
-// some cases we are less restrictive than it requires.
-//
-// The process of transforming a YAML stream into a sequence of events is
-// divided into two steps: Scanning and Parsing.
-//
-// The Scanner transforms the input stream into a sequence of tokens, while the
-// Parser transforms the sequence of tokens produced by the Scanner into a
-// sequence of parsing events.
-//
-// The Scanner is rather clever and complicated. The Parser, on the contrary,
-// is a straightforward implementation of a recursive-descent parser (or an
-// LL(1) parser, as it is usually called).
-//
-// Actually there are two aspects of scanning that might be called "clever"; the
-// rest is quite straightforward. These are "block collection start" and
-// "simple keys". Both are explained below in detail.
-//
-// Here the Scanning step is explained and implemented. We start with the list
-// of all the tokens produced by the Scanner together with short descriptions.
-//
-// Now, tokens:
-//
-// STREAM-START(encoding) # The stream start.
-// STREAM-END # The stream end.
-// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
-// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
-// DOCUMENT-START # '---'
-// DOCUMENT-END # '...'
-// BLOCK-SEQUENCE-START # Indentation increase denoting a block
-// BLOCK-MAPPING-START # sequence or a block mapping.
-// BLOCK-END # Indentation decrease.
-// FLOW-SEQUENCE-START # '['
-// FLOW-SEQUENCE-END # ']'
-//      FLOW-MAPPING-START              # '{'
-//      FLOW-MAPPING-END                # '}'
-// BLOCK-ENTRY # '-'
-// FLOW-ENTRY # ','
-// KEY # '?' or nothing (simple keys).
-// VALUE # ':'
-// ALIAS(anchor) # '*anchor'
-// ANCHOR(anchor) # '&anchor'
-// TAG(handle,suffix) # '!handle!suffix'
-// SCALAR(value,style) # A scalar.
-//
-// The following two tokens are "virtual" tokens denoting the beginning and the
-// end of the stream:
-//
-// STREAM-START(encoding)
-// STREAM-END
-//
-// We pass the information about the input stream encoding with the
-// STREAM-START token.
-//
-// The next two tokens are responsible for directives:
-//
-// VERSION-DIRECTIVE(major,minor)
-// TAG-DIRECTIVE(handle,prefix)
-//
-// Example:
-//
-// %YAML 1.1
-// %TAG ! !foo
-// %TAG !yaml! tag:yaml.org,2002:
-// ---
-//
-// The corresponding sequence of tokens:
-//
-// STREAM-START(utf-8)
-// VERSION-DIRECTIVE(1,1)
-// TAG-DIRECTIVE("!","!foo")
-// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
-// DOCUMENT-START
-// STREAM-END
-//
-// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
-// line.
-//
-// The document start and end indicators are represented by:
-//
-// DOCUMENT-START
-// DOCUMENT-END
-//
-// Note that if a YAML stream contains an implicit document (without '---'
-// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
-// produced.
-//
-// In the following examples, we present whole documents together with the
-// produced tokens.
-//
-// 1. An implicit document:
-//
-// 'a scalar'
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// SCALAR("a scalar",single-quoted)
-// STREAM-END
-//
-// 2. An explicit document:
-//
-// ---
-// 'a scalar'
-// ...
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// DOCUMENT-START
-// SCALAR("a scalar",single-quoted)
-// DOCUMENT-END
-// STREAM-END
-//
-// 3. Several documents in a stream:
-//
-// 'a scalar'
-// ---
-// 'another scalar'
-// ---
-// 'yet another scalar'
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// SCALAR("a scalar",single-quoted)
-// DOCUMENT-START
-// SCALAR("another scalar",single-quoted)
-// DOCUMENT-START
-// SCALAR("yet another scalar",single-quoted)
-// STREAM-END
-//
-// We have already introduced the SCALAR token above. The following tokens are
-// used to describe aliases, anchors, tags, and scalars:
-//
-// ALIAS(anchor)
-// ANCHOR(anchor)
-// TAG(handle,suffix)
-// SCALAR(value,style)
-//
-// The following series of examples illustrates the usage of these tokens:
-//
-// 1. A recursive sequence:
-//
-// &A [ *A ]
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// ANCHOR("A")
-// FLOW-SEQUENCE-START
-// ALIAS("A")
-// FLOW-SEQUENCE-END
-// STREAM-END
-//
-// 2. A tagged scalar:
-//
-// !!float "3.14" # A good approximation.
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// TAG("!!","float")
-// SCALAR("3.14",double-quoted)
-// STREAM-END
-//
-// 3. Various scalar styles:
-//
-// --- # Implicit empty plain scalars do not produce tokens.
-// --- a plain scalar
-// --- 'a single-quoted scalar'
-// --- "a double-quoted scalar"
-// --- |-
-// a literal scalar
-// --- >-
-// a folded
-// scalar
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// DOCUMENT-START
-// DOCUMENT-START
-// SCALAR("a plain scalar",plain)
-// DOCUMENT-START
-// SCALAR("a single-quoted scalar",single-quoted)
-// DOCUMENT-START
-// SCALAR("a double-quoted scalar",double-quoted)
-// DOCUMENT-START
-// SCALAR("a literal scalar",literal)
-// DOCUMENT-START
-// SCALAR("a folded scalar",folded)
-// STREAM-END
-//
-// Now it's time to review collection-related tokens. We will start with
-// flow collections:
-//
-// FLOW-SEQUENCE-START
-// FLOW-SEQUENCE-END
-// FLOW-MAPPING-START
-// FLOW-MAPPING-END
-// FLOW-ENTRY
-// KEY
-// VALUE
-//
-// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
-// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
-// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally, the
-// indicators '?' and ':', which are used for denoting mapping keys and values,
-// are represented by the KEY and VALUE tokens.
-//
-// The following examples show flow collections:
-//
-// 1. A flow sequence:
-//
-// [item 1, item 2, item 3]
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// FLOW-SEQUENCE-START
-// SCALAR("item 1",plain)
-// FLOW-ENTRY
-// SCALAR("item 2",plain)
-// FLOW-ENTRY
-// SCALAR("item 3",plain)
-// FLOW-SEQUENCE-END
-// STREAM-END
-//
-// 2. A flow mapping:
-//
-// {
-// a simple key: a value, # Note that the KEY token is produced.
-// ? a complex key: another value,
-// }
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// FLOW-MAPPING-START
-// KEY
-// SCALAR("a simple key",plain)
-// VALUE
-// SCALAR("a value",plain)
-// FLOW-ENTRY
-// KEY
-// SCALAR("a complex key",plain)
-// VALUE
-// SCALAR("another value",plain)
-// FLOW-ENTRY
-// FLOW-MAPPING-END
-// STREAM-END
-//
-// A simple key is a key which is not denoted by the '?' indicator. Note that
-// the Scanner still produces the KEY token whenever it encounters a simple key.
-//
-// For scanning block collections, the following tokens are used (note that we
-// repeat KEY and VALUE here):
-//
-// BLOCK-SEQUENCE-START
-// BLOCK-MAPPING-START
-// BLOCK-END
-// BLOCK-ENTRY
-// KEY
-// VALUE
-//
-// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote an indentation
-// increase that precedes a block collection (cf. the INDENT token in Python).
-// The token BLOCK-END denotes an indentation decrease that ends a block collection
-// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
-// that make detection of these tokens more complex.
-//
-// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
-// '-', '?', and ':' correspondingly.
-//
-// The following examples show how the tokens BLOCK-SEQUENCE-START,
-// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
-//
-// 1. Block sequences:
-//
-// - item 1
-// - item 2
-// -
-// - item 3.1
-// - item 3.2
-// -
-// key 1: value 1
-// key 2: value 2
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-ENTRY
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 3.1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 3.2",plain)
-// BLOCK-END
-// BLOCK-ENTRY
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key 1",plain)
-// VALUE
-// SCALAR("value 1",plain)
-// KEY
-// SCALAR("key 2",plain)
-// VALUE
-// SCALAR("value 2",plain)
-// BLOCK-END
-// BLOCK-END
-// STREAM-END
-//
-// 2. Block mappings:
-//
-// a simple key: a value # The KEY token is produced here.
-// ? a complex key
-// : another value
-// a mapping:
-// key 1: value 1
-// key 2: value 2
-// a sequence:
-// - item 1
-// - item 2
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("a simple key",plain)
-// VALUE
-// SCALAR("a value",plain)
-// KEY
-// SCALAR("a complex key",plain)
-// VALUE
-// SCALAR("another value",plain)
-// KEY
-// SCALAR("a mapping",plain)
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key 1",plain)
-// VALUE
-// SCALAR("value 1",plain)
-// KEY
-// SCALAR("key 2",plain)
-// VALUE
-// SCALAR("value 2",plain)
-// BLOCK-END
-// KEY
-// SCALAR("a sequence",plain)
-// VALUE
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-END
-// BLOCK-END
-// STREAM-END
-//
-// YAML does not always require a new block collection to start on a new
-// line. If the current line contains only '-', '?', and ':' indicators, a new
-// block collection may start at the current line. The following examples
-// illustrate this case:
-//
-// 1. Collections in a sequence:
-//
-// - - item 1
-// - item 2
-// - key 1: value 1
-// key 2: value 2
-// - ? complex key
-// : complex value
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-END
-// BLOCK-ENTRY
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key 1",plain)
-// VALUE
-// SCALAR("value 1",plain)
-// KEY
-// SCALAR("key 2",plain)
-// VALUE
-// SCALAR("value 2",plain)
-// BLOCK-END
-// BLOCK-ENTRY
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("complex key")
-// VALUE
-// SCALAR("complex value")
-// BLOCK-END
-// BLOCK-END
-// STREAM-END
-//
-// 2. Collections in a mapping:
-//
-// ? a sequence
-// : - item 1
-// - item 2
-// ? a mapping
-// : key 1: value 1
-// key 2: value 2
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("a sequence",plain)
-// VALUE
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-END
-// KEY
-// SCALAR("a mapping",plain)
-// VALUE
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key 1",plain)
-// VALUE
-// SCALAR("value 1",plain)
-// KEY
-// SCALAR("key 2",plain)
-// VALUE
-// SCALAR("value 2",plain)
-// BLOCK-END
-// BLOCK-END
-// STREAM-END
-//
-// YAML also permits non-indented sequences if they are included in a block
-// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
-//
-// key:
-// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
-// - item 2
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key",plain)
-// VALUE
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-END
-//
-
-// Ensure that the buffer contains the required number of characters.
-// Return true on success, false on failure (reader error or memory error).
-func cache(parser *yaml_parser_t, length int) bool {
- // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
- return parser.unread >= length || yaml_parser_update_buffer(parser, length)
-}
-
-// Advance the buffer pointer.
-func skip(parser *yaml_parser_t) {
- parser.mark.index++
- parser.mark.column++
- parser.unread--
- parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
-}
-
-func skip_line(parser *yaml_parser_t) {
- if is_crlf(parser.buffer, parser.buffer_pos) {
- parser.mark.index += 2
- parser.mark.column = 0
- parser.mark.line++
- parser.unread -= 2
- parser.buffer_pos += 2
- } else if is_break(parser.buffer, parser.buffer_pos) {
- parser.mark.index++
- parser.mark.column = 0
- parser.mark.line++
- parser.unread--
- parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
- }
-}
-
-// Copy a character to a string buffer and advance pointers.
-func read(parser *yaml_parser_t, s []byte) []byte {
- w := width(parser.buffer[parser.buffer_pos])
- if w == 0 {
- panic("invalid character sequence")
- }
- if len(s) == 0 {
- s = make([]byte, 0, 32)
- }
- if w == 1 && len(s)+w <= cap(s) {
- s = s[:len(s)+1]
- s[len(s)-1] = parser.buffer[parser.buffer_pos]
- parser.buffer_pos++
- } else {
- s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
- parser.buffer_pos += w
- }
- parser.mark.index++
- parser.mark.column++
- parser.unread--
- return s
-}
-
-// Copy a line break character to a string buffer and advance pointers.
-func read_line(parser *yaml_parser_t, s []byte) []byte {
- buf := parser.buffer
- pos := parser.buffer_pos
- switch {
- case buf[pos] == '\r' && buf[pos+1] == '\n':
- // CR LF . LF
- s = append(s, '\n')
- parser.buffer_pos += 2
- parser.mark.index++
- parser.unread--
- case buf[pos] == '\r' || buf[pos] == '\n':
- // CR|LF . LF
- s = append(s, '\n')
- parser.buffer_pos += 1
- case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
- // NEL . LF
- s = append(s, '\n')
- parser.buffer_pos += 2
- case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
- // LS|PS . LS|PS
- s = append(s, buf[parser.buffer_pos:pos+3]...)
- parser.buffer_pos += 3
- default:
- return s
- }
- parser.mark.index++
- parser.mark.column = 0
- parser.mark.line++
- parser.unread--
- return s
-}
-
-// Get the next token.
-func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
- // Erase the token object.
- *token = yaml_token_t{} // [Go] Is this necessary?
-
- // No tokens after STREAM-END or error.
- if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
- return true
- }
-
- // Ensure that the tokens queue contains enough tokens.
- if !parser.token_available {
- if !yaml_parser_fetch_more_tokens(parser) {
- return false
- }
- }
-
- // Fetch the next token from the queue.
- *token = parser.tokens[parser.tokens_head]
- parser.tokens_head++
- parser.tokens_parsed++
- parser.token_available = false
-
- if token.typ == yaml_STREAM_END_TOKEN {
- parser.stream_end_produced = true
- }
- return true
-}
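
A hedged sketch of how a caller inside this package could drain the scanner: keep calling yaml_parser_scan until a STREAM-END token (or an error) appears. It assumes the parser has already been initialized and given input elsewhere; scanAll itself is hypothetical and not part of the package.

// scanAll pulls tokens one at a time until STREAM-END or a scanner error.
func scanAll(parser *yaml_parser_t) ([]yaml_token_t, bool) {
	var tokens []yaml_token_t
	for {
		var token yaml_token_t
		if !yaml_parser_scan(parser, &token) {
			return tokens, false // details are in parser.error and parser.problem
		}
		tokens = append(tokens, token)
		if token.typ == yaml_STREAM_END_TOKEN {
			return tokens, true
		}
	}
}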
-
-// Set the scanner error and return false.
-func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
- parser.error = yaml_SCANNER_ERROR
- parser.context = context
- parser.context_mark = context_mark
- parser.problem = problem
- parser.problem_mark = parser.mark
- return false
-}
-
-func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
- context := "while parsing a tag"
- if directive {
- context = "while parsing a %TAG directive"
- }
- return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet")
-}
-
-func trace(args ...interface{}) func() {
- pargs := append([]interface{}{"+++"}, args...)
- fmt.Println(pargs...)
- pargs = append([]interface{}{"---"}, args...)
- return func() { fmt.Println(pargs...) }
-}
-
-// Ensure that the tokens queue contains at least one token which can be
-// returned to the Parser.
-func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
- // While we need more tokens to fetch, do it.
- for {
- // Check if we really need to fetch more tokens.
- need_more_tokens := false
-
- if parser.tokens_head == len(parser.tokens) {
- // Queue is empty.
- need_more_tokens = true
- } else {
- // Check if any potential simple key may occupy the head position.
- if !yaml_parser_stale_simple_keys(parser) {
- return false
- }
-
- for i := range parser.simple_keys {
- simple_key := &parser.simple_keys[i]
- if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
- need_more_tokens = true
- break
- }
- }
- }
-
- // We are finished.
- if !need_more_tokens {
- break
- }
- // Fetch the next token.
- if !yaml_parser_fetch_next_token(parser) {
- return false
- }
- }
-
- parser.token_available = true
- return true
-}
-
-// The dispatcher for token fetchers.
-func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
- // Ensure that the buffer is initialized.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- // Check if we just started scanning. Fetch STREAM-START then.
- if !parser.stream_start_produced {
- return yaml_parser_fetch_stream_start(parser)
- }
-
- // Eat whitespaces and comments until we reach the next token.
- if !yaml_parser_scan_to_next_token(parser) {
- return false
- }
-
- // Remove obsolete potential simple keys.
- if !yaml_parser_stale_simple_keys(parser) {
- return false
- }
-
- // Check the indentation level against the current column.
- if !yaml_parser_unroll_indent(parser, parser.mark.column) {
- return false
- }
-
- // Ensure that the buffer contains at least 4 characters. 4 is the length
- // of the longest indicators ('--- ' and '... ').
- if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
- return false
- }
-
- // Is it the end of the stream?
- if is_z(parser.buffer, parser.buffer_pos) {
- return yaml_parser_fetch_stream_end(parser)
- }
-
- // Is it a directive?
- if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
- return yaml_parser_fetch_directive(parser)
- }
-
- buf := parser.buffer
- pos := parser.buffer_pos
-
- // Is it the document start indicator?
- if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
- return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
- }
-
- // Is it the document end indicator?
- if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
- return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
- }
-
- // Is it the flow sequence start indicator?
- if buf[pos] == '[' {
- return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
- }
-
- // Is it the flow mapping start indicator?
- if parser.buffer[parser.buffer_pos] == '{' {
- return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
- }
-
- // Is it the flow sequence end indicator?
- if parser.buffer[parser.buffer_pos] == ']' {
- return yaml_parser_fetch_flow_collection_end(parser,
- yaml_FLOW_SEQUENCE_END_TOKEN)
- }
-
- // Is it the flow mapping end indicator?
- if parser.buffer[parser.buffer_pos] == '}' {
- return yaml_parser_fetch_flow_collection_end(parser,
- yaml_FLOW_MAPPING_END_TOKEN)
- }
-
- // Is it the flow entry indicator?
- if parser.buffer[parser.buffer_pos] == ',' {
- return yaml_parser_fetch_flow_entry(parser)
- }
-
- // Is it the block entry indicator?
- if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
- return yaml_parser_fetch_block_entry(parser)
- }
-
- // Is it the key indicator?
- if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
- return yaml_parser_fetch_key(parser)
- }
-
- // Is it the value indicator?
- if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
- return yaml_parser_fetch_value(parser)
- }
-
- // Is it an alias?
- if parser.buffer[parser.buffer_pos] == '*' {
- return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
- }
-
- // Is it an anchor?
- if parser.buffer[parser.buffer_pos] == '&' {
- return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
- }
-
- // Is it a tag?
- if parser.buffer[parser.buffer_pos] == '!' {
- return yaml_parser_fetch_tag(parser)
- }
-
- // Is it a literal scalar?
- if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
- return yaml_parser_fetch_block_scalar(parser, true)
- }
-
- // Is it a folded scalar?
- if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
- return yaml_parser_fetch_block_scalar(parser, false)
- }
-
- // Is it a single-quoted scalar?
- if parser.buffer[parser.buffer_pos] == '\'' {
- return yaml_parser_fetch_flow_scalar(parser, true)
- }
-
- // Is it a double-quoted scalar?
- if parser.buffer[parser.buffer_pos] == '"' {
- return yaml_parser_fetch_flow_scalar(parser, false)
- }
-
- // Is it a plain scalar?
- //
- // A plain scalar may start with any non-blank characters except
- //
- // '-', '?', ':', ',', '[', ']', '{', '}',
- // '#', '&', '*', '!', '|', '>', '\'', '\"',
- // '%', '@', '`'.
- //
- // In the block context (and, for the '-' indicator, in the flow context
- // too), it may also start with the characters
- //
- // '-', '?', ':'
- //
- // if it is followed by a non-space character.
- //
- // The last rule is more restrictive than the specification requires.
- // [Go] Make this logic more reasonable.
- //switch parser.buffer[parser.buffer_pos] {
- //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
- //}
- if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
- parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
- parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
- parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
- parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
- parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
- parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
- parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
- parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
- parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
- (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
- (parser.flow_level == 0 &&
- (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
- !is_blankz(parser.buffer, parser.buffer_pos+1)) {
- return yaml_parser_fetch_plain_scalar(parser)
- }
-
- // If we don't determine the token type so far, it is an error.
- return yaml_parser_set_scanner_error(parser,
- "while scanning for the next token", parser.mark,
- "found character that cannot start any token")
-}
-
-// Check the list of potential simple keys and remove the positions that
-// cannot contain simple keys anymore.
-func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
- // Check for a potential simple key for each flow level.
- for i := range parser.simple_keys {
- simple_key := &parser.simple_keys[i]
-
- // The specification requires that a simple key
- //
- // - is limited to a single line,
- // - is shorter than 1024 characters.
- if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
-
- // Check if the potential simple key to be removed is required.
- if simple_key.required {
- return yaml_parser_set_scanner_error(parser,
- "while scanning a simple key", simple_key.mark,
- "could not find expected ':'")
- }
- simple_key.possible = false
- }
- }
- return true
-}
-
-// Check if a simple key may start at the current position and add it if
-// needed.
-func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
- // A simple key is required at the current position if the scanner is in
- // the block context and the current column coincides with the indentation
- // level.
-
- required := parser.flow_level == 0 && parser.indent == parser.mark.column
-
- // A simple key is required only when it is the first token in the current
- // line. Therefore it is always allowed. But we add a check anyway.
- if required && !parser.simple_key_allowed {
- panic("should not happen")
- }
-
- //
- // If the current position may start a simple key, save it.
- //
- if parser.simple_key_allowed {
- simple_key := yaml_simple_key_t{
- possible: true,
- required: required,
- token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
- }
- simple_key.mark = parser.mark
-
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
- parser.simple_keys[len(parser.simple_keys)-1] = simple_key
- }
- return true
-}
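
For concreteness: if the parser has already handed seven tokens to its caller (tokens_parsed == 7) and two more are waiting in the queue, the saved key records token_number 9, the position of the very next token to be appended. yaml_parser_fetch_value later subtracts tokens_parsed from this number to insert the retroactive KEY token at exactly that position in the queue.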
-
-// Remove a potential simple key at the current flow level.
-func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
- i := len(parser.simple_keys) - 1
- if parser.simple_keys[i].possible {
- // If the key is required, it is an error.
- if parser.simple_keys[i].required {
- return yaml_parser_set_scanner_error(parser,
- "while scanning a simple key", parser.simple_keys[i].mark,
- "could not find expected ':'")
- }
- }
- // Remove the key from the stack.
- parser.simple_keys[i].possible = false
- return true
-}
-
-// Increase the flow level and resize the simple key list if needed.
-func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
- // Reset the simple key on the next level.
- parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
-
- // Increase the flow level.
- parser.flow_level++
- return true
-}
-
-// Decrease the flow level.
-func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
- if parser.flow_level > 0 {
- parser.flow_level--
- parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
- }
- return true
-}
-
-// Push the current indentation level to the stack and set the new level if
-// the current column is greater than the indentation level. In this case,
-// append or insert the specified token into the token queue.
-func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
- // In the flow context, do nothing.
- if parser.flow_level > 0 {
- return true
- }
-
- if parser.indent < column {
- // Push the current indentation level to the stack and set the new
- // indentation level.
- parser.indents = append(parser.indents, parser.indent)
- parser.indent = column
-
- // Create a token and insert it into the queue.
- token := yaml_token_t{
- typ: typ,
- start_mark: mark,
- end_mark: mark,
- }
- if number > -1 {
- number -= parser.tokens_parsed
- }
- yaml_insert_token(parser, number, &token)
- }
- return true
-}
-
-// Pop indentation levels from the indents stack until the current level
-// becomes less than or equal to the column. For each indentation level, append
-// the BLOCK-END token.
-func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
- // In the flow context, do nothing.
- if parser.flow_level > 0 {
- return true
- }
-
-	// Loop through the indentation levels in the stack.
- for parser.indent > column {
- // Create a token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_BLOCK_END_TOKEN,
- start_mark: parser.mark,
- end_mark: parser.mark,
- }
- yaml_insert_token(parser, -1, &token)
-
- // Pop the indentation level.
- parser.indent = parser.indents[len(parser.indents)-1]
- parser.indents = parser.indents[:len(parser.indents)-1]
- }
- return true
-}
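
A toy, standalone simulation of the roll/unroll discipline above (not the parser's own code): push on an indentation increase, pop and emit one BLOCK-END per level on a decrease.

package main

import "fmt"

func main() {
	indents := []int{}
	indent := -1
	columns := []int{0, 2, 4, 0} // column of the first token on each line

	for _, col := range columns {
		for indent > col { // unroll: emit BLOCK-END for each popped level
			fmt.Println("BLOCK-END (leaving indent", indent, ")")
			indent = indents[len(indents)-1]
			indents = indents[:len(indents)-1]
		}
		if indent < col { // roll: start a new block collection
			indents = append(indents, indent)
			indent = col
			fmt.Println("BLOCK-*-START (entering indent", indent, ")")
		}
	}
}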
-
-// Initialize the scanner and produce the STREAM-START token.
-func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
-
- // Set the initial indentation.
- parser.indent = -1
-
- // Initialize the simple key stack.
- parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
-
- // A simple key is allowed at the beginning of the stream.
- parser.simple_key_allowed = true
-
- // We have started.
- parser.stream_start_produced = true
-
- // Create the STREAM-START token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_STREAM_START_TOKEN,
- start_mark: parser.mark,
- end_mark: parser.mark,
- encoding: parser.encoding,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the STREAM-END token and shut down the scanner.
-func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
-
- // Force new line.
- if parser.mark.column != 0 {
- parser.mark.column = 0
- parser.mark.line++
- }
-
- // Reset the indentation level.
- if !yaml_parser_unroll_indent(parser, -1) {
- return false
- }
-
- // Reset simple keys.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- parser.simple_key_allowed = false
-
- // Create the STREAM-END token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_STREAM_END_TOKEN,
- start_mark: parser.mark,
- end_mark: parser.mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
-func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
- // Reset the indentation level.
- if !yaml_parser_unroll_indent(parser, -1) {
- return false
- }
-
- // Reset simple keys.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- parser.simple_key_allowed = false
-
- // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
- token := yaml_token_t{}
- if !yaml_parser_scan_directive(parser, &token) {
- return false
- }
- // Append the token to the queue.
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the DOCUMENT-START or DOCUMENT-END token.
-func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
- // Reset the indentation level.
- if !yaml_parser_unroll_indent(parser, -1) {
- return false
- }
-
- // Reset simple keys.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- parser.simple_key_allowed = false
-
- // Consume the token.
- start_mark := parser.mark
-
- skip(parser)
- skip(parser)
- skip(parser)
-
- end_mark := parser.mark
-
- // Create the DOCUMENT-START or DOCUMENT-END token.
- token := yaml_token_t{
- typ: typ,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- // Append the token to the queue.
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
-func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
- // The indicators '[' and '{' may start a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // Increase the flow level.
- if !yaml_parser_increase_flow_level(parser) {
- return false
- }
-
- // A simple key may follow the indicators '[' and '{'.
- parser.simple_key_allowed = true
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
-	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
- token := yaml_token_t{
- typ: typ,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- // Append the token to the queue.
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
-func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
- // Reset any potential simple key on the current flow level.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // Decrease the flow level.
- if !yaml_parser_decrease_flow_level(parser) {
- return false
- }
-
- // No simple keys after the indicators ']' and '}'.
- parser.simple_key_allowed = false
-
- // Consume the token.
-
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
-	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
- token := yaml_token_t{
- typ: typ,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- // Append the token to the queue.
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the FLOW-ENTRY token.
-func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
- // Reset any potential simple keys on the current flow level.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // Simple keys are allowed after ','.
- parser.simple_key_allowed = true
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the FLOW-ENTRY token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_FLOW_ENTRY_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the BLOCK-ENTRY token.
-func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
- // Check if the scanner is in the block context.
- if parser.flow_level == 0 {
- // Check if we are allowed to start a new entry.
- if !parser.simple_key_allowed {
- return yaml_parser_set_scanner_error(parser, "", parser.mark,
- "block sequence entries are not allowed in this context")
- }
- // Add the BLOCK-SEQUENCE-START token if needed.
- if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
- return false
- }
- } else {
- // It is an error for the '-' indicator to occur in the flow context,
- // but we let the Parser detect and report about it because the Parser
- // is able to point to the context.
- }
-
- // Reset any potential simple keys on the current flow level.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // Simple keys are allowed after '-'.
- parser.simple_key_allowed = true
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the BLOCK-ENTRY token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_BLOCK_ENTRY_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the KEY token.
-func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
-
- // In the block context, additional checks are required.
- if parser.flow_level == 0 {
-		// Check if we are allowed to start a new key (not necessarily simple).
- if !parser.simple_key_allowed {
- return yaml_parser_set_scanner_error(parser, "", parser.mark,
- "mapping keys are not allowed in this context")
- }
- // Add the BLOCK-MAPPING-START token if needed.
- if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
- return false
- }
- }
-
- // Reset any potential simple keys on the current flow level.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // Simple keys are allowed after '?' in the block context.
- parser.simple_key_allowed = parser.flow_level == 0
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the KEY token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_KEY_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the VALUE token.
-func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
-
- simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
-
- // Have we found a simple key?
- if simple_key.possible {
- // Create the KEY token and insert it into the queue.
- token := yaml_token_t{
- typ: yaml_KEY_TOKEN,
- start_mark: simple_key.mark,
- end_mark: simple_key.mark,
- }
- yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
-
- // In the block context, we may need to add the BLOCK-MAPPING-START token.
- if !yaml_parser_roll_indent(parser, simple_key.mark.column,
- simple_key.token_number,
- yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
- return false
- }
-
- // Remove the simple key.
- simple_key.possible = false
-
- // A simple key cannot follow another simple key.
- parser.simple_key_allowed = false
-
- } else {
- // The ':' indicator follows a complex key.
-
- // In the block context, extra checks are required.
- if parser.flow_level == 0 {
-
- // Check if we are allowed to start a complex value.
- if !parser.simple_key_allowed {
- return yaml_parser_set_scanner_error(parser, "", parser.mark,
- "mapping values are not allowed in this context")
- }
-
- // Add the BLOCK-MAPPING-START token if needed.
- if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
- return false
- }
- }
-
- // Simple keys after ':' are allowed in the block context.
- parser.simple_key_allowed = parser.flow_level == 0
- }
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the VALUE token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_VALUE_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the ALIAS or ANCHOR token.
-func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
- // An anchor or an alias could be a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // A simple key cannot follow an anchor or an alias.
- parser.simple_key_allowed = false
-
- // Create the ALIAS or ANCHOR token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_anchor(parser, &token, typ) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the TAG token.
-func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
- // A tag could be a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // A simple key cannot follow a tag.
- parser.simple_key_allowed = false
-
- // Create the TAG token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_tag(parser, &token) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
-func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
- // Remove any potential simple keys.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // A simple key may follow a block scalar.
- parser.simple_key_allowed = true
-
- // Create the SCALAR token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_block_scalar(parser, &token, literal) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
-func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
- // A plain scalar could be a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // A simple key cannot follow a flow scalar.
- parser.simple_key_allowed = false
-
- // Create the SCALAR token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_flow_scalar(parser, &token, single) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the SCALAR(...,plain) token.
-func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
- // A plain scalar could be a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // A simple key cannot follow a flow scalar.
- parser.simple_key_allowed = false
-
- // Create the SCALAR token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_plain_scalar(parser, &token) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Eat whitespaces and comments until the next token is found.
-func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
-
-	// Loop until the next token is found.
- for {
- // Allow the BOM mark to start a line.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
- skip(parser)
- }
-
- // Eat whitespaces.
- // Tabs are allowed:
- // - in the flow context
- // - in the block context, but not at the beginning of the line or
- // after '-', '?', or ':' (complex value).
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Eat a comment until a line break.
- if parser.buffer[parser.buffer_pos] == '#' {
- for !is_breakz(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- }
-
- // If it is a line break, eat it.
- if is_break(parser.buffer, parser.buffer_pos) {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- skip_line(parser)
-
- // In the block context, a new line may start a simple key.
- if parser.flow_level == 0 {
- parser.simple_key_allowed = true
- }
- } else {
- break // We have found a token.
- }
- }
-
- return true
-}
-
-// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
-//
-// Scope:
-// %YAML 1.1 # a comment \n
-// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-// %TAG !yaml! tag:yaml.org,2002: \n
-// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-//
-func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
- // Eat '%'.
- start_mark := parser.mark
- skip(parser)
-
- // Scan the directive name.
- var name []byte
- if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
- return false
- }
-
- // Is it a YAML directive?
- if bytes.Equal(name, []byte("YAML")) {
- // Scan the VERSION directive value.
- var major, minor int8
- if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
- return false
- }
- end_mark := parser.mark
-
- // Create a VERSION-DIRECTIVE token.
- *token = yaml_token_t{
- typ: yaml_VERSION_DIRECTIVE_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- major: major,
- minor: minor,
- }
-
- // Is it a TAG directive?
- } else if bytes.Equal(name, []byte("TAG")) {
- // Scan the TAG directive value.
- var handle, prefix []byte
- if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
- return false
- }
- end_mark := parser.mark
-
- // Create a TAG-DIRECTIVE token.
- *token = yaml_token_t{
- typ: yaml_TAG_DIRECTIVE_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: handle,
- prefix: prefix,
- }
-
- // Unknown directive.
- } else {
- yaml_parser_set_scanner_error(parser, "while scanning a directive",
-			start_mark, "found unknown directive name")
- return false
- }
-
- // Eat the rest of the line including any comments.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- if parser.buffer[parser.buffer_pos] == '#' {
- for !is_breakz(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- }
-
- // Check if we are at the end of the line.
- if !is_breakz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a directive",
- start_mark, "did not find expected comment or line break")
- return false
- }
-
- // Eat a line break.
- if is_break(parser.buffer, parser.buffer_pos) {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- skip_line(parser)
- }
-
- return true
-}
-
-// Scan the directive name.
-//
-// Scope:
-// %YAML 1.1 # a comment \n
-// ^^^^
-// %TAG !yaml! tag:yaml.org,2002: \n
-// ^^^
-//
-func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
- // Consume the directive name.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- var s []byte
- for is_alpha(parser.buffer, parser.buffer_pos) {
- s = read(parser, s)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check if the name is empty.
- if len(s) == 0 {
- yaml_parser_set_scanner_error(parser, "while scanning a directive",
- start_mark, "could not find expected directive name")
- return false
- }
-
-	// Check for a blank character after the name.
- if !is_blankz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a directive",
- start_mark, "found unexpected non-alphabetical character")
- return false
- }
- *name = s
- return true
-}
-
-// Scan the value of VERSION-DIRECTIVE.
-//
-// Scope:
-// %YAML 1.1 # a comment \n
-// ^^^^^^
-func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
- // Eat whitespaces.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Consume the major version number.
- if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
- return false
- }
-
- // Eat '.'.
- if parser.buffer[parser.buffer_pos] != '.' {
- return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
- start_mark, "did not find expected digit or '.' character")
- }
-
- skip(parser)
-
- // Consume the minor version number.
- if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
- return false
- }
- return true
-}
-
-const max_number_length = 2
-
-// Scan the version number of VERSION-DIRECTIVE.
-//
-// Scope:
-// %YAML 1.1 # a comment \n
-// ^
-// %YAML 1.1 # a comment \n
-// ^
-func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
-
-	// Repeat while the next character is a digit.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- var value, length int8
- for is_digit(parser.buffer, parser.buffer_pos) {
- // Check if the number is too long.
- length++
- if length > max_number_length {
- return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
-				start_mark, "found an extremely long version number")
- }
- value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check if the number was present.
- if length == 0 {
- return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
- start_mark, "did not find expected version number")
- }
- *number = value
- return true
-}
-
-// Scan the value of a TAG-DIRECTIVE token.
-//
-// Scope:
-// %TAG !yaml! tag:yaml.org,2002: \n
-// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-//
-func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
- var handle_value, prefix_value []byte
-
- // Eat whitespaces.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Scan a handle.
- if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
- return false
- }
-
- // Expect a whitespace.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if !is_blank(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
- start_mark, "did not find expected whitespace")
- return false
- }
-
- // Eat whitespaces.
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Scan a prefix.
- if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
- return false
- }
-
- // Expect a whitespace or line break.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if !is_blankz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
- start_mark, "did not find expected whitespace or line break")
- return false
- }
-
- *handle = handle_value
- *prefix = prefix_value
- return true
-}
-
-func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
- var s []byte
-
- // Eat the indicator character.
- start_mark := parser.mark
- skip(parser)
-
- // Consume the value.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_alpha(parser.buffer, parser.buffer_pos) {
- s = read(parser, s)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- end_mark := parser.mark
-
- /*
- * Check if length of the anchor is greater than 0 and it is followed by
- * a whitespace character or one of the indicators:
- *
- * '?', ':', ',', ']', '}', '%', '@', '`'.
- */
-
- if len(s) == 0 ||
- !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
- parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
- parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
- parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
- parser.buffer[parser.buffer_pos] == '`') {
- context := "while scanning an alias"
- if typ == yaml_ANCHOR_TOKEN {
- context = "while scanning an anchor"
- }
- yaml_parser_set_scanner_error(parser, context, start_mark,
- "did not find expected alphabetic or numeric character")
- return false
- }
-
- // Create a token.
- *token = yaml_token_t{
- typ: typ,
- start_mark: start_mark,
- end_mark: end_mark,
- value: s,
- }
-
- return true
-}
-
-// Scan a TAG token.
-func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
- var handle, suffix []byte
-
- start_mark := parser.mark
-
- // Check if the tag is in the canonical form.
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
-
- if parser.buffer[parser.buffer_pos+1] == '<' {
- // Keep the handle as ''
-
- // Eat '!<'
- skip(parser)
- skip(parser)
-
- // Consume the tag value.
- if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
- return false
- }
-
- // Check for '>' and eat it.
- if parser.buffer[parser.buffer_pos] != '>' {
- yaml_parser_set_scanner_error(parser, "while scanning a tag",
- start_mark, "did not find the expected '>'")
- return false
- }
-
- skip(parser)
- } else {
- // The tag has either the '!suffix' or the '!handle!suffix' form.
-
- // First, try to scan a handle.
- if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
- return false
- }
-
-		// Check if it is, indeed, a handle.
- if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
- // Scan the suffix now.
- if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
- return false
- }
- } else {
- // It wasn't a handle after all. Scan the rest of the tag.
- if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
- return false
- }
-
- // Set the handle to '!'.
- handle = []byte{'!'}
-
- // A special case: the '!' tag. Set the handle to '' and the
- // suffix to '!'.
- if len(suffix) == 0 {
- handle, suffix = suffix, handle
- }
- }
- }
-
- // Check the character which ends the tag.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if !is_blankz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a tag",
- start_mark, "did not find expected whitespace or line break")
- return false
- }
-
- end_mark := parser.mark
-
- // Create a token.
- *token = yaml_token_t{
- typ: yaml_TAG_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: handle,
- suffix: suffix,
- }
- return true
-}
-
-// Scan a tag handle.
-func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
- // Check the initial '!' character.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if parser.buffer[parser.buffer_pos] != '!' {
- yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "did not find expected '!'")
- return false
- }
-
- var s []byte
-
- // Copy the '!' character.
- s = read(parser, s)
-
- // Copy all subsequent alphabetical and numerical characters.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- for is_alpha(parser.buffer, parser.buffer_pos) {
- s = read(parser, s)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check if the trailing character is '!' and copy it.
- if parser.buffer[parser.buffer_pos] == '!' {
- s = read(parser, s)
- } else {
- // It's either the '!' tag or not really a tag handle. If it's a %TAG
-		// directive, it's an error. If it's a tag token, it must be part of the URI.
- if directive && !(s[0] == '!' && s[1] == 0) {
- yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "did not find expected '!'")
- return false
- }
- }
-
- *handle = s
- return true
-}
-
-// Scan a tag.
-func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
- //size_t length = head ? strlen((char *)head) : 0
- var s []byte
-
- // Copy the head if needed.
- //
- // Note that we don't copy the leading '!' character.
- if len(head) > 1 {
- s = append(s, head[1:]...)
- }
-
- // Scan the tag.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- // The set of characters that may appear in URI is as follows:
- //
- // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
- // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
- // '%'.
- // [Go] Convert this into more reasonable logic.
- for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
- parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
- parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
- parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
- parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
- parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
- parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
- parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
- parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
- parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
- parser.buffer[parser.buffer_pos] == '%' {
- // Check if it is a URI-escape sequence.
- if parser.buffer[parser.buffer_pos] == '%' {
- if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
- return false
- }
- } else {
- s = read(parser, s)
- }
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check if the tag is non-empty.
- if len(s) == 0 {
- yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "did not find expected tag URI")
- return false
- }
- *uri = s
- return true
-}
-
-// Decode a URI-escape sequence corresponding to a single UTF-8 character.
-func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
-
- // Decode the required number of characters.
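-	// w == 1024 is a sentinel meaning the UTF-8 sequence width is not yet known;
-	// it is replaced by the real width (1-4) once the leading octet is decoded.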
- w := 1024
- for w > 0 {
- // Check for a URI-escaped octet.
- if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
- return false
- }
-
- if !(parser.buffer[parser.buffer_pos] == '%' &&
- is_hex(parser.buffer, parser.buffer_pos+1) &&
- is_hex(parser.buffer, parser.buffer_pos+2)) {
- return yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "did not find URI escaped octet")
- }
-
- // Get the octet.
- octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
-
- // If it is the leading octet, determine the length of the UTF-8 sequence.
- if w == 1024 {
- w = width(octet)
- if w == 0 {
- return yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "found an incorrect leading UTF-8 octet")
- }
- } else {
- // Check if the trailing octet is correct.
- if octet&0xC0 != 0x80 {
- return yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "found an incorrect trailing UTF-8 octet")
- }
- }
-
- // Copy the octet and move the pointers.
- *s = append(*s, octet)
- skip(parser)
- skip(parser)
- skip(parser)
- w--
- }
- return true
-}
-
-// Scan a block scalar.
-func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
- // Eat the indicator '|' or '>'.
- start_mark := parser.mark
- skip(parser)
-
- // Scan the additional block scalar indicators.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- // Check for a chomping indicator.
- var chomping, increment int
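-	// chomping: -1 strips the final line break, 0 (the default) keeps at most one
-	// final break, and +1 keeps all trailing breaks.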
- if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
- // Set the chomping method and eat the indicator.
- if parser.buffer[parser.buffer_pos] == '+' {
- chomping = +1
- } else {
- chomping = -1
- }
- skip(parser)
-
- // Check for an indentation indicator.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if is_digit(parser.buffer, parser.buffer_pos) {
-			// Check that the indentation is greater than 0.
- if parser.buffer[parser.buffer_pos] == '0' {
- yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
-					start_mark, "found an indentation indicator equal to 0")
- return false
- }
-
-			// Get the indentation level and eat the indicator.
- increment = as_digit(parser.buffer, parser.buffer_pos)
- skip(parser)
- }
-
- } else if is_digit(parser.buffer, parser.buffer_pos) {
- // Do the same as above, but in the opposite order.
-
- if parser.buffer[parser.buffer_pos] == '0' {
- yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
-				start_mark, "found an indentation indicator equal to 0")
- return false
- }
- increment = as_digit(parser.buffer, parser.buffer_pos)
- skip(parser)
-
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
- if parser.buffer[parser.buffer_pos] == '+' {
- chomping = +1
- } else {
- chomping = -1
- }
- skip(parser)
- }
- }
-
- // Eat whitespaces and comments to the end of the line.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- if parser.buffer[parser.buffer_pos] == '#' {
- for !is_breakz(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- }
-
- // Check if we are at the end of the line.
- if !is_breakz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
- start_mark, "did not find expected comment or line break")
- return false
- }
-
- // Eat a line break.
- if is_break(parser.buffer, parser.buffer_pos) {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- skip_line(parser)
- }
-
- end_mark := parser.mark
-
-	// Set the indentation level if it was specified.
- var indent int
- if increment > 0 {
- if parser.indent >= 0 {
- indent = parser.indent + increment
- } else {
- indent = increment
- }
- }
-
- // Scan the leading line breaks and determine the indentation level if needed.
- var s, leading_break, trailing_breaks []byte
- if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
- return false
- }
-
- // Scan the block scalar content.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- var leading_blank, trailing_blank bool
- for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
- // We are at the beginning of a non-empty line.
-
- // Is it a trailing whitespace?
- trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
-
- // Check if we need to fold the leading line break.
- if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
- // Do we need to join the lines by space?
- if len(trailing_breaks) == 0 {
- s = append(s, ' ')
- }
- } else {
- s = append(s, leading_break...)
- }
- leading_break = leading_break[:0]
-
- // Append the remaining line breaks.
- s = append(s, trailing_breaks...)
- trailing_breaks = trailing_breaks[:0]
-
- // Is it a leading whitespace?
- leading_blank = is_blank(parser.buffer, parser.buffer_pos)
-
- // Consume the current line.
- for !is_breakz(parser.buffer, parser.buffer_pos) {
- s = read(parser, s)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Consume the line break.
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
-
- leading_break = read_line(parser, leading_break)
-
-		// Eat the following indentation spaces and line breaks.
- if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
- return false
- }
- }
-
- // Chomp the tail.
- if chomping != -1 {
- s = append(s, leading_break...)
- }
- if chomping == 1 {
- s = append(s, trailing_breaks...)
- }
-
- // Create a token.
- *token = yaml_token_t{
- typ: yaml_SCALAR_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: s,
- style: yaml_LITERAL_SCALAR_STYLE,
- }
- if !literal {
- token.style = yaml_FOLDED_SCALAR_STYLE
- }
- return true
-}
-
-// Scan indentation spaces and line breaks for a block scalar. Determine the
-// indentation level if needed.
-func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
- *end_mark = parser.mark
-
-	// Eat the indentation spaces and line breaks.
- max_indent := 0
- for {
-		// Eat the indentation spaces.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- if parser.mark.column > max_indent {
- max_indent = parser.mark.column
- }
-
-		// Check for a tab character messing up the indentation.
- if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
- return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
-				start_mark, "found a tab character where an indentation space is expected")
- }
-
- // Have we found a non-empty line?
- if !is_break(parser.buffer, parser.buffer_pos) {
- break
- }
-
- // Consume the line break.
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- // [Go] Should really be returning breaks instead.
- *breaks = read_line(parser, *breaks)
- *end_mark = parser.mark
- }
-
- // Determine the indentation level if needed.
- if *indent == 0 {
- *indent = max_indent
- if *indent < parser.indent+1 {
- *indent = parser.indent + 1
- }
- if *indent < 1 {
- *indent = 1
- }
- }
- return true
-}
-
-// Scan a quoted scalar.
-func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
- // Eat the left quote.
- start_mark := parser.mark
- skip(parser)
-
- // Consume the content of the quoted scalar.
- var s, leading_break, trailing_breaks, whitespaces []byte
- for {
- // Check that there are no document indicators at the beginning of the line.
- if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
- return false
- }
-
- if parser.mark.column == 0 &&
- ((parser.buffer[parser.buffer_pos+0] == '-' &&
- parser.buffer[parser.buffer_pos+1] == '-' &&
- parser.buffer[parser.buffer_pos+2] == '-') ||
- (parser.buffer[parser.buffer_pos+0] == '.' &&
- parser.buffer[parser.buffer_pos+1] == '.' &&
- parser.buffer[parser.buffer_pos+2] == '.')) &&
- is_blankz(parser.buffer, parser.buffer_pos+3) {
- yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
- start_mark, "found unexpected document indicator")
- return false
- }
-
- // Check for EOF.
- if is_z(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
- start_mark, "found unexpected end of stream")
- return false
- }
-
- // Consume non-blank characters.
- leading_blanks := false
- for !is_blankz(parser.buffer, parser.buffer_pos) {
- if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
-				// It is an escaped single quote.
- s = append(s, '\'')
- skip(parser)
- skip(parser)
-
- } else if single && parser.buffer[parser.buffer_pos] == '\'' {
- // It is a right single quote.
- break
- } else if !single && parser.buffer[parser.buffer_pos] == '"' {
- // It is a right double quote.
- break
-
- } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
- // It is an escaped line break.
- if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
- return false
- }
- skip(parser)
- skip_line(parser)
- leading_blanks = true
- break
-
- } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
- // It is an escape sequence.
- code_length := 0
-
- // Check the escape character.
- switch parser.buffer[parser.buffer_pos+1] {
- case '0':
- s = append(s, 0)
- case 'a':
- s = append(s, '\x07')
- case 'b':
- s = append(s, '\x08')
- case 't', '\t':
- s = append(s, '\x09')
- case 'n':
- s = append(s, '\x0A')
- case 'v':
- s = append(s, '\x0B')
- case 'f':
- s = append(s, '\x0C')
- case 'r':
- s = append(s, '\x0D')
- case 'e':
- s = append(s, '\x1B')
- case ' ':
- s = append(s, '\x20')
- case '"':
- s = append(s, '"')
- case '\'':
- s = append(s, '\'')
- case '\\':
- s = append(s, '\\')
- case 'N': // NEL (#x85)
- s = append(s, '\xC2')
- s = append(s, '\x85')
- case '_': // #xA0
- s = append(s, '\xC2')
- s = append(s, '\xA0')
- case 'L': // LS (#x2028)
- s = append(s, '\xE2')
- s = append(s, '\x80')
- s = append(s, '\xA8')
- case 'P': // PS (#x2029)
- s = append(s, '\xE2')
- s = append(s, '\x80')
- s = append(s, '\xA9')
- case 'x':
- code_length = 2
- case 'u':
- code_length = 4
- case 'U':
- code_length = 8
- default:
- yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
- start_mark, "found unknown escape character")
- return false
- }
-
- skip(parser)
- skip(parser)
-
- // Consume an arbitrary escape code.
- if code_length > 0 {
- var value int
-
- // Scan the character value.
- if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
- return false
- }
- for k := 0; k < code_length; k++ {
- if !is_hex(parser.buffer, parser.buffer_pos+k) {
- yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
-								start_mark, "did not find expected hexadecimal number")
- return false
- }
- value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
- }
-
- // Check the value and write the character.
- if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
- yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
- start_mark, "found invalid Unicode character escape code")
- return false
- }
- if value <= 0x7F {
- s = append(s, byte(value))
- } else if value <= 0x7FF {
- s = append(s, byte(0xC0+(value>>6)))
- s = append(s, byte(0x80+(value&0x3F)))
- } else if value <= 0xFFFF {
- s = append(s, byte(0xE0+(value>>12)))
- s = append(s, byte(0x80+((value>>6)&0x3F)))
- s = append(s, byte(0x80+(value&0x3F)))
- } else {
- s = append(s, byte(0xF0+(value>>18)))
- s = append(s, byte(0x80+((value>>12)&0x3F)))
- s = append(s, byte(0x80+((value>>6)&0x3F)))
- s = append(s, byte(0x80+(value&0x3F)))
- }
-
- // Advance the pointer.
- for k := 0; k < code_length; k++ {
- skip(parser)
- }
- }
- } else {
- // It is a non-escaped non-blank character.
- s = read(parser, s)
- }
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- }
-
- // Check if we are at the end of the scalar.
- if single {
- if parser.buffer[parser.buffer_pos] == '\'' {
- break
- }
- } else {
- if parser.buffer[parser.buffer_pos] == '"' {
- break
- }
- }
-
- // Consume blank characters.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
- if is_blank(parser.buffer, parser.buffer_pos) {
- // Consume a space or a tab character.
- if !leading_blanks {
- whitespaces = read(parser, whitespaces)
- } else {
- skip(parser)
- }
- } else {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
-
-				// Check if it is the first line break.
- if !leading_blanks {
- whitespaces = whitespaces[:0]
- leading_break = read_line(parser, leading_break)
- leading_blanks = true
- } else {
- trailing_breaks = read_line(parser, trailing_breaks)
- }
- }
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Join the whitespaces or fold line breaks.
- if leading_blanks {
- // Do we need to fold line breaks?
- if len(leading_break) > 0 && leading_break[0] == '\n' {
- if len(trailing_breaks) == 0 {
- s = append(s, ' ')
- } else {
- s = append(s, trailing_breaks...)
- }
- } else {
- s = append(s, leading_break...)
- s = append(s, trailing_breaks...)
- }
- trailing_breaks = trailing_breaks[:0]
- leading_break = leading_break[:0]
- } else {
- s = append(s, whitespaces...)
- whitespaces = whitespaces[:0]
- }
- }
-
- // Eat the right quote.
- skip(parser)
- end_mark := parser.mark
-
- // Create a token.
- *token = yaml_token_t{
- typ: yaml_SCALAR_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: s,
- style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
- }
- if !single {
- token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
- return true
-}
-
-// Scan a plain scalar.
-func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
-
- var s, leading_break, trailing_breaks, whitespaces []byte
- var leading_blanks bool
- var indent = parser.indent + 1
-
- start_mark := parser.mark
- end_mark := parser.mark
-
- // Consume the content of the plain scalar.
- for {
- // Check for a document indicator.
- if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
- return false
- }
- if parser.mark.column == 0 &&
- ((parser.buffer[parser.buffer_pos+0] == '-' &&
- parser.buffer[parser.buffer_pos+1] == '-' &&
- parser.buffer[parser.buffer_pos+2] == '-') ||
- (parser.buffer[parser.buffer_pos+0] == '.' &&
- parser.buffer[parser.buffer_pos+1] == '.' &&
- parser.buffer[parser.buffer_pos+2] == '.')) &&
- is_blankz(parser.buffer, parser.buffer_pos+3) {
- break
- }
-
- // Check for a comment.
- if parser.buffer[parser.buffer_pos] == '#' {
- break
- }
-
- // Consume non-blank characters.
- for !is_blankz(parser.buffer, parser.buffer_pos) {
-
- // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
- if parser.flow_level > 0 &&
- parser.buffer[parser.buffer_pos] == ':' &&
- !is_blankz(parser.buffer, parser.buffer_pos+1) {
- yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
- start_mark, "found unexpected ':'")
- return false
- }
-
- // Check for indicators that may end a plain scalar.
- if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
- (parser.flow_level > 0 &&
- (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
- parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
- parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
- parser.buffer[parser.buffer_pos] == '}')) {
- break
- }
-
- // Check if we need to join whitespaces and breaks.
- if leading_blanks || len(whitespaces) > 0 {
- if leading_blanks {
- // Do we need to fold line breaks?
- if leading_break[0] == '\n' {
- if len(trailing_breaks) == 0 {
- s = append(s, ' ')
- } else {
- s = append(s, trailing_breaks...)
- }
- } else {
- s = append(s, leading_break...)
- s = append(s, trailing_breaks...)
- }
- trailing_breaks = trailing_breaks[:0]
- leading_break = leading_break[:0]
- leading_blanks = false
- } else {
- s = append(s, whitespaces...)
- whitespaces = whitespaces[:0]
- }
- }
-
- // Copy the character.
- s = read(parser, s)
-
- end_mark = parser.mark
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- }
-
- // Is it the end?
- if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
- break
- }
-
- // Consume blank characters.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
- if is_blank(parser.buffer, parser.buffer_pos) {
-
-				// Check for a tab character that abuses indentation.
- if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
-						start_mark, "found a tab character that violates indentation")
- return false
- }
-
- // Consume a space or a tab character.
- if !leading_blanks {
- whitespaces = read(parser, whitespaces)
- } else {
- skip(parser)
- }
- } else {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
-
-				// Check if it is the first line break.
- if !leading_blanks {
- whitespaces = whitespaces[:0]
- leading_break = read_line(parser, leading_break)
- leading_blanks = true
- } else {
- trailing_breaks = read_line(parser, trailing_breaks)
- }
- }
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
-		// Check the indentation level.
- if parser.flow_level == 0 && parser.mark.column < indent {
- break
- }
- }
-
- // Create a token.
- *token = yaml_token_t{
- typ: yaml_SCALAR_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: s,
- style: yaml_PLAIN_SCALAR_STYLE,
- }
-
- // Note that we change the 'simple_key_allowed' flag.
- if leading_blanks {
- parser.simple_key_allowed = true
- }
- return true
-}
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/sorter.go
deleted file mode 100644
index 5958822f9c..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/sorter.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package yaml
-
-import (
- "reflect"
- "unicode"
-)
-
-type keyList []reflect.Value
-
-func (l keyList) Len() int { return len(l) }
-func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
-func (l keyList) Less(i, j int) bool {
- a := l[i]
- b := l[j]
- ak := a.Kind()
- bk := b.Kind()
- for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
- a = a.Elem()
- ak = a.Kind()
- }
- for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
- b = b.Elem()
- bk = b.Kind()
- }
- af, aok := keyFloat(a)
- bf, bok := keyFloat(b)
- if aok && bok {
- if af != bf {
- return af < bf
- }
- if ak != bk {
- return ak < bk
- }
- return numLess(a, b)
- }
- if ak != reflect.String || bk != reflect.String {
- return ak < bk
- }
- ar, br := []rune(a.String()), []rune(b.String())
- for i := 0; i < len(ar) && i < len(br); i++ {
- if ar[i] == br[i] {
- continue
- }
- al := unicode.IsLetter(ar[i])
- bl := unicode.IsLetter(br[i])
- if al && bl {
- return ar[i] < br[i]
- }
- if al || bl {
- return bl
- }
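-		// Neither rune is a letter here: compare any digit runs numerically
-		// (so "foo2" sorts before "foo10") and fall back to plain rune order.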
- var ai, bi int
- var an, bn int64
- for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
- an = an*10 + int64(ar[ai]-'0')
- }
- for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
- bn = bn*10 + int64(br[bi]-'0')
- }
- if an != bn {
- return an < bn
- }
- if ai != bi {
- return ai < bi
- }
- return ar[i] < br[i]
- }
- return len(ar) < len(br)
-}
-
-// keyFloat returns a float value for v if it is a number/bool
-// and whether it is a number/bool or not.
-func keyFloat(v reflect.Value) (f float64, ok bool) {
- switch v.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return float64(v.Int()), true
- case reflect.Float32, reflect.Float64:
- return v.Float(), true
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return float64(v.Uint()), true
- case reflect.Bool:
- if v.Bool() {
- return 1, true
- }
- return 0, true
- }
- return 0, false
-}
-
-// numLess returns whether a < b.
-// a and b must necessarily have the same kind.
-func numLess(a, b reflect.Value) bool {
- switch a.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return a.Int() < b.Int()
- case reflect.Float32, reflect.Float64:
- return a.Float() < b.Float()
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return a.Uint() < b.Uint()
- case reflect.Bool:
- return !a.Bool() && b.Bool()
- }
- panic("not a number")
-}
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/writerc.go
deleted file mode 100644
index 190362f25d..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/writerc.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package yaml
-
-// Set the writer error and return false.
-func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
- emitter.error = yaml_WRITER_ERROR
- emitter.problem = problem
- return false
-}
-
-// Flush the output buffer.
-func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
- if emitter.write_handler == nil {
- panic("write handler not set")
- }
-
- // Check if the buffer is empty.
- if emitter.buffer_pos == 0 {
- return true
- }
-
- // If the output encoding is UTF-8, we don't need to recode the buffer.
- if emitter.encoding == yaml_UTF8_ENCODING {
- if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
- return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
- }
- emitter.buffer_pos = 0
- return true
- }
-
- // Recode the buffer into the raw buffer.
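-	// low and high give the byte offsets of the low and high octets within each
-	// 16-bit code unit: 0/1 for UTF-16LE and 1/0 for UTF-16BE.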
- var low, high int
- if emitter.encoding == yaml_UTF16LE_ENCODING {
- low, high = 0, 1
- } else {
- high, low = 1, 0
- }
-
- pos := 0
- for pos < emitter.buffer_pos {
- // See the "reader.c" code for more details on UTF-8 encoding. Note
- // that we assume that the buffer contains a valid UTF-8 sequence.
-
- // Read the next UTF-8 character.
- octet := emitter.buffer[pos]
-
- var w int
- var value rune
- switch {
- case octet&0x80 == 0x00:
- w, value = 1, rune(octet&0x7F)
- case octet&0xE0 == 0xC0:
- w, value = 2, rune(octet&0x1F)
- case octet&0xF0 == 0xE0:
- w, value = 3, rune(octet&0x0F)
- case octet&0xF8 == 0xF0:
- w, value = 4, rune(octet&0x07)
- }
- for k := 1; k < w; k++ {
- octet = emitter.buffer[pos+k]
- value = (value << 6) + (rune(octet) & 0x3F)
- }
- pos += w
-
- // Write the character.
- if value < 0x10000 {
- var b [2]byte
- b[high] = byte(value >> 8)
- b[low] = byte(value & 0xFF)
- emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
- } else {
- // Write the character using a surrogate pair (check "reader.c").
- var b [4]byte
- value -= 0x10000
- b[high] = byte(0xD8 + (value >> 18))
- b[low] = byte((value >> 10) & 0xFF)
- b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
- b[low+2] = byte(value & 0xFF)
- emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
- }
- }
-
- // Write the raw buffer.
- if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
- return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
- }
- emitter.buffer_pos = 0
- emitter.raw_buffer = emitter.raw_buffer[:0]
- return true
-}
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yaml.go
deleted file mode 100644
index e3e01edc96..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yaml.go
+++ /dev/null
@@ -1,344 +0,0 @@
-// Package yaml implements YAML support for the Go language.
-//
-// Source code and other details for the project are available at GitHub:
-//
-// https://github.com/go-yaml/yaml
-//
-package yaml
-
-import (
- "errors"
- "fmt"
- "reflect"
- "strings"
- "sync"
-)
-
-// MapSlice encodes and decodes as a YAML map.
-// The order of keys is preserved when encoding and decoding.
-type MapSlice []MapItem
-
-// MapItem is an item in a MapSlice.
-type MapItem struct {
- Key, Value interface{}
-}
-
-// The Unmarshaler interface may be implemented by types to customize their
-// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
-// method receives a function that may be called to unmarshal the original
-// YAML value into a field or variable. It is safe to call the unmarshal
-// function parameter more than once if necessary.
-type Unmarshaler interface {
- UnmarshalYAML(unmarshal func(interface{}) error) error
-}
-
-// The Marshaler interface may be implemented by types to customize their
-// behavior when being marshaled into a YAML document. The returned value
-// is marshaled in place of the original value implementing Marshaler.
-//
-// If an error is returned by MarshalYAML, the marshaling procedure stops
-// and returns with the provided error.
-type Marshaler interface {
- MarshalYAML() (interface{}, error)
-}
-
-// Unmarshal decodes the first document found within the in byte slice
-// and assigns decoded values into the out value.
-//
-// Maps and pointers (to a struct, string, int, etc) are accepted as out
-// values. If an internal pointer within a struct is not initialized,
-// the yaml package will initialize it if necessary for unmarshalling
-// the provided data. The out parameter must not be nil.
-//
-// The type of the decoded values should be compatible with the respective
-// values in out. If one or more values cannot be decoded due to type
-// mismatches, decoding continues partially until the end of the YAML
-// content, and a *yaml.TypeError is returned with details for all
-// missed values.
-//
-// Struct fields are only unmarshalled if they are exported (have an
-// upper case first letter), and are unmarshalled using the field name
-// lowercased as the default key. Custom keys may be defined via the
-// "yaml" name in the field tag: the content preceding the first comma
-// is used as the key, and the following comma-separated options are
-// used to tweak the marshalling process (see Marshal).
-// Conflicting names result in a runtime error.
-//
-// For example:
-//
-// type T struct {
-// F int `yaml:"a,omitempty"`
-// B int
-// }
-// var t T
-// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
-//
-// See the documentation of Marshal for the format of tags and a list of
-// supported tag options.
-//
-func Unmarshal(in []byte, out interface{}) (err error) {
- defer handleErr(&err)
- d := newDecoder()
- p := newParser(in)
- defer p.destroy()
- node := p.parse()
- if node != nil {
- v := reflect.ValueOf(out)
- if v.Kind() == reflect.Ptr && !v.IsNil() {
- v = v.Elem()
- }
- d.unmarshal(node, v)
- }
- if len(d.terrors) > 0 {
- return &TypeError{d.terrors}
- }
- return nil
-}
-
-// Marshal serializes the value provided into a YAML document. The structure
-// of the generated document will reflect the structure of the value itself.
-// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
-//
-// Struct fields are only marshalled if they are exported (have an upper case
-// first letter), and are marshalled using the field name lowercased as the
-// default key. Custom keys may be defined via the "yaml" name in the field
-// tag: the content preceding the first comma is used as the key, and the
-// following comma-separated options are used to tweak the marshalling process.
-// Conflicting names result in a runtime error.
-//
-// The field tag format accepted is:
-//
-//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
-//
-// The following flags are currently supported:
-//
-// omitempty Only include the field if it's not set to the zero
-// value for the type or to empty slices or maps.
-// Does not apply to zero valued structs.
-//
-// flow Marshal using a flow style (useful for structs,
-//                  sequences and maps).
-//
-// inline Inline the struct it's applied to, so its fields
-// are processed as if they were part of the outer
-// struct.
-//
-// In addition, if the key is "-", the field is ignored.
-//
-// For example:
-//
-// type T struct {
-// F int "a,omitempty"
-// B int
-// }
-// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
-//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
-//
-func Marshal(in interface{}) (out []byte, err error) {
- defer handleErr(&err)
- e := newEncoder()
- defer e.destroy()
- e.marshal("", reflect.ValueOf(in))
- e.finish()
- out = e.out
- return
-}
-
-func handleErr(err *error) {
- if v := recover(); v != nil {
- if e, ok := v.(yamlError); ok {
- *err = e.err
- } else {
- panic(v)
- }
- }
-}
-
-type yamlError struct {
- err error
-}
-
-func fail(err error) {
- panic(yamlError{err})
-}
-
-func failf(format string, args ...interface{}) {
- panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
-}
-
-// A TypeError is returned by Unmarshal when one or more fields in
-// the YAML document cannot be properly decoded into the requested
-// types. When this error is returned, the value is still
-// unmarshaled partially.
-type TypeError struct {
- Errors []string
-}
-
-func (e *TypeError) Error() string {
- return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
-}
-
-// --------------------------------------------------------------------------
-// Maintain a mapping of keys to structure field indexes
-
-// The code in this section was copied from mgo/bson.
-
-// structInfo holds details for the serialization of fields of
-// a given struct.
-type structInfo struct {
- FieldsMap map[string]fieldInfo
- FieldsList []fieldInfo
-
- // InlineMap is the number of the field in the struct that
- // contains an ,inline map, or -1 if there's none.
- InlineMap int
-}
-
-type fieldInfo struct {
- Key string
- Num int
- OmitEmpty bool
- Flow bool
-
- // Inline holds the field index if the field is part of an inlined struct.
- Inline []int
-}
-
-var structMap = make(map[reflect.Type]*structInfo)
-var fieldMapMutex sync.RWMutex
-
-func getStructInfo(st reflect.Type) (*structInfo, error) {
- fieldMapMutex.RLock()
- sinfo, found := structMap[st]
- fieldMapMutex.RUnlock()
- if found {
- return sinfo, nil
- }
-
- n := st.NumField()
- fieldsMap := make(map[string]fieldInfo)
- fieldsList := make([]fieldInfo, 0, n)
- inlineMap := -1
- for i := 0; i != n; i++ {
- field := st.Field(i)
- if field.PkgPath != "" {
- continue // Private field
- }
-
- info := fieldInfo{Num: i}
-
- tag := field.Tag.Get("yaml")
- if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
- tag = string(field.Tag)
- }
- if tag == "-" {
- continue
- }
-
- inline := false
- fields := strings.Split(tag, ",")
- if len(fields) > 1 {
- for _, flag := range fields[1:] {
- switch flag {
- case "omitempty":
- info.OmitEmpty = true
- case "flow":
- info.Flow = true
- case "inline":
- inline = true
- default:
- return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
- }
- }
- tag = fields[0]
- }
-
- if inline {
- switch field.Type.Kind() {
- // TODO: Implement support for inline maps.
- //case reflect.Map:
- // if inlineMap >= 0 {
- // return nil, errors.New("Multiple ,inline maps in struct " + st.String())
- // }
- // if field.Type.Key() != reflect.TypeOf("") {
- // return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
- // }
- // inlineMap = info.Num
- case reflect.Struct:
- sinfo, err := getStructInfo(field.Type)
- if err != nil {
- return nil, err
- }
- for _, finfo := range sinfo.FieldsList {
- if _, found := fieldsMap[finfo.Key]; found {
- msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
- return nil, errors.New(msg)
- }
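-					// Record the index path from the outer struct down to the inlined
-					// field so it can be reached through the intermediate struct value.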
- if finfo.Inline == nil {
- finfo.Inline = []int{i, finfo.Num}
- } else {
- finfo.Inline = append([]int{i}, finfo.Inline...)
- }
- fieldsMap[finfo.Key] = finfo
- fieldsList = append(fieldsList, finfo)
- }
- default:
- //return nil, errors.New("Option ,inline needs a struct value or map field")
- return nil, errors.New("Option ,inline needs a struct value field")
- }
- continue
- }
-
- if tag != "" {
- info.Key = tag
- } else {
- info.Key = strings.ToLower(field.Name)
- }
-
- if _, found = fieldsMap[info.Key]; found {
- msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
- return nil, errors.New(msg)
- }
-
- fieldsList = append(fieldsList, info)
- fieldsMap[info.Key] = info
- }
-
- sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
-
- fieldMapMutex.Lock()
- structMap[st] = sinfo
- fieldMapMutex.Unlock()
- return sinfo, nil
-}
-
-func isZero(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.String:
- return len(v.String()) == 0
- case reflect.Interface, reflect.Ptr:
- return v.IsNil()
- case reflect.Slice:
- return v.Len() == 0
- case reflect.Map:
- return v.Len() == 0
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Bool:
- return !v.Bool()
- case reflect.Struct:
- vt := v.Type()
- for i := v.NumField()-1; i >= 0; i-- {
- if vt.Field(i).PkgPath != "" {
- continue // Private field
- }
- if !isZero(v.Field(i)) {
- return false
- }
- }
- return true
- }
- return false
-}
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yamlh.go
deleted file mode 100644
index d60a6b6b00..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yamlh.go
+++ /dev/null
@@ -1,716 +0,0 @@
-package yaml
-
-import (
- "io"
-)
-
-// The version directive data.
-type yaml_version_directive_t struct {
- major int8 // The major version number.
- minor int8 // The minor version number.
-}
-
-// The tag directive data.
-type yaml_tag_directive_t struct {
- handle []byte // The tag handle.
- prefix []byte // The tag prefix.
-}
-
-type yaml_encoding_t int
-
-// The stream encoding.
-const (
- // Let the parser choose the encoding.
- yaml_ANY_ENCODING yaml_encoding_t = iota
-
- yaml_UTF8_ENCODING // The default UTF-8 encoding.
- yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
- yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
-)
-
-type yaml_break_t int
-
-// Line break types.
-const (
- // Let the parser choose the break type.
- yaml_ANY_BREAK yaml_break_t = iota
-
- yaml_CR_BREAK // Use CR for line breaks (Mac style).
- yaml_LN_BREAK // Use LN for line breaks (Unix style).
- yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
-)
-
-type yaml_error_type_t int
-
-// Many bad things could happen with the parser and emitter.
-const (
- // No error is produced.
- yaml_NO_ERROR yaml_error_type_t = iota
-
- yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
- yaml_READER_ERROR // Cannot read or decode the input stream.
- yaml_SCANNER_ERROR // Cannot scan the input stream.
- yaml_PARSER_ERROR // Cannot parse the input stream.
- yaml_COMPOSER_ERROR // Cannot compose a YAML document.
- yaml_WRITER_ERROR // Cannot write to the output stream.
- yaml_EMITTER_ERROR // Cannot emit a YAML stream.
-)
-
-// The pointer position.
-type yaml_mark_t struct {
- index int // The position index.
- line int // The position line.
- column int // The position column.
-}
-
-// Node Styles
-
-type yaml_style_t int8
-
-type yaml_scalar_style_t yaml_style_t
-
-// Scalar styles.
-const (
- // Let the emitter choose the style.
- yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
-
- yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
- yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
- yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
- yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
- yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
-)
-
-type yaml_sequence_style_t yaml_style_t
-
-// Sequence styles.
-const (
- // Let the emitter choose the style.
- yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
-
- yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
- yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
-)
-
-type yaml_mapping_style_t yaml_style_t
-
-// Mapping styles.
-const (
- // Let the emitter choose the style.
- yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
-
- yaml_BLOCK_MAPPING_STYLE // The block mapping style.
- yaml_FLOW_MAPPING_STYLE // The flow mapping style.
-)
-
-// Tokens
-
-type yaml_token_type_t int
-
-// Token types.
-const (
- // An empty token.
- yaml_NO_TOKEN yaml_token_type_t = iota
-
- yaml_STREAM_START_TOKEN // A STREAM-START token.
- yaml_STREAM_END_TOKEN // A STREAM-END token.
-
- yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
- yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
- yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
- yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
-
- yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
-	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
- yaml_BLOCK_END_TOKEN // A BLOCK-END token.
-
- yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
- yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
- yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
- yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
-
- yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
- yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
- yaml_KEY_TOKEN // A KEY token.
- yaml_VALUE_TOKEN // A VALUE token.
-
- yaml_ALIAS_TOKEN // An ALIAS token.
- yaml_ANCHOR_TOKEN // An ANCHOR token.
- yaml_TAG_TOKEN // A TAG token.
- yaml_SCALAR_TOKEN // A SCALAR token.
-)
-
-func (tt yaml_token_type_t) String() string {
- switch tt {
- case yaml_NO_TOKEN:
- return "yaml_NO_TOKEN"
- case yaml_STREAM_START_TOKEN:
- return "yaml_STREAM_START_TOKEN"
- case yaml_STREAM_END_TOKEN:
- return "yaml_STREAM_END_TOKEN"
- case yaml_VERSION_DIRECTIVE_TOKEN:
- return "yaml_VERSION_DIRECTIVE_TOKEN"
- case yaml_TAG_DIRECTIVE_TOKEN:
- return "yaml_TAG_DIRECTIVE_TOKEN"
- case yaml_DOCUMENT_START_TOKEN:
- return "yaml_DOCUMENT_START_TOKEN"
- case yaml_DOCUMENT_END_TOKEN:
- return "yaml_DOCUMENT_END_TOKEN"
- case yaml_BLOCK_SEQUENCE_START_TOKEN:
- return "yaml_BLOCK_SEQUENCE_START_TOKEN"
- case yaml_BLOCK_MAPPING_START_TOKEN:
- return "yaml_BLOCK_MAPPING_START_TOKEN"
- case yaml_BLOCK_END_TOKEN:
- return "yaml_BLOCK_END_TOKEN"
- case yaml_FLOW_SEQUENCE_START_TOKEN:
- return "yaml_FLOW_SEQUENCE_START_TOKEN"
- case yaml_FLOW_SEQUENCE_END_TOKEN:
- return "yaml_FLOW_SEQUENCE_END_TOKEN"
- case yaml_FLOW_MAPPING_START_TOKEN:
- return "yaml_FLOW_MAPPING_START_TOKEN"
- case yaml_FLOW_MAPPING_END_TOKEN:
- return "yaml_FLOW_MAPPING_END_TOKEN"
- case yaml_BLOCK_ENTRY_TOKEN:
- return "yaml_BLOCK_ENTRY_TOKEN"
- case yaml_FLOW_ENTRY_TOKEN:
- return "yaml_FLOW_ENTRY_TOKEN"
- case yaml_KEY_TOKEN:
- return "yaml_KEY_TOKEN"
- case yaml_VALUE_TOKEN:
- return "yaml_VALUE_TOKEN"
- case yaml_ALIAS_TOKEN:
- return "yaml_ALIAS_TOKEN"
- case yaml_ANCHOR_TOKEN:
- return "yaml_ANCHOR_TOKEN"
- case yaml_TAG_TOKEN:
- return "yaml_TAG_TOKEN"
- case yaml_SCALAR_TOKEN:
- return "yaml_SCALAR_TOKEN"
- }
- return ""
-}
-
-// The token structure.
-type yaml_token_t struct {
- // The token type.
- typ yaml_token_type_t
-
- // The start/end of the token.
- start_mark, end_mark yaml_mark_t
-
- // The stream encoding (for yaml_STREAM_START_TOKEN).
- encoding yaml_encoding_t
-
- // The alias/anchor/scalar value or tag/tag directive handle
- // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
- value []byte
-
- // The tag suffix (for yaml_TAG_TOKEN).
- suffix []byte
-
- // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
- prefix []byte
-
- // The scalar style (for yaml_SCALAR_TOKEN).
- style yaml_scalar_style_t
-
- // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
- major, minor int8
-}
-
-// Events
-
-type yaml_event_type_t int8
-
-// Event types.
-const (
- // An empty event.
- yaml_NO_EVENT yaml_event_type_t = iota
-
- yaml_STREAM_START_EVENT // A STREAM-START event.
- yaml_STREAM_END_EVENT // A STREAM-END event.
- yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
- yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
- yaml_ALIAS_EVENT // An ALIAS event.
- yaml_SCALAR_EVENT // A SCALAR event.
- yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
- yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
- yaml_MAPPING_START_EVENT // A MAPPING-START event.
- yaml_MAPPING_END_EVENT // A MAPPING-END event.
-)
-
-// The event structure.
-type yaml_event_t struct {
-
- // The event type.
- typ yaml_event_type_t
-
- // The start and end of the event.
- start_mark, end_mark yaml_mark_t
-
- // The document encoding (for yaml_STREAM_START_EVENT).
- encoding yaml_encoding_t
-
- // The version directive (for yaml_DOCUMENT_START_EVENT).
- version_directive *yaml_version_directive_t
-
- // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
- tag_directives []yaml_tag_directive_t
-
- // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
- anchor []byte
-
- // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
- tag []byte
-
- // The scalar value (for yaml_SCALAR_EVENT).
- value []byte
-
- // Is the document start/end indicator implicit, or the tag optional?
- // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
- implicit bool
-
- // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
- quoted_implicit bool
-
- // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
- style yaml_style_t
-}
-
-func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
-func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
-func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
-
-// Nodes
-
-const (
- yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
- yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
- yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
- yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
- yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
- yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
-
- yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
- yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mappings.
-
- // Not in original libyaml.
- yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
- yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
-
- yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
- yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
- yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
-)
-
-type yaml_node_type_t int
-
-// Node types.
-const (
- // An empty node.
- yaml_NO_NODE yaml_node_type_t = iota
-
- yaml_SCALAR_NODE // A scalar node.
- yaml_SEQUENCE_NODE // A sequence node.
- yaml_MAPPING_NODE // A mapping node.
-)
-
-// An element of a sequence node.
-type yaml_node_item_t int
-
-// An element of a mapping node.
-type yaml_node_pair_t struct {
- key int // The key of the element.
- value int // The value of the element.
-}
-
-// The node structure.
-type yaml_node_t struct {
- typ yaml_node_type_t // The node type.
- tag []byte // The node tag.
-
- // The node data.
-
- // The scalar parameters (for yaml_SCALAR_NODE).
- scalar struct {
- value []byte // The scalar value.
- length int // The length of the scalar value.
- style yaml_scalar_style_t // The scalar style.
- }
-
- // The sequence parameters (for yaml_SEQUENCE_NODE).
- sequence struct {
- items_data []yaml_node_item_t // The stack of sequence items.
- style yaml_sequence_style_t // The sequence style.
- }
-
- // The mapping parameters (for yaml_MAPPING_NODE).
- mapping struct {
- pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
- pairs_start *yaml_node_pair_t // The beginning of the stack.
- pairs_end *yaml_node_pair_t // The end of the stack.
- pairs_top *yaml_node_pair_t // The top of the stack.
- style yaml_mapping_style_t // The mapping style.
- }
-
- start_mark yaml_mark_t // The beginning of the node.
- end_mark yaml_mark_t // The end of the node.
-
-}
-
-// The document structure.
-type yaml_document_t struct {
-
- // The document nodes.
- nodes []yaml_node_t
-
- // The version directive.
- version_directive *yaml_version_directive_t
-
- // The list of tag directives.
- tag_directives_data []yaml_tag_directive_t
- tag_directives_start int // The beginning of the tag directives list.
- tag_directives_end int // The end of the tag directives list.
-
- start_implicit int // Is the document start indicator implicit?
- end_implicit int // Is the document end indicator implicit?
-
- // The start/end of the document.
- start_mark, end_mark yaml_mark_t
-}
-
-// The prototype of a read handler.
-//
-// The read handler is called when the parser needs to read more bytes from the
-// source. The handler should write no more than size bytes to the buffer.
-// The number of written bytes should be set to the size_read variable.
-//
-// [in,out] data A pointer to an application data specified by
-// yaml_parser_set_input().
-// [out] buffer The buffer to write the data from the source.
-// [in] size The size of the buffer.
-// [out] size_read The actual number of bytes read from the source.
-//
-// On success, the handler should return 1. If the handler failed,
-// the returned value should be 0. On EOF, the handler should set the
-// size_read to 0 and return 1.
-type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
-
-// This structure holds information about a potential simple key.
-type yaml_simple_key_t struct {
- possible bool // Is a simple key possible?
- required bool // Is a simple key required?
- token_number int // The number of the token.
- mark yaml_mark_t // The position mark.
-}
-
-// The states of the parser.
-type yaml_parser_state_t int
-
-const (
- yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
-
- yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
- yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
- yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
- yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
- yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
- yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
- yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
- yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
- yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
- yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
- yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
- yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
- yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
- yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
- yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
- yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
- yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
- yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
- yaml_PARSE_END_STATE // Expect nothing.
-)
-
-func (ps yaml_parser_state_t) String() string {
- switch ps {
- case yaml_PARSE_STREAM_START_STATE:
- return "yaml_PARSE_STREAM_START_STATE"
- case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
- return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
- case yaml_PARSE_DOCUMENT_START_STATE:
- return "yaml_PARSE_DOCUMENT_START_STATE"
- case yaml_PARSE_DOCUMENT_CONTENT_STATE:
- return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
- case yaml_PARSE_DOCUMENT_END_STATE:
- return "yaml_PARSE_DOCUMENT_END_STATE"
- case yaml_PARSE_BLOCK_NODE_STATE:
- return "yaml_PARSE_BLOCK_NODE_STATE"
- case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
- return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
- case yaml_PARSE_FLOW_NODE_STATE:
- return "yaml_PARSE_FLOW_NODE_STATE"
- case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
- return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
- case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
- return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
- case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
- return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
- case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
- return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
- case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
- return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
- case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
- return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
- case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
- return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
- case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
- return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
- case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
- return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
- case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
- return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
- case yaml_PARSE_END_STATE:
- return "yaml_PARSE_END_STATE"
- }
- return ""
-}
-
-// This structure holds aliases data.
-type yaml_alias_data_t struct {
- anchor []byte // The anchor.
- index int // The node id.
- mark yaml_mark_t // The anchor mark.
-}
-
-// The parser structure.
-//
-// All members are internal. Manage the structure using the
-// yaml_parser_ family of functions.
-type yaml_parser_t struct {
-
- // Error handling
-
- error yaml_error_type_t // Error type.
-
- problem string // Error description.
-
- // The byte about which the problem occurred.
- problem_offset int
- problem_value int
- problem_mark yaml_mark_t
-
- // The error context.
- context string
- context_mark yaml_mark_t
-
- // Reader stuff
-
- read_handler yaml_read_handler_t // Read handler.
-
- input_file io.Reader // File input data.
- input []byte // String input data.
- input_pos int
-
- eof bool // EOF flag
-
- buffer []byte // The working buffer.
- buffer_pos int // The current position of the buffer.
-
- unread int // The number of unread characters in the buffer.
-
- raw_buffer []byte // The raw buffer.
- raw_buffer_pos int // The current position of the buffer.
-
- encoding yaml_encoding_t // The input encoding.
-
- offset int // The offset of the current position (in bytes).
- mark yaml_mark_t // The mark of the current position.
-
- // Scanner stuff
-
- stream_start_produced bool // Have we started to scan the input stream?
- stream_end_produced bool // Have we reached the end of the input stream?
-
- flow_level int // The number of unclosed '[' and '{' indicators.
-
- tokens []yaml_token_t // The tokens queue.
- tokens_head int // The head of the tokens queue.
- tokens_parsed int // The number of tokens fetched from the queue.
- token_available bool // Does the tokens queue contain a token ready for dequeueing.
-
- indent int // The current indentation level.
- indents []int // The indentation levels stack.
-
- simple_key_allowed bool // May a simple key occur at the current position?
- simple_keys []yaml_simple_key_t // The stack of simple keys.
-
- // Parser stuff
-
- state yaml_parser_state_t // The current parser state.
- states []yaml_parser_state_t // The parser states stack.
- marks []yaml_mark_t // The stack of marks.
- tag_directives []yaml_tag_directive_t // The list of TAG directives.
-
- // Dumper stuff
-
- aliases []yaml_alias_data_t // The alias data.
-
- document *yaml_document_t // The currently parsed document.
-}
-
-// Emitter Definitions
-
-// The prototype of a write handler.
-//
-// The write handler is called when the emitter needs to flush the accumulated
-// characters to the output. The handler should write @a size bytes of the
-// @a buffer to the output.
-//
-// @param[in,out] data A pointer to an application data specified by
-// yaml_emitter_set_output().
-// @param[in] buffer The buffer with bytes to be written.
-// @param[in] size The size of the buffer.
-//
-// @returns On success, the handler should return @c 1. If the handler failed,
-// the returned value should be @c 0.
-//
-type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
-
-type yaml_emitter_state_t int
-
-// The emitter states.
-const (
- // Expect STREAM-START.
- yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
-
- yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
- yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
- yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
- yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
- yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
- yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
- yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
- yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
- yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
- yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
- yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
- yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
- yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
- yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
- yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
- yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
- yaml_EMIT_END_STATE // Expect nothing.
-)
-
-// The emitter structure.
-//
-// All members are internal. Manage the structure using the @c yaml_emitter_
-// family of functions.
-type yaml_emitter_t struct {
-
- // Error handling
-
- error yaml_error_type_t // Error type.
- problem string // Error description.
-
- // Writer stuff
-
- write_handler yaml_write_handler_t // Write handler.
-
- output_buffer *[]byte // String output data.
- output_file io.Writer // File output data.
-
- buffer []byte // The working buffer.
- buffer_pos int // The current position of the buffer.
-
- raw_buffer []byte // The raw buffer.
- raw_buffer_pos int // The current position of the buffer.
-
- encoding yaml_encoding_t // The stream encoding.
-
- // Emitter stuff
-
- canonical bool // If the output is in the canonical style?
- best_indent int // The number of indentation spaces.
- best_width int // The preferred width of the output lines.
- unicode bool // Allow unescaped non-ASCII characters?
- line_break yaml_break_t // The preferred line break.
-
- state yaml_emitter_state_t // The current emitter state.
- states []yaml_emitter_state_t // The stack of states.
-
- events []yaml_event_t // The event queue.
- events_head int // The head of the event queue.
-
- indents []int // The stack of indentation levels.
-
- tag_directives []yaml_tag_directive_t // The list of tag directives.
-
- indent int // The current indentation level.
-
- flow_level int // The current flow level.
-
- root_context bool // Is it the document root context?
- sequence_context bool // Is it a sequence context?
- mapping_context bool // Is it a mapping context?
- simple_key_context bool // Is it a simple mapping key context?
-
- line int // The current line.
- column int // The current column.
- whitespace bool // If the last character was a whitespace?
- indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
- open_ended bool // If an explicit document end is required?
-
- // Anchor analysis.
- anchor_data struct {
- anchor []byte // The anchor value.
- alias bool // Is it an alias?
- }
-
- // Tag analysis.
- tag_data struct {
- handle []byte // The tag handle.
- suffix []byte // The tag suffix.
- }
-
- // Scalar analysis.
- scalar_data struct {
- value []byte // The scalar value.
- multiline bool // Does the scalar contain line breaks?
- flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
- block_plain_allowed bool // Can the scalar be expressed in the block plain style?
- single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
- block_allowed bool // Can the scalar be expressed in the literal or folded styles?
- style yaml_scalar_style_t // The output style.
- }
-
- // Dumper stuff
-
- opened bool // If the stream was already opened?
- closed bool // If the stream was already closed?
-
- // The information associated with the document nodes.
- anchors *struct {
- references int // The number of references.
- anchor int // The anchor id.
- serialized bool // If the node has been emitted?
- }
-
- last_anchor_id int // The last assigned anchor id.
-
- document *yaml_document_t // The currently emitted document.
-}
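
The read-handler prototype removed above describes a pull-based input contract: fill the supplied buffer, report the byte count, and signal EOF with a zero count rather than an error. Below is a minimal, self-contained sketch of that contract; `demoParser` and `demoReadHandler` are invented names standing in for the package's internal `yaml_parser_t` and handler wiring.

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// demoParser is an invented stand-in for the internal parser state; it only
// carries what the handler needs: an input source and an EOF flag.
type demoParser struct {
	input io.Reader
	eof   bool
}

// demoReadHandler fills buffer from the parser's input and reports how many
// bytes were written. EOF is signalled by returning n == 0 with a nil error,
// mirroring the "set size_read to 0 and return 1" convention documented above.
func demoReadHandler(p *demoParser, buffer []byte) (n int, err error) {
	if p.eof {
		return 0, nil
	}
	n, err = p.input.Read(buffer)
	if err == io.EOF {
		p.eof = true
		return n, nil
	}
	return n, err
}

func main() {
	p := &demoParser{input: strings.NewReader("key: value\n")}
	buf := make([]byte, 8)
	for {
		n, err := demoReadHandler(p, buf)
		if err != nil {
			panic(err)
		}
		if n == 0 {
			break // end of input
		}
		fmt.Printf("read %q\n", buf[:n])
	}
}
```
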
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yamlprivateh.go
deleted file mode 100644
index 8110ce3c37..0000000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yamlprivateh.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package yaml
-
-const (
- // The size of the input raw buffer.
- input_raw_buffer_size = 512
-
- // The size of the input buffer.
- // It should be possible to decode the whole raw buffer.
- input_buffer_size = input_raw_buffer_size * 3
-
- // The size of the output buffer.
- output_buffer_size = 128
-
- // The size of the output raw buffer.
- // It should be possible to encode the whole output buffer.
- output_raw_buffer_size = (output_buffer_size*2 + 2)
-
- // The size of other stacks and queues.
- initial_stack_size = 16
- initial_queue_size = 16
- initial_string_size = 16
-)
-
-// Check if the character at the specified position is an alphabetical
-// character, a digit, '_', or '-'.
-func is_alpha(b []byte, i int) bool {
- return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
-}
-
-// Check if the character at the specified position is a digit.
-func is_digit(b []byte, i int) bool {
- return b[i] >= '0' && b[i] <= '9'
-}
-
-// Get the value of a digit.
-func as_digit(b []byte, i int) int {
- return int(b[i]) - '0'
-}
-
-// Check if the character at the specified position is a hex-digit.
-func is_hex(b []byte, i int) bool {
- return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
-}
-
-// Get the value of a hex-digit.
-func as_hex(b []byte, i int) int {
- bi := b[i]
- if bi >= 'A' && bi <= 'F' {
- return int(bi) - 'A' + 10
- }
- if bi >= 'a' && bi <= 'f' {
- return int(bi) - 'a' + 10
- }
- return int(bi) - '0'
-}
-
-// Check if the character is ASCII.
-func is_ascii(b []byte, i int) bool {
- return b[i] <= 0x7F
-}
-
-// Check if the character at the start of the buffer can be printed unescaped.
-func is_printable(b []byte, i int) bool {
- return ((b[i] == 0x0A) || // . == #x0A
- (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
- (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
- (b[i] > 0xC2 && b[i] < 0xED) ||
- (b[i] == 0xED && b[i+1] < 0xA0) ||
- (b[i] == 0xEE) ||
- (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
- !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
- !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
-}
-
-// Check if the character at the specified position is NUL.
-func is_z(b []byte, i int) bool {
- return b[i] == 0x00
-}
-
-// Check if the beginning of the buffer is a BOM.
-func is_bom(b []byte, i int) bool {
- return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
-}
-
-// Check if the character at the specified position is space.
-func is_space(b []byte, i int) bool {
- return b[i] == ' '
-}
-
-// Check if the character at the specified position is tab.
-func is_tab(b []byte, i int) bool {
- return b[i] == '\t'
-}
-
-// Check if the character at the specified position is blank (space or tab).
-func is_blank(b []byte, i int) bool {
- //return is_space(b, i) || is_tab(b, i)
- return b[i] == ' ' || b[i] == '\t'
-}
-
-// Check if the character at the specified position is a line break.
-func is_break(b []byte, i int) bool {
- return (b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
-}
-
-func is_crlf(b []byte, i int) bool {
- return b[i] == '\r' && b[i+1] == '\n'
-}
-
-// Check if the character is a line break or NUL.
-func is_breakz(b []byte, i int) bool {
- //return is_break(b, i) || is_z(b, i)
- return ( // is_break:
- b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
- // is_z:
- b[i] == 0)
-}
-
-// Check if the character is a line break, space, or NUL.
-func is_spacez(b []byte, i int) bool {
- //return is_space(b, i) || is_breakz(b, i)
- return ( // is_space:
- b[i] == ' ' ||
- // is_breakz:
- b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
- b[i] == 0)
-}
-
-// Check if the character is a line break, space, tab, or NUL.
-func is_blankz(b []byte, i int) bool {
- //return is_blank(b, i) || is_breakz(b, i)
- return ( // is_blank:
- b[i] == ' ' || b[i] == '\t' ||
- // is_breakz:
- b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
- b[i] == 0)
-}
-
-// Determine the width of the character.
-func width(b byte) int {
- // Don't replace these by a switch without first
- // confirming that it is being inlined.
- if b&0x80 == 0x00 {
- return 1
- }
- if b&0xE0 == 0xC0 {
- return 2
- }
- if b&0xF0 == 0xE0 {
- return 3
- }
- if b&0xF8 == 0xF0 {
- return 4
- }
- return 0
-
-}
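
The `width` helper removed above classifies a UTF-8 leading byte into a sequence length of 1-4 bytes (or 0 for an invalid lead byte). Here is a small standalone sketch of the same bit tests; `utf8Width` is an invented name, and it uses a `switch` for readability where the original deliberately kept chained `if`s to stay inlineable.

```go
package main

import "fmt"

// utf8Width reports how many bytes the UTF-8 sequence starting with b occupies,
// or 0 if b is not a valid leading byte (e.g. a continuation byte 10xxxxxx).
func utf8Width(b byte) int {
	switch {
	case b&0x80 == 0x00: // 0xxxxxxx: ASCII
		return 1
	case b&0xE0 == 0xC0: // 110xxxxx: 2-byte sequence
		return 2
	case b&0xF0 == 0xE0: // 1110xxxx: 3-byte sequence
		return 3
	case b&0xF8 == 0xF0: // 11110xxx: 4-byte sequence
		return 4
	}
	return 0
}

func main() {
	for _, s := range []string{"a", "é", "€", "😀"} {
		b := []byte(s)
		fmt.Printf("%q leads with 0x%02X -> width %d (len %d)\n", s, b[0], utf8Width(b[0]), len(b))
	}
}
```
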
diff --git a/vendor/github.com/influxdata/influxdb/CHANGELOG.md b/vendor/github.com/influxdata/influxdb/CHANGELOG.md
index 01a3de1baa..fba712ad91 100644
--- a/vendor/github.com/influxdata/influxdb/CHANGELOG.md
+++ b/vendor/github.com/influxdata/influxdb/CHANGELOG.md
@@ -7,7 +7,9 @@
### Features
- [#8495](https://github.com/influxdata/influxdb/pull/8495): Improve CLI connection warnings
-- [#9146](https://github.com/influxdata/influxdb/issues/9146): Backup can produce data in the same format as the enterprise backup/restore tool.
+- [#3019](https://github.com/influxdata/influxdb/issues/3019): Backup utility prints a list of backup files.
+- [#9146](https://github.com/influxdata/influxdb/issues/9146): Backup/Restore can produce/consume data in the same format as the enterprise backup/restore tool.
+- [#8880](https://github.com/influxdata/influxdb/issues/8879): Restore runs in online mode, does not delete existing databases
- [#8879](https://github.com/influxdata/influxdb/issues/8879): Export functionality using start/end to filter exported data by timestamp
- [#9084](https://github.com/influxdata/influxdb/pull/9084): Handle high cardinality deletes in TSM engine
- [#9162](https://github.com/influxdata/influxdb/pull/9162): Improve inmem index startup performance for high cardinality.
@@ -15,6 +17,7 @@
- [#9181](https://github.com/influxdata/influxdb/pull/9181): Schedule a full compaction after a successful import
- [#9218](https://github.com/influxdata/influxdb/pull/9218): Add Prometheus `/metrics` endpoint.
- [#9213](https://github.com/influxdata/influxdb/pull/9213): Add ability to generate shard digests.
+- [#9184](https://github.com/influxdata/influxdb/pull/9184): Allow setting the node id in the influx cli program.
### Bugfixes
@@ -26,6 +29,10 @@
- [#8789](https://github.com/influxdata/influxdb/issues/8789): Fix CLI to allow quoted database names in use statement
- [#9208](https://github.com/influxdata/influxdb/pull/9208): Updated client 4xx error message when response body length is zero.
- [#9230](https://github.com/influxdata/influxdb/pull/9230): Remove extraneous newlines from the log.
+- [#9226](https://github.com/influxdata/influxdb/issues/9226): Allow lone boolean literals in a condition expression.
+- [#9235](https://github.com/influxdata/influxdb/pull/9235): Improve performance when writes exceed `max-values-per-tag` or `max-series`.
+- [#9216](https://github.com/influxdata/influxdb/issues/9216): Prevent a panic when a query simultaneously finishes and is killed at the same time.
+- [#9255](https://github.com/influxdata/influxdb/issues/9255): Fix missing sorting of blocks by time when compacting.
## v1.4.3 [unreleased]
diff --git a/vendor/github.com/influxdata/influxdb/Godeps b/vendor/github.com/influxdata/influxdb/Godeps
index 33f3e2f83a..b0222ef6d4 100644
--- a/vendor/github.com/influxdata/influxdb/Godeps
+++ b/vendor/github.com/influxdata/influxdb/Godeps
@@ -1,5 +1,6 @@
collectd.org e84e8af5356e7f47485bbc95c96da6dd7984a67e
github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
+github.com/RoaringBitmap/roaring cefad6e4f79d4fa5d1d758ff937dde300641ccfa
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/bmizerany/pat c068ca2f0aacee5ac3681d68e4d0a003b7d1fd2c
github.com/boltdb/bolt 4b1ebc1869ad66568b313d0dc410e2be72670dda
@@ -8,11 +9,12 @@ github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/dgrijalva/jwt-go 24c63f56522a87ec5339cc3567883f1039378fdb
github.com/dgryski/go-bits 2ad8d707cc05b1815ce6ff2543bb5e8d8f9298ef
github.com/dgryski/go-bitstream 7d46cd22db7004f0cceb6f7975824b560cf0e486
+github.com/glycerine/go-unsnap-stream 62a9a9eb44fd8932157b1a8ace2149eff5971af6
github.com/gogo/protobuf 1c2b16bc280d6635de6c52fc1471ab962dc36ec9
github.com/golang/protobuf 1e59b77b52bf8e4b449a57e6f79f21226d571845
github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380
github.com/google/go-cmp 18107e6c56edb2d51f965f7d68e59404f0daee54
-github.com/influxdata/influxql c108c5fb9a432242754d18371795aa8099e73fe7
+github.com/influxdata/influxql 851636b092678c9816f183ca8267a490c19be163
github.com/influxdata/usage-client 6d3895376368aa52a3a81d2a16e90f0f52371967
github.com/influxdata/yamux 1f58ded512de5feabbe30b60c7d33a7a896c5f16
github.com/influxdata/yarpc 036268cdec22b7074cd6d50cc6d7315c667063c7
@@ -27,7 +29,6 @@ github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
github.com/prometheus/common 2e54d0b93cba2fd133edc32211dcc32c06ef72ca
github.com/prometheus/procfs a6e9df898b1336106c743392c48ee0b71f5c4efa
github.com/retailnext/hllpp 38a7bb71b483e855d35010808143beaf05b67f9d
-github.com/spaolacci/murmur3 0d12bf811670bf6a1a63828dfbd003eded177fce
github.com/tinylib/msgp ad0ff2e232ad2e37faf67087fb24bf8d04a8ce20
github.com/xlab/treeprint 06dfc6fa17cdde904617990a0c2d89e3e332dbb3
go.uber.org/atomic 54f72d32435d760d5604f17a82e2435b28dc4ba5
@@ -35,6 +36,7 @@ go.uber.org/multierr fb7d312c2c04c34f0ad621048bbb953b168f9ff6
go.uber.org/zap 35aad584952c3e7020db7b839f6b102de6271f89
golang.org/x/crypto 9477e0b78b9ac3d0b03822fd95422e2fe07627cd
golang.org/x/net 9dfe39835686865bff950a07b394c12a98ddc811
+golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5
golang.org/x/sys 062cd7e4e68206d8bab9b18396626e855c992658
golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
golang.org/x/time 6dc17368e09b0e8634d71cac8168d853e869a0c7
diff --git a/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md
index a9a0eb717c..f2d9e01d59 100644
--- a/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md
+++ b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md
@@ -1,7 +1,9 @@
-# List
+- # List
- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE)
- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/BurntSushi/toml [MIT LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING)
+- github.com/RoaringBitmap/roaring [APACHE LICENSE](https://github.com/RoaringBitmap/roaring/blob/master/LICENSE)
+- github.com/beorn7/perks [MIT LICENSE](https://github.com/beorn7/perks/blob/master/LICENSE)
- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license)
- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/cespare/xxhash [MIT LICENSE](https://github.com/cespare/xxhash/blob/master/LICENSE.txt)
@@ -10,21 +12,49 @@
- github.com/dgrijalva/jwt-go [MIT LICENSE](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE)
- github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE)
- github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE)
+- github.com/glycerine/go-unsnap-stream [MIT LICENSE](https://github.com/glycerine/go-unsnap-stream/blob/master/LICENSE)
- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
+- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/google/go-cmp [BSD LICENSE](https://github.com/google/go-cmp/blob/master/LICENSE)
+- github.com/influxdata/influxql [MIT LICENSE](https://github.com/influxdata/influxql/blob/master/LICENSE)
- github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt)
+- github.com/influxdata/yamux [MOZILLA PUBLIC LICENSE](https://github.com/influxdata/yamux/blob/master/LICENSE)
+- github.com/influxdata/yarpc [MIT LICENSE](https://github.com/influxdata/yarpc/blob/master/LICENSE)
- github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE)
-- github.com/philhofer/fwd [MIT LICENSE](https://github.com/philhofer/fwd/blob/master/LICENSE.md)
+- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
+- github.com/opentracing/opentracing-go [MIT LICENSE](https://github.com/opentracing/opentracing-go/blob/master/LICENSE)
- github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE)
- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING)
-- github.com/tinylib/msgp [MIT LICENSE](https://github.com/tinylib/msgp/blob/master/LICENSE)
+- github.com/philhofer/fwd [MIT LICENSE](https://github.com/philhofer/fwd/blob/master/LICENSE.md)
+- github.com/prometheus/client_golang [MIT LICENSE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
+- github.com/prometheus/client_model [MIT LICENSE](https://github.com/prometheus/client_model/blob/master/LICENSE)
+- github.com/prometheus/common [APACHE LICENSE](https://github.com/prometheus/common/blob/master/LICENSE)
+- github.com/prometheus/procfs [APACHE LICENSE](https://github.com/prometheus/procfs/blob/master/LICENSE)
- github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE)
- github.com/retailnext/hllpp [BSD LICENSE](https://github.com/retailnext/hllpp/blob/master/LICENSE)
+- github.com/tinylib/msgp [MIT LICENSE](https://github.com/tinylib/msgp/blob/master/LICENSE)
- go.uber.org/atomic [MIT LICENSE](https://github.com/uber-go/atomic/blob/master/LICENSE.txt)
- go.uber.org/multierr [MIT LICENSE](https://github.com/uber-go/multierr/blob/master/LICENSE.txt)
- go.uber.org/zap [MIT LICENSE](https://github.com/uber-go/zap/blob/master/LICENSE.txt)
- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
+- golang.org/x/net [BSD LICENSE](https://github.com/golang/net/blob/master/LICENSE)
+- golang.org/x/sys [BSD LICENSE](https://github.com/golang/sys/blob/master/LICENSE)
- golang.org/x/text [BSD LICENSE](https://github.com/golang/text/blob/master/LICENSE)
+- golang.org/x/time [BSD LICENSE](https://github.com/golang/time/blob/master/LICENSE)
- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt)
- github.com/xlab/treeprint [MIT LICENSE](https://github.com/xlab/treeprint/blob/master/LICENSE)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/vendor/github.com/influxdata/influxdb/client/influxdb.go b/vendor/github.com/influxdata/influxdb/client/influxdb.go
index 36c526d993..a7b5d2fe86 100644
--- a/vendor/github.com/influxdata/influxdb/client/influxdb.go
+++ b/vendor/github.com/influxdata/influxdb/client/influxdb.go
@@ -48,6 +48,15 @@ type Query struct {
//
// Chunked must be set to true for this option to be used.
ChunkSize int
+
+ // NodeID sets the data node to use for the query results. This option only
+ // has any effect in the enterprise version of the software where there can be
+ // more than one data node and is primarily useful for analyzing differences in
+ // data. The default behavior is to automatically select the appropriate data
+ // nodes to retrieve all of the data. On a database where the number of data nodes
+ // is greater than the replication factor, it is expected that setting this option
+ // will only retrieve partial data.
+ NodeID int
}
// ParseConnectionString will parse a string to create a valid connection URL
@@ -198,6 +207,9 @@ func (c *Client) QueryContext(ctx context.Context, q Query) (*Response, error) {
values.Set("chunk_size", strconv.Itoa(q.ChunkSize))
}
}
+ if q.NodeID > 0 {
+ values.Set("node_id", strconv.Itoa(q.NodeID))
+ }
if c.precision != "" {
values.Set("epoch", c.precision)
}
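
The new `NodeID` field on `Query` only takes effect when it is greater than zero, in which case the client adds a `node_id` parameter to the request. A minimal sketch of that wiring follows; the pared-down `Query` type and the `buildValues` helper are invented for illustration, while the real client assembles the full request URL.

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// Query is a stand-in carrying only the fields used in this example.
type Query struct {
	Command  string
	Database string
	NodeID   int
}

func buildValues(q Query) url.Values {
	values := url.Values{}
	values.Set("q", q.Command)
	values.Set("db", q.Database)
	if q.NodeID > 0 {
		// A zero NodeID keeps the default server-side node selection;
		// only a positive id is forwarded as node_id.
		values.Set("node_id", strconv.Itoa(q.NodeID))
	}
	return values
}

func main() {
	q := Query{Command: "SELECT * FROM cpu", Database: "metrics", NodeID: 2}
	fmt.Println(buildValues(q).Encode()) // db=metrics&node_id=2&q=SELECT+%2A+FROM+cpu
}
```
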
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go
index 8d447bdd1b..f01415e4d1 100644
--- a/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go
+++ b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go
@@ -50,6 +50,7 @@ type CommandLine struct {
Import bool
Chunked bool
ChunkSize int
+ NodeID int
Quit chan struct{}
IgnoreSignals bool // Ignore signals normally caught by this process (used primarily for testing)
ForceTTY bool // Force the CLI to act as if it were connected to a TTY
@@ -284,6 +285,8 @@ func (c *CommandLine) ParseCommand(cmd string) error {
}
case "use":
c.use(cmd)
+ case "node":
+ c.node(cmd)
case "insert":
return c.Insert(cmd)
case "clear":
@@ -513,6 +516,26 @@ func (c *CommandLine) retentionPolicyExists(db, rp string) bool {
return true
}
+func (c *CommandLine) node(cmd string) {
+ args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ")
+ if len(args) != 2 {
+ fmt.Println("Improper number of arguments for 'node' command, requires exactly one.")
+ return
+ }
+
+ if args[1] == "clear" {
+ c.NodeID = 0
+ return
+ }
+
+ id, err := strconv.Atoi(args[1])
+ if err != nil {
+ fmt.Printf("Unable to parse node id from %s. Must be an integer or 'clear'.\n", args[1])
+ return
+ }
+ c.NodeID = id
+}
+
// SetChunkSize sets the chunk size
// 0 sets it back to the default
func (c *CommandLine) SetChunkSize(cmd string) {
@@ -711,6 +734,7 @@ func (c *CommandLine) query(query string) client.Query {
Database: c.Database,
Chunked: c.Chunked,
ChunkSize: c.ChunkSize,
+ NodeID: c.NodeID,
}
}
@@ -1020,8 +1044,7 @@ func (c *CommandLine) help() {
show field keys show field key information
A full list of influxql commands can be found at:
- https://docs.influxdata.com/influxdb/latest/query_language/spec/
-`)
+ https://docs.influxdata.com/influxdb/latest/query_language/spec/`)
}
func (c *CommandLine) history() {
@@ -1091,9 +1114,7 @@ func (c *CommandLine) gopher() {
o: -h///++////-.
/: .o/
//+ 'y
- ./sooy.
-
-`)
+ ./sooy.`)
}
// Version prints the CLI version.
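
The new interactive `node` command accepts either an integer id or the literal `clear`. The following self-contained sketch mirrors that argument handling; `parseNodeArg` is an invented helper that returns an error instead of printing, which is not how the CLI itself reports problems.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseNodeArg returns the node id for "node <id>", 0 for "node clear",
// and an error for anything else.
func parseNodeArg(cmd string) (int, error) {
	args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ")
	if len(args) != 2 {
		return 0, fmt.Errorf("the 'node' command requires exactly one argument")
	}
	if args[1] == "clear" {
		return 0, nil
	}
	id, err := strconv.Atoi(args[1])
	if err != nil {
		return 0, fmt.Errorf("invalid node id %q: must be an integer or 'clear'", args[1])
	}
	return id, nil
}

func main() {
	for _, cmd := range []string{"node 2", "node clear", "node x"} {
		id, err := parseNodeArg(cmd)
		fmt.Println(cmd, "->", id, err)
	}
}
```
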
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/main.go b/vendor/github.com/influxdata/influxdb/cmd/influx/main.go
index 7b95348302..4ab0fa5ae1 100644
--- a/vendor/github.com/influxdata/influxdb/cmd/influx/main.go
+++ b/vendor/github.com/influxdata/influxdb/cmd/influx/main.go
@@ -51,6 +51,7 @@ func main() {
fs.StringVar(&c.ClientConfig.Precision, "precision", defaultPrecision, "Precision specifies the format of the timestamp: rfc3339,h,m,s,ms,u or ns.")
fs.StringVar(&c.ClientConfig.WriteConsistency, "consistency", "all", "Set write consistency level: any, one, quorum, or all.")
fs.BoolVar(&c.Pretty, "pretty", false, "Turns on pretty print for the json format.")
+ fs.IntVar(&c.NodeID, "node", 0, "Specify the node that data should be retrieved from (enterprise only).")
fs.StringVar(&c.Execute, "execute", c.Execute, "Execute command and quit.")
fs.BoolVar(&c.ShowVersion, "version", false, "Displays the InfluxDB version.")
fs.BoolVar(&c.Import, "import", false, "Import a previous database.")
@@ -104,8 +105,7 @@ Examples:
$ influx -database 'metrics' -execute 'select * from cpu' -format 'json' -pretty
# Connect to a specific database on startup and set database context:
- $ influx -database 'metrics' -host 'localhost' -port '8086'
-`)
+ $ influx -database 'metrics' -host 'localhost' -port '8086'`)
}
fs.Parse(os.Args[1:])
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi/dumptsi.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi/dumptsi.go
index 655a93642b..0321f49369 100644
--- a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi/dumptsi.go
+++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi/dumptsi.go
@@ -2,6 +2,7 @@
package dumptsi
import (
+ "errors"
"flag"
"fmt"
"io"
@@ -10,7 +11,9 @@ import (
"regexp"
"text/tabwriter"
+ "github.com/influxdata/influxdb/logger"
"github.com/influxdata/influxdb/models"
+ "github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxdb/tsdb/index/tsi1"
)
@@ -20,7 +23,8 @@ type Command struct {
Stderr io.Writer
Stdout io.Writer
- paths []string
+ seriesFilePath string
+ paths []string
showSeries bool
showMeasurements bool
@@ -45,6 +49,7 @@ func NewCommand() *Command {
func (cmd *Command) Run(args ...string) error {
var measurementFilter, tagKeyFilter, tagValueFilter string
fs := flag.NewFlagSet("dumptsi", flag.ExitOnError)
+ fs.StringVar(&cmd.seriesFilePath, "series-file", "", "Path to series file")
fs.BoolVar(&cmd.showSeries, "series", false, "Show raw series data")
fs.BoolVar(&cmd.showMeasurements, "measurements", false, "Show raw measurement data")
fs.BoolVar(&cmd.showTagKeys, "tag-keys", false, "Show raw tag key data")
@@ -82,6 +87,11 @@ func (cmd *Command) Run(args ...string) error {
cmd.tagValueFilter = re
}
+ // Validate series file path.
+ if cmd.seriesFilePath == "" {
+ return errors.New("series file path required")
+ }
+
cmd.paths = fs.Args()
if len(cmd.paths) == 0 {
fmt.Printf("at least one path required\n\n")
@@ -104,22 +114,47 @@ func (cmd *Command) Run(args ...string) error {
}
func (cmd *Command) run() error {
+ sfile := tsdb.NewSeriesFile(cmd.seriesFilePath)
+ sfile.Logger = logger.New(os.Stderr)
+ if err := sfile.Open(); err != nil {
+ return err
+ }
+ defer sfile.Close()
+
// Build a file set from the paths on the command line.
- idx, fs, err := cmd.readFileSet()
+ idx, fs, err := cmd.readFileSet(sfile)
if err != nil {
return err
}
- if idx != nil {
- defer idx.Close()
- } else {
+ // If this is an ad-hoc fileset then process it and close afterward.
+ if fs != nil {
+ defer fs.Release()
defer fs.Close()
+ return cmd.printFileSet(sfile, fs)
+ }
+
+ // Otherwise iterate over each partition in the index.
+ defer idx.Close()
+ for i := 0; i < int(idx.PartitionN); i++ {
+ if err := func() error {
+ fs, err := idx.PartitionAt(i).RetainFileSet()
+ if err != nil {
+ return err
+ }
+ defer fs.Release()
+ return cmd.printFileSet(sfile, fs)
+ }(); err != nil {
+ return err
+ }
}
- defer fs.Release()
+ return nil
+}
+func (cmd *Command) printFileSet(sfile *tsdb.SeriesFile, fs *tsi1.FileSet) error {
// Show either raw data or summary stats.
if cmd.showSeries || cmd.showMeasurements {
- if err := cmd.printMerged(fs); err != nil {
+ if err := cmd.printMerged(sfile, fs); err != nil {
return err
}
} else {
@@ -131,20 +166,21 @@ func (cmd *Command) run() error {
return nil
}
-func (cmd *Command) readFileSet() (*tsi1.Index, *tsi1.FileSet, error) {
+func (cmd *Command) readFileSet(sfile *tsdb.SeriesFile) (*tsi1.Index, *tsi1.FileSet, error) {
// If only one path exists and it's a directory then open as an index.
if len(cmd.paths) == 1 {
fi, err := os.Stat(cmd.paths[0])
if err != nil {
return nil, nil, err
} else if fi.IsDir() {
- idx := tsi1.NewIndex()
- idx.Path = cmd.paths[0]
- idx.CompactionEnabled = false
+ idx := tsi1.NewIndex(sfile,
+ tsi1.WithPath(cmd.paths[0]),
+ tsi1.DisableCompactions(),
+ )
if err := idx.Open(); err != nil {
return nil, nil, err
}
- return idx, idx.RetainFileSet(), nil
+ return idx, nil, nil
}
}
@@ -153,14 +189,14 @@ func (cmd *Command) readFileSet() (*tsi1.Index, *tsi1.FileSet, error) {
for _, path := range cmd.paths {
switch ext := filepath.Ext(path); ext {
case tsi1.LogFileExt:
- f := tsi1.NewLogFile(path)
+ f := tsi1.NewLogFile(sfile, path)
if err := f.Open(); err != nil {
return nil, nil, err
}
files = append(files, f)
case tsi1.IndexFileExt:
- f := tsi1.NewIndexFile()
+ f := tsi1.NewIndexFile(sfile)
f.SetPath(path)
if err := f.Open(); err != nil {
return nil, nil, err
@@ -172,7 +208,7 @@ func (cmd *Command) readFileSet() (*tsi1.Index, *tsi1.FileSet, error) {
}
}
- fs, err := tsi1.NewFileSet("", nil, files)
+ fs, err := tsi1.NewFileSet("", nil, sfile, files)
if err != nil {
return nil, nil, err
}
@@ -181,16 +217,16 @@ func (cmd *Command) readFileSet() (*tsi1.Index, *tsi1.FileSet, error) {
return nil, fs, nil
}
-func (cmd *Command) printMerged(fs *tsi1.FileSet) error {
- if err := cmd.printSeries(fs); err != nil {
+func (cmd *Command) printMerged(sfile *tsdb.SeriesFile, fs *tsi1.FileSet) error {
+ if err := cmd.printSeries(sfile); err != nil {
return err
- } else if err := cmd.printMeasurements(fs); err != nil {
+ } else if err := cmd.printMeasurements(sfile, fs); err != nil {
return err
}
return nil
}
-func (cmd *Command) printSeries(fs *tsi1.FileSet) error {
+func (cmd *Command) printSeries(sfile *tsdb.SeriesFile) error {
if !cmd.showSeries {
return nil
}
@@ -200,15 +236,23 @@ func (cmd *Command) printSeries(fs *tsi1.FileSet) error {
fmt.Fprintln(tw, "Series\t")
// Iterate over each series.
- itr := fs.SeriesIterator()
- for e := itr.Next(); e != nil; e = itr.Next() {
- name, tags := e.Name(), e.Tags()
+ itr := sfile.SeriesIDIterator()
+ for {
+ e, err := itr.Next()
+ if err != nil {
+ return err
+ } else if e.SeriesID == 0 {
+ break
+ }
+ name, tags := tsdb.ParseSeriesKey(sfile.SeriesKey(e.SeriesID))
- if !cmd.matchSeries(e.Name(), e.Tags()) {
+ if !cmd.matchSeries(name, tags) {
continue
}
- fmt.Fprintf(tw, "%s%s\t%v\n", name, tags.HashKey(), deletedString(e.Deleted()))
+ deleted := sfile.IsDeleted(e.SeriesID)
+
+ fmt.Fprintf(tw, "%s%s\t%v\n", name, tags.HashKey(), deletedString(deleted))
}
// Flush & write footer spacing.
@@ -220,7 +264,7 @@ func (cmd *Command) printSeries(fs *tsi1.FileSet) error {
return nil
}
-func (cmd *Command) printMeasurements(fs *tsi1.FileSet) error {
+func (cmd *Command) printMeasurements(sfile *tsdb.SeriesFile, fs *tsi1.FileSet) error {
if !cmd.showMeasurements {
return nil
}
@@ -240,7 +284,7 @@ func (cmd *Command) printMeasurements(fs *tsi1.FileSet) error {
return err
}
- if err := cmd.printTagKeys(fs, e.Name()); err != nil {
+ if err := cmd.printTagKeys(sfile, fs, e.Name()); err != nil {
return err
}
}
@@ -251,7 +295,7 @@ func (cmd *Command) printMeasurements(fs *tsi1.FileSet) error {
return nil
}
-func (cmd *Command) printTagKeys(fs *tsi1.FileSet, name []byte) error {
+func (cmd *Command) printTagKeys(sfile *tsdb.SeriesFile, fs *tsi1.FileSet, name []byte) error {
if !cmd.showTagKeys {
return nil
}
@@ -269,7 +313,7 @@ func (cmd *Command) printTagKeys(fs *tsi1.FileSet, name []byte) error {
return err
}
- if err := cmd.printTagValues(fs, name, e.Key()); err != nil {
+ if err := cmd.printTagValues(sfile, fs, name, e.Key()); err != nil {
return err
}
}
@@ -278,7 +322,7 @@ func (cmd *Command) printTagKeys(fs *tsi1.FileSet, name []byte) error {
return nil
}
-func (cmd *Command) printTagValues(fs *tsi1.FileSet, name, key []byte) error {
+func (cmd *Command) printTagValues(sfile *tsdb.SeriesFile, fs *tsi1.FileSet, name, key []byte) error {
if !cmd.showTagValues {
return nil
}
@@ -296,7 +340,7 @@ func (cmd *Command) printTagValues(fs *tsi1.FileSet, name, key []byte) error {
return err
}
- if err := cmd.printTagValueSeries(fs, name, key, e.Value()); err != nil {
+ if err := cmd.printTagValueSeries(sfile, fs, name, key, e.Value()); err != nil {
return err
}
}
@@ -305,20 +349,29 @@ func (cmd *Command) printTagValues(fs *tsi1.FileSet, name, key []byte) error {
return nil
}
-func (cmd *Command) printTagValueSeries(fs *tsi1.FileSet, name, key, value []byte) error {
+func (cmd *Command) printTagValueSeries(sfile *tsdb.SeriesFile, fs *tsi1.FileSet, name, key, value []byte) error {
if !cmd.showTagValueSeries {
return nil
}
// Iterate over each series.
tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0)
- itr := fs.TagValueSeriesIterator(name, key, value)
- for e := itr.Next(); e != nil; e = itr.Next() {
- if !cmd.matchSeries(e.Name(), e.Tags()) {
+ itr := fs.TagValueSeriesIDIterator(name, key, value)
+ for {
+ e, err := itr.Next()
+ if err != nil {
+ return err
+ } else if e.SeriesID == 0 {
+ break
+ }
+
+ name, tags := tsdb.ParseSeriesKey(sfile.SeriesKey(e.SeriesID))
+
+ if !cmd.matchSeries(name, tags) {
continue
}
- fmt.Fprintf(tw, " %s%s\n", e.Name(), e.Tags().HashKey())
+ fmt.Fprintf(tw, " %s%s\n", name, tags.HashKey())
if err := tw.Flush(); err != nil {
return err
}
@@ -361,7 +414,6 @@ func (cmd *Command) printIndexFileSummary(f *tsi1.IndexFile) error {
fmt.Fprintf(cmd.Stdout, "[INDEX FILE] %s\n", filepath.Base(f.Path()))
// Calculate summary stats.
- seriesN := f.SeriesN()
var measurementN, measurementSeriesN, measurementSeriesSize uint64
var keyN uint64
var valueN, valueSeriesN, valueSeriesSize uint64
@@ -386,7 +438,6 @@ func (cmd *Command) printIndexFileSummary(f *tsi1.IndexFile) error {
// Write stats.
tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0)
- fmt.Fprintf(tw, "Series:\t%d\n", seriesN)
fmt.Fprintf(tw, "Measurements:\t%d\n", measurementN)
fmt.Fprintf(tw, " Series data size:\t%d (%s)\n", measurementSeriesSize, formatSize(measurementSeriesSize))
fmt.Fprintf(tw, " Bytes per series:\t%.01fb\n", float64(measurementSeriesSize)/float64(measurementSeriesN))
@@ -395,7 +446,6 @@ func (cmd *Command) printIndexFileSummary(f *tsi1.IndexFile) error {
fmt.Fprintf(tw, " Series:\t%d\n", valueSeriesN)
fmt.Fprintf(tw, " Series data size:\t%d (%s)\n", valueSeriesSize, formatSize(valueSeriesSize))
fmt.Fprintf(tw, " Bytes per series:\t%.01fb\n", float64(valueSeriesSize)/float64(valueSeriesN))
- fmt.Fprintf(tw, "Avg tags per series:\t%.01f\n", float64(valueSeriesN)/float64(seriesN))
if err := tw.Flush(); err != nil {
return err
}
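
The dumptsi changes above walk series through the series file: the series-ID iterator returns an (element, error) pair and marks exhaustion with a zero `SeriesID` rather than a nil element. The sketch below shows only that loop shape; `seriesIDElem`, `seriesIDIterator`, and `fakeIterator` are invented stand-ins for the tsdb types.

```go
package main

import "fmt"

type seriesIDElem struct{ SeriesID uint64 }

type seriesIDIterator interface {
	Next() (seriesIDElem, error)
}

// fakeIterator serves a fixed list of ids and then a zero SeriesID.
type fakeIterator struct{ ids []uint64 }

func (it *fakeIterator) Next() (seriesIDElem, error) {
	if len(it.ids) == 0 {
		return seriesIDElem{}, nil // SeriesID == 0 marks the end
	}
	id := it.ids[0]
	it.ids = it.ids[1:]
	return seriesIDElem{SeriesID: id}, nil
}

// printAll drains the iterator using the same termination test as dumptsi.
func printAll(itr seriesIDIterator) error {
	for {
		e, err := itr.Next()
		if err != nil {
			return err
		} else if e.SeriesID == 0 {
			return nil
		}
		fmt.Println("series id:", e.SeriesID)
	}
}

func main() {
	if err := printAll(&fakeIterator{ids: []uint64{1, 2, 3}}); err != nil {
		panic(err)
	}
}
```
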
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/inmem2tsi/inmem2tsi.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/inmem2tsi/inmem2tsi.go
index 761b2948df..db09dac52d 100644
--- a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/inmem2tsi/inmem2tsi.go
+++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/inmem2tsi/inmem2tsi.go
@@ -38,21 +38,29 @@ func NewCommand() *Command {
// Run executes the command.
func (cmd *Command) Run(args ...string) error {
fs := flag.NewFlagSet("inmem2tsi", flag.ExitOnError)
+ seriesFilePath := fs.String("series-file", "", "series file path")
dataDir := fs.String("datadir", "", "shard data directory")
walDir := fs.String("waldir", "", "shard WAL directory")
fs.BoolVar(&cmd.Verbose, "v", false, "verbose")
fs.SetOutput(cmd.Stdout)
if err := fs.Parse(args); err != nil {
return err
- } else if fs.NArg() > 0 || *dataDir == "" || *walDir == "" {
+ } else if fs.NArg() > 0 || *seriesFilePath == "" || *dataDir == "" || *walDir == "" {
return flag.ErrHelp
}
cmd.Logger = logger.New(cmd.Stderr)
- return cmd.run(*dataDir, *walDir)
+ return cmd.run(*seriesFilePath, *dataDir, *walDir)
}
-func (cmd *Command) run(dataDir, walDir string) error {
+func (cmd *Command) run(seriesFilePath, dataDir, walDir string) error {
+ sfile := tsdb.NewSeriesFile(seriesFilePath)
+ sfile.Logger = cmd.Logger
+ if err := sfile.Open(); err != nil {
+ return err
+ }
+ defer sfile.Close()
+
// Check if shard already has a TSI index.
indexPath := filepath.Join(dataDir, "index")
cmd.Logger.Info("checking index path", zap.String("path", indexPath))
@@ -83,8 +91,9 @@ func (cmd *Command) run(dataDir, walDir string) error {
}
// Open TSI index in temporary path.
- tsiIndex := tsi1.NewIndex()
- tsiIndex.Path = tmpPath
+ tsiIndex := tsi1.NewIndex(sfile,
+ tsi1.WithPath(tmpPath),
+ )
tsiIndex.WithLogger(cmd.Logger)
cmd.Logger.Info("opening tsi index in temporary location", zap.String("path", tmpPath))
if err := tsiIndex.Open(); err != nil {
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go
index 13e14d4b9b..2a6b8d6a19 100644
--- a/vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go
+++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go
@@ -2,23 +2,35 @@
package backup
import (
+ "compress/gzip"
"encoding/binary"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
+ "io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"time"
- "compress/gzip"
"github.com/influxdata/influxdb/cmd/influxd/backup_util"
"github.com/influxdata/influxdb/services/snapshotter"
"github.com/influxdata/influxdb/tcp"
- "io/ioutil"
+)
+
+const (
+ // Suffix is a suffix added to the backup while it's in-process.
+ Suffix = ".pending"
+
+ // Metafile is the base name given to the metastore backups.
+ Metafile = "meta"
+
+ // BackupFilePattern is the beginning of the pattern for a backup
+ // file. They follow the scheme ...
+ BackupFilePattern = "%s.%s.%05d"
)
// Command represents the program execution for "influxd backup".
@@ -45,6 +57,8 @@ type Command struct {
enterprise bool
manifest backup_util.Manifest
enterpriseFileBase string
+
+ BackupFiles []string
}
// NewCommand returns a new instance of Command with default settings.
@@ -105,19 +119,22 @@ func (cmd *Command) Run(args ...string) error {
}
if cmd.enterprise {
- cmd.manifest.Platform = "OSS"
filename := cmd.enterpriseFileBase + ".manifest"
if err := cmd.manifest.Save(filepath.Join(cmd.path, filename)); err != nil {
cmd.StderrLogger.Printf("manifest save failed: %v", err)
return err
}
+ cmd.BackupFiles = append(cmd.BackupFiles, filename)
}
if err != nil {
cmd.StderrLogger.Printf("backup failed: %v", err)
return err
}
- cmd.StdoutLogger.Println("backup complete")
+ cmd.StdoutLogger.Println("backup complete:")
+ for _, v := range cmd.BackupFiles {
+ cmd.StdoutLogger.Println("\t" + filepath.Join(cmd.path, v))
+ }
return nil
}
@@ -146,6 +163,8 @@ func (cmd *Command) parseFlags(args []string) (err error) {
return err
}
+ cmd.BackupFiles = []string{}
+
// for enterprise saving, if needed
cmd.enterpriseFileBase = time.Now().UTC().Format(backup_util.EnterpriseFileNamePattern)
@@ -225,6 +244,9 @@ func (cmd *Command) backupShard(db, rp, sid string) error {
// TODO: verify shard backup data
err = cmd.downloadAndVerify(req, shardArchivePath, nil)
+ if !cmd.enterprise {
+ cmd.BackupFiles = append(cmd.BackupFiles, shardArchivePath)
+ }
if err != nil {
return err
@@ -283,6 +305,8 @@ func (cmd *Command) backupShard(db, rp, sid string) error {
if err := out.Close(); err != nil {
return err
}
+
+ cmd.BackupFiles = append(cmd.BackupFiles, filename)
}
return nil
@@ -389,6 +413,10 @@ func (cmd *Command) backupMetastore() error {
return err
}
+ if !cmd.enterprise {
+ cmd.BackupFiles = append(cmd.BackupFiles, metastoreArchivePath)
+ }
+
if cmd.enterprise {
metaBytes, err := backup_util.GetMetaBytes(metastoreArchivePath)
defer os.Remove(metastoreArchivePath)
@@ -396,13 +424,19 @@ func (cmd *Command) backupMetastore() error {
return err
}
filename := cmd.enterpriseFileBase + ".meta"
- if err := ioutil.WriteFile(filepath.Join(cmd.path, filename), metaBytes, 0644); err != nil {
+ ep := backup_util.EnterprisePacker{Data: metaBytes, MaxNodeID: 0}
+ protoBytes, err := ep.MarshalBinary()
+ if err != nil {
+ return err
+ }
+ if err := ioutil.WriteFile(filepath.Join(cmd.path, filename), protoBytes, 0644); err != nil {
fmt.Fprintln(cmd.Stdout, "Error.")
return err
}
cmd.manifest.Meta.FileName = filename
cmd.manifest.Meta.Size = int64(len(metaBytes))
+ cmd.BackupFiles = append(cmd.BackupFiles, filename)
}
return nil
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/backup_util.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/backup_util.go
index 9c2a36d43a..f40b062db1 100644
--- a/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/backup_util.go
+++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/backup_util.go
@@ -3,17 +3,22 @@ package backup_util
import (
"bytes"
"encoding/binary"
+ "encoding/json"
"fmt"
"io"
"os"
+ "sort"
"strings"
- "encoding/json"
+ "github.com/gogo/protobuf/proto"
+ internal "github.com/influxdata/influxdb/cmd/influxd/backup_util/internal"
"github.com/influxdata/influxdb/services/snapshotter"
"io/ioutil"
"path/filepath"
)
+//go:generate protoc --gogo_out=. internal/data.proto
+
const (
// Suffix is a suffix added to the backup while it's in-process.
Suffix = ".pending"
@@ -26,11 +31,28 @@ const (
BackupFilePattern = "%s.%s.%05d"
EnterpriseFileNamePattern = "20060102T150405Z"
+)
+
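+// EnterprisePacker bundles the raw meta backup bytes with the maximum node ID
+// so they can be serialized to and from the EnterpriseData protobuf message.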
+type EnterprisePacker struct {
+ Data []byte
+ MaxNodeID uint64
+}
- OSSManifest = "OSS"
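+// MarshalBinary encodes the packer as an EnterpriseData protobuf message.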
+func (ep EnterprisePacker) MarshalBinary() ([]byte, error) {
+ ed := internal.EnterpriseData{Data: ep.Data, MaxNodeID: &ep.MaxNodeID}
+ return proto.Marshal(&ed)
+}
- ENTManifest = "ENT"
-)
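+// UnmarshalBinary decodes an EnterpriseData protobuf message into the packer.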
+func (ep *EnterprisePacker) UnmarshalBinary(data []byte) error {
+ var pb internal.EnterpriseData
+ if err := proto.Unmarshal(data, &pb); err != nil {
+ return err
+ }
+
+ ep.Data = pb.GetData()
+ ep.MaxNodeID = pb.GetMaxNodeID()
+ return nil
+}
func GetMetaBytes(fname string) ([]byte, error) {
f, err := os.Open(fname)
@@ -65,10 +87,9 @@ func GetMetaBytes(fname string) ([]byte, error) {
// If Limited is false, the manifest contains a full backup, otherwise
// it is a partial backup.
type Manifest struct {
- Platform string `json:"platform"`
- Meta MetaEntry `json:"meta"`
- Limited bool `json:"limited"`
- Files []Entry `json:"files"`
+ Meta MetaEntry `json:"meta"`
+ Limited bool `json:"limited"`
+ Files []Entry `json:"files"`
// If limited is true, then one (or all) of the following fields will be set
@@ -123,6 +144,64 @@ func (manifest *Manifest) Save(filename string) error {
return ioutil.WriteFile(filename, b, 0600)
}
+// LoadIncremental loads multiple manifest files from a given directory.
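+// It returns the meta entry from the most recent manifest and, for each shard,
+// the newest entry whose backup file is still present in the directory.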
+func LoadIncremental(dir string) (*MetaEntry, map[uint64]*Entry, error) {
+ manifests, err := filepath.Glob(filepath.Join(dir, "*.manifest"))
+ if err != nil {
+ return nil, nil, err
+ }
+ shards := make(map[uint64]*Entry)
+
+ if len(manifests) == 0 {
+ return nil, shards, nil
+ }
+
+ sort.Sort(sort.Reverse(sort.StringSlice(manifests)))
+ var metaEntry MetaEntry
+
+ for _, fileName := range manifests {
+ fi, err := os.Stat(fileName)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if fi.IsDir() {
+ continue
+ }
+
+ f, err := os.Open(fileName)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var manifest Manifest
+ err = json.NewDecoder(f).Decode(&manifest)
+ f.Close()
+ if err != nil {
+ return nil, nil, fmt.Errorf("read manifest: %v", err)
+ }
+
+ // sorted (descending) above, so first manifest is most recent
+ if metaEntry.FileName == "" {
+ metaEntry = manifest.Meta
+ }
+
+ for i := range manifest.Files {
+ sh := manifest.Files[i]
+ if _, err := os.Stat(filepath.Join(dir, sh.FileName)); err != nil {
+ continue
+ }
+
+ e := shards[sh.ShardID]
+ if e == nil || sh.LastModified > e.LastModified {
+ shards[sh.ShardID] = &sh
+ }
+ }
+ }
+
+ return &metaEntry, shards, nil
+}
+
type CountingWriter struct {
io.Writer
Total int64 // Total # of bytes transferred
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.pb.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.pb.go
new file mode 100644
index 0000000000..41e4eb0370
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.pb.go
@@ -0,0 +1,71 @@
+// Code generated by protoc-gen-gogo.
+// source: internal/data.proto
+// DO NOT EDIT!
+
+/*
+Package backup_util is a generated protocol buffer package.
+
+It is generated from these files:
+ internal/data.proto
+
+It has these top-level messages:
+ EnterpriseData
+*/
+package backup_util
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type EnterpriseData struct {
+ Data []byte `protobuf:"bytes,1,req,name=Data" json:"Data,omitempty"`
+ MaxNodeID *uint64 `protobuf:"varint,2,req,name=MaxNodeID" json:"MaxNodeID,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EnterpriseData) Reset() { *m = EnterpriseData{} }
+func (m *EnterpriseData) String() string { return proto.CompactTextString(m) }
+func (*EnterpriseData) ProtoMessage() {}
+func (*EnterpriseData) Descriptor() ([]byte, []int) { return fileDescriptorData, []int{0} }
+
+func (m *EnterpriseData) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *EnterpriseData) GetMaxNodeID() uint64 {
+ if m != nil && m.MaxNodeID != nil {
+ return *m.MaxNodeID
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*EnterpriseData)(nil), "backup_util.EnterpriseData")
+}
+
+func init() { proto.RegisterFile("internal/data.proto", fileDescriptorData) }
+
+var fileDescriptorData = []byte{
+ // 110 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xce, 0xcc, 0x2b, 0x49,
+ 0x2d, 0xca, 0x4b, 0xcc, 0xd1, 0x4f, 0x49, 0x2c, 0x49, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17,
+ 0xe2, 0x4e, 0x4a, 0x4c, 0xce, 0x2e, 0x2d, 0x88, 0x2f, 0x2d, 0xc9, 0xcc, 0x51, 0x72, 0xe2, 0xe2,
+ 0x73, 0x05, 0xa9, 0x29, 0x28, 0xca, 0x2c, 0x4e, 0x75, 0x49, 0x2c, 0x49, 0x14, 0x12, 0xe2, 0x62,
+ 0x01, 0xd1, 0x12, 0x8c, 0x0a, 0x4c, 0x1a, 0x3c, 0x41, 0x60, 0xb6, 0x90, 0x0c, 0x17, 0xa7, 0x6f,
+ 0x62, 0x85, 0x5f, 0x7e, 0x4a, 0xaa, 0xa7, 0x8b, 0x04, 0x93, 0x02, 0x93, 0x06, 0x4b, 0x10, 0x42,
+ 0x00, 0x10, 0x00, 0x00, 0xff, 0xff, 0xd7, 0xda, 0x53, 0xc6, 0x66, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.proto b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.proto
new file mode 100644
index 0000000000..9e423b7474
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.proto
@@ -0,0 +1,12 @@
+package backup_util;
+
+//========================================================================
+//
+// Metadata
+//
+//========================================================================
+
+message EnterpriseData {
+ required bytes Data = 1;
+ required uint64 MaxNodeID = 2;
+}
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go
index dd4eb6a048..1aca8431df 100644
--- a/vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go
+++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go
@@ -10,29 +10,50 @@ import (
"fmt"
"io"
"io/ioutil"
+ "log"
"os"
"path/filepath"
"strconv"
+ "strings"
+ "compress/gzip"
"github.com/influxdata/influxdb/cmd/influxd/backup_util"
+ tarstream "github.com/influxdata/influxdb/pkg/tar"
"github.com/influxdata/influxdb/services/meta"
"github.com/influxdata/influxdb/services/snapshotter"
)
// Command represents the program execution for "influxd restore".
type Command struct {
- Stdout io.Writer
+ // Loggers used for status and error output during execution.
+ StdoutLogger *log.Logger
+ StderrLogger *log.Logger
+
+ // Standard output and error writers, overridden for testing.
Stderr io.Writer
+ Stdout io.Writer
- backupFilesPath string
- metadir string
- datadir string
- database string
- retention string
- shard string
+ host string
+ path string
+ client *snapshotter.Client
+
+ backupFilesPath string
+ metadir string
+ datadir string
+ destinationDatabase string
+ sourceDatabase string
+ backupRetention string
+ restoreRetention string
+ shard uint64
+ enterprise bool
+ online bool
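+
+ // manifestMeta and manifestFiles hold the contents of the incremental
+ // manifests loaded for an -enterprise restore.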
+ manifestMeta *backup_util.MetaEntry
+ manifestFiles map[uint64]*backup_util.Entry
// TODO: when the new meta stuff is done this should not be exported or be gone
MetaConfig *meta.Config
+
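+ // shardIDMap maps shard IDs from the backup to the shard IDs created on the
+ // target system by the meta update; it is used when uploading shards.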
+ shardIDMap map[uint64]uint64
}
// NewCommand returns a new instance of Command with default settings.
@@ -46,19 +67,32 @@ func NewCommand() *Command {
// Run executes the program.
func (cmd *Command) Run(args ...string) error {
+ // Set up logger.
+ cmd.StdoutLogger = log.New(cmd.Stdout, "", log.LstdFlags)
+ cmd.StderrLogger = log.New(cmd.Stderr, "", log.LstdFlags)
if err := cmd.parseFlags(args); err != nil {
return err
}
+ if cmd.enterprise {
+ return cmd.runOnlineEnterprise()
+ } else if cmd.online {
+ return cmd.runOnlineLegacy()
+ } else {
+ return cmd.runOffline()
+ }
+}
+
+func (cmd *Command) runOffline() error {
if cmd.metadir != "" {
if err := cmd.unpackMeta(); err != nil {
return err
}
}
- if cmd.shard != "" {
+ if cmd.shard != 0 {
return cmd.unpackShard(cmd.shard)
- } else if cmd.retention != "" {
+ } else if cmd.restoreRetention != "" {
return cmd.unpackRetention()
} else if cmd.datadir != "" {
return cmd.unpackDatabase()
@@ -66,14 +100,49 @@ func (cmd *Command) Run(args ...string) error {
return nil
}
+func (cmd *Command) runOnlineEnterprise() error {
+ err := cmd.updateMetaEnterprise()
+ if err != nil {
+ cmd.StderrLogger.Printf("error updating meta: %v", err)
+ return err
+ }
+ err = cmd.uploadShardsEnterprise()
+ if err != nil {
+ cmd.StderrLogger.Printf("error updating shards: %v", err)
+ return err
+ }
+ return nil
+}
+
+func (cmd *Command) runOnlineLegacy() error {
+ err := cmd.updateMetaLegacy()
+ if err != nil {
+ cmd.StderrLogger.Printf("error updating meta: %v", err)
+ return err
+ }
+ err = cmd.uploadShardsLegacy()
+ if err != nil {
+ cmd.StderrLogger.Printf("error updating shards: %v", err)
+ return err
+ }
+ return nil
+}
+
// parseFlags parses and validates the command line arguments.
func (cmd *Command) parseFlags(args []string) error {
fs := flag.NewFlagSet("", flag.ContinueOnError)
+ fs.StringVar(&cmd.host, "host", "localhost:8088", "")
fs.StringVar(&cmd.metadir, "metadir", "", "")
fs.StringVar(&cmd.datadir, "datadir", "", "")
- fs.StringVar(&cmd.database, "database", "", "")
- fs.StringVar(&cmd.retention, "retention", "", "")
- fs.StringVar(&cmd.shard, "shard", "", "")
+ fs.StringVar(&cmd.destinationDatabase, "database", "", "")
+ fs.StringVar(&cmd.restoreRetention, "retention", "", "")
+ fs.StringVar(&cmd.sourceDatabase, "db", "", "")
+ fs.StringVar(&cmd.destinationDatabase, "newdb", "", "")
+ fs.StringVar(&cmd.backupRetention, "rp", "", "")
+ fs.StringVar(&cmd.restoreRetention, "newrp", "", "")
+ fs.Uint64Var(&cmd.shard, "shard", 0, "")
+ fs.BoolVar(&cmd.online, "online", false, "")
+ fs.BoolVar(&cmd.enterprise, "enterprise", false, "")
fs.SetOutput(cmd.Stdout)
fs.Usage = cmd.printUsage
if err := fs.Parse(args); err != nil {
@@ -82,6 +151,7 @@ func (cmd *Command) parseFlags(args []string) error {
cmd.MetaConfig = meta.NewConfig()
cmd.MetaConfig.Dir = cmd.metadir
+ cmd.client = snapshotter.NewClient(cmd.host)
// Require output path.
cmd.backupFilesPath = fs.Arg(0)
@@ -89,24 +159,53 @@ func (cmd *Command) parseFlags(args []string) error {
return fmt.Errorf("path with backup files required")
}
- // validate the arguments
- if cmd.metadir == "" && cmd.database == "" {
- return fmt.Errorf("-metadir or -database are required to restore")
+ fi, err := os.Stat(cmd.backupFilesPath)
+ if err != nil || !fi.IsDir() {
+ return fmt.Errorf("backup path should be a valid directory: %s", cmd.backupFilesPath)
}
- if cmd.database != "" && cmd.datadir == "" {
- return fmt.Errorf("-datadir is required to restore")
- }
+ if cmd.enterprise || cmd.online {
+ // validate the arguments
+
+ if cmd.metadir != "" {
+ return fmt.Errorf("offline parameter metadir found, not compatible with -enterprise or -online")
+ }
- if cmd.shard != "" {
- if cmd.database == "" {
- return fmt.Errorf("-database is required to restore shard")
+ if cmd.datadir != "" {
+ return fmt.Errorf("offline parameter datadir found, not compatible with -enterprise or -online")
}
- if cmd.retention == "" {
- return fmt.Errorf("-retention is required to restore shard")
+
+ if cmd.restoreRetention == "" {
+ cmd.restoreRetention = cmd.backupRetention
+ }
+
+ if cmd.enterprise {
+ var err error
+ cmd.manifestMeta, cmd.manifestFiles, err = backup_util.LoadIncremental(cmd.backupFilesPath)
+ if err != nil {
+ return fmt.Errorf("restore failed while processing manifest files: %s", err.Error())
+ }
+ }
+ } else {
+ // validate the arguments
+ if cmd.metadir == "" && cmd.destinationDatabase == "" {
+ return fmt.Errorf("-metadir or -database is required to restore")
+ }
+
+ if cmd.destinationDatabase != "" && cmd.datadir == "" {
+ return fmt.Errorf("-datadir is required to restore")
+ }
+
+ if cmd.shard != 0 {
+ if cmd.destinationDatabase == "" {
+ return fmt.Errorf("-database is required to restore shard")
+ }
+ if cmd.backupRetention == "" {
+ return fmt.Errorf("-retention is required to restore shard")
+ }
+ } else if cmd.backupRetention != "" && cmd.destinationDatabase == "" {
+ return fmt.Errorf("-database is required to restore retention policy")
}
- } else if cmd.retention != "" && cmd.database == "" {
- return fmt.Errorf("-database is required to restore retention policy")
}
return nil
@@ -171,7 +270,7 @@ func (cmd *Command) unpackMeta() error {
c.Dir = cmd.metadir
// Create the meta dir
- if os.MkdirAll(c.Dir, 0700); err != nil {
+ if err := os.MkdirAll(c.Dir, 0700); err != nil {
return err
}
@@ -212,11 +311,75 @@ func (cmd *Command) unpackMeta() error {
return nil
}
+func (cmd *Command) updateMetaEnterprise() error {
+ var metaBytes []byte
+ fileName := filepath.Join(cmd.backupFilesPath, cmd.manifestMeta.FileName)
+
+ fileBytes, err := ioutil.ReadFile(fileName)
+ if err != nil {
+ return err
+ }
+
+ var ep backup_util.EnterprisePacker
+ if err := ep.UnmarshalBinary(fileBytes); err != nil {
+ return err
+ }
+
+ metaBytes = ep.Data
+
+ req := &snapshotter.Request{
+ Type: snapshotter.RequestMetaStoreUpdate,
+ BackupDatabase: cmd.sourceDatabase,
+ RestoreDatabase: cmd.destinationDatabase,
+ BackupRetentionPolicy: cmd.backupRetention,
+ RestoreRetentionPolicy: cmd.restoreRetention,
+ UploadSize: int64(len(metaBytes)),
+ }
+
+ shardIDMap, err := cmd.client.UpdateMeta(req, bytes.NewReader(metaBytes))
+ cmd.shardIDMap = shardIDMap
+ return err
+
+}
+
+// updateMetaLegacy takes a metadata backup and sends it to the influx server
+// for a live merger of metadata.
+func (cmd *Command) updateMetaLegacy() error {
+
+ var metaBytes []byte
+
+ // find the meta file
+ metaFiles, err := filepath.Glob(filepath.Join(cmd.backupFilesPath, backup_util.Metafile+".*"))
+ if err != nil {
+ return err
+ }
+
+ if len(metaFiles) == 0 {
+ return fmt.Errorf("no metastore backups in %s", cmd.backupFilesPath)
+ }
+
+ fileName := metaFiles[len(metaFiles)-1]
+ cmd.StdoutLogger.Printf("Using metastore snapshot: %v\n", fileName)
+ metaBytes, err = backup_util.GetMetaBytes(fileName)
+ if err != nil {
+ return err
+ }
+
+ req := &snapshotter.Request{
+ Type: snapshotter.RequestMetaStoreUpdate,
+ BackupDatabase: cmd.sourceDatabase,
+ RestoreDatabase: cmd.destinationDatabase,
+ BackupRetentionPolicy: cmd.backupRetention,
+ RestoreRetentionPolicy: cmd.restoreRetention,
+ UploadSize: int64(len(metaBytes)),
+ }
+
+ shardIDMap, err := cmd.client.UpdateMeta(req, bytes.NewReader(metaBytes))
+ cmd.shardIDMap = shardIDMap
+ return err
+}
+
// unpackShard will look for all backup files in the path matching this shard ID
// and restore them to the data dir
-func (cmd *Command) unpackShard(shardID string) error {
+func (cmd *Command) unpackShard(shard uint64) error {
+ shardID := strconv.FormatUint(shard, 10)
// make sure the shard isn't already there so we don't clobber anything
- restorePath := filepath.Join(cmd.datadir, cmd.database, cmd.retention, shardID)
+ restorePath := filepath.Join(cmd.datadir, cmd.destinationDatabase, cmd.restoreRetention, shardID)
if _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("shard already present: %s", restorePath)
}
@@ -227,21 +390,93 @@ func (cmd *Command) unpackShard(shardID string) error {
}
// find the shard backup files
- pat := filepath.Join(cmd.backupFilesPath, fmt.Sprintf(backup_util.BackupFilePattern, cmd.database, cmd.retention, id))
+ pat := filepath.Join(cmd.backupFilesPath, fmt.Sprintf(backup_util.BackupFilePattern, cmd.destinationDatabase, cmd.restoreRetention, id))
return cmd.unpackFiles(pat + ".*")
}
-// unpackDatabase will look for all backup files in the path matching this database
+func (cmd *Command) uploadShardsEnterprise() error {
+ for _, file := range cmd.manifestFiles {
+ if cmd.sourceDatabase == "" || cmd.sourceDatabase == file.Database {
+ if cmd.backupRetention == "" || cmd.backupRetention == file.Policy {
+ if cmd.shard == 0 || cmd.shard == file.ShardID {
+ cmd.StdoutLogger.Printf("Restoring shard %d live from backup %s\n", file.ShardID, file.FileName)
+ f, err := os.Open(filepath.Join(cmd.backupFilesPath, file.FileName))
+ if err != nil {
+ f.Close()
+ return err
+ }
+ gr, err := gzip.NewReader(f)
+ if err != nil {
+ f.Close()
+ return err
+ }
+ tr := tar.NewReader(gr)
+ targetDB := cmd.destinationDatabase
+ if targetDB == "" {
+ targetDB = file.Database
+ }
+
+ if err := cmd.client.UploadShard(file.ShardID, cmd.shardIDMap[file.ShardID], targetDB, cmd.restoreRetention, tr); err != nil {
+ f.Close()
+ return err
+ }
+ f.Close()
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// uploadShardsLegacy finds the legacy backup files for the source database and uploads each shard to the target server
+func (cmd *Command) uploadShardsLegacy() error {
+ // find the destinationDatabase backup files
+ pat := fmt.Sprintf("%s.*", filepath.Join(cmd.backupFilesPath, cmd.sourceDatabase))
+ cmd.StdoutLogger.Printf("Restoring live from backup %s\n", pat)
+ backupFiles, err := filepath.Glob(pat)
+ if err != nil {
+ return err
+ }
+ if len(backupFiles) == 0 {
+ return fmt.Errorf("no backup files in %s", cmd.backupFilesPath)
+ }
+
+ for _, fn := range backupFiles {
+ parts := strings.Split(fn, ".")
+
+ if len(parts) != 4 {
+ cmd.StderrLogger.Printf("Skipping mis-named backup file: %s", fn)
+ continue
+ }
+ shardID, err := strconv.ParseUint(parts[2], 10, 64)
+ if err != nil {
+ return err
+ }
+ f, err := os.Open(fn)
+ if err != nil {
+ return err
+ }
+ tr := tar.NewReader(f)
+ if err := cmd.client.UploadShard(shardID, cmd.shardIDMap[shardID], cmd.destinationDatabase, cmd.restoreRetention, tr); err != nil {
+ f.Close()
+ return err
+ }
+ f.Close()
+ }
+
+ return nil
+}
+
+// unpackDatabase will look for all backup files in the path matching this database
// and restore them to the data dir
func (cmd *Command) unpackDatabase() error {
// make sure the shard isn't already there so we don't clobber anything
- restorePath := filepath.Join(cmd.datadir, cmd.database)
+ restorePath := filepath.Join(cmd.datadir, cmd.destinationDatabase)
if _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("database already present: %s", restorePath)
}
// find the database backup files
- pat := filepath.Join(cmd.backupFilesPath, cmd.database)
+ pat := filepath.Join(cmd.backupFilesPath, cmd.destinationDatabase)
return cmd.unpackFiles(pat + ".*")
}
@@ -249,19 +484,19 @@ func (cmd *Command) unpackDatabase() error {
// and restore them to the data dir
func (cmd *Command) unpackRetention() error {
// make sure the shard isn't already there so we don't clobber anything
- restorePath := filepath.Join(cmd.datadir, cmd.database, cmd.retention)
+ restorePath := filepath.Join(cmd.datadir, cmd.destinationDatabase, cmd.restoreRetention)
if _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("retention already present: %s", restorePath)
}
// find the retention backup files
- pat := filepath.Join(cmd.backupFilesPath, cmd.database)
- return cmd.unpackFiles(fmt.Sprintf("%s.%s.*", pat, cmd.retention))
+ pat := filepath.Join(cmd.backupFilesPath, cmd.destinationDatabase)
+ return cmd.unpackFiles(fmt.Sprintf("%s.%s.*", pat, cmd.restoreRetention))
}
// unpackFiles will look for backup files matching the pattern and restore them to the data dir
func (cmd *Command) unpackFiles(pat string) error {
- fmt.Printf("Restoring from backup %s\n", pat)
+ cmd.StdoutLogger.Printf("Restoring offline from backup %s\n", pat)
backupFiles, err := filepath.Glob(pat)
if err != nil {
@@ -289,67 +524,64 @@ func (cmd *Command) unpackTar(tarFile string) error {
}
defer f.Close()
- tr := tar.NewReader(f)
-
- for {
- hdr, err := tr.Next()
- if err == io.EOF {
- return nil
- } else if err != nil {
- return err
- }
-
- if err := cmd.unpackFile(tr, hdr.Name); err != nil {
- return err
- }
+ // should get us ["db","rp", "00001", "00"]
+ pathParts := strings.Split(filepath.Base(tarFile), ".")
+ if len(pathParts) != 4 {
+ return fmt.Errorf("backup tarfile name has an incorrect format")
}
-}
-// unpackFile will copy the current file from the tar archive to the data dir
-func (cmd *Command) unpackFile(tr *tar.Reader, fileName string) error {
- nativeFileName := filepath.FromSlash(fileName)
- fn := filepath.Join(cmd.datadir, nativeFileName)
- fmt.Printf("unpacking %s\n", fn)
-
- if err := os.MkdirAll(filepath.Dir(fn), 0777); err != nil {
- return fmt.Errorf("error making restore dir: %s", err.Error())
- }
+ shardPath := filepath.Join(cmd.datadir, pathParts[0], pathParts[1], strings.Trim(pathParts[2], "0"))
+ if err := os.MkdirAll(shardPath, 0755); err != nil {
+ return err
+ }
- ff, err := os.Create(fn)
- if err != nil {
- return err
- }
- defer ff.Close()
-
- if _, err := io.Copy(ff, tr); err != nil {
- return err
- }
-
- return nil
+ return tarstream.Restore(f, shardPath)
}
// printUsage prints the usage message to STDERR.
func (cmd *Command) printUsage() {
fmt.Fprintf(cmd.Stdout, `Uses backups from the PATH to restore the metastore, databases,
-retention policies, or specific shards. The InfluxDB process must not be
-running during a restore.
+retention policies, or specific shards. The default mode requires the instance to be stopped before running, and
+ will wipe all databases from the system (e.g., for disaster recovery). The improved online and enterprise modes
+ require the instance to be running, and the database name used must not already exist.
+
+Usage: influxd restore [-enterprise] [flags] PATH
-Usage: influxd restore [flags] PATH
+The default mode consumes files in an OSS-only file format. PATH is a directory containing the backup data.
+Options:
-metadir
Optional. If set the metastore will be recovered to the given path.
-datadir
Optional. If set the restore process will recover the specified
database, retention policy or shard to the given directory.
-database
- Optional. Required if no metadir given. Will restore the database
- TSM files.
+ Optional. Required if no metadir given. Will restore a single database's data.
-retention
- Optional. If given, database is required. Will restore the retention policy's
- TSM files.
+ Optional. If given, -database is required. Will restore the retention policy's
+ data.
-shard
- Optional. If given, database and retention are required. Will restore the shard's
- TSM files.
+ Optional. If given, -database and -retention are required. Will restore the shard's
+ data.
+ -online
+ Optional. If given, the restore will be done using the new process, detailed below. All other arguments
+ above should be omitted.
+
+The -enterprise restore mode consumes files in an improved format that includes a file manifest.
+
+Options:
+ -host
+ The host to connect to and perform the restore on. Defaults to 'localhost:8088'.
+ -db
+ Identifies the database from the backup that will be restored.
+ -newdb
+ The name of the database into which the archived data will be imported on the target system.
+ If not given, then the value of -db is used. The new database name must be unique to the target system.
+ -rp
+ Identifies the retention policy from the backup that will be restored. Requires that -db is set.
+ -newrp
+ The name of the retention policy that will be created on the target system. Requires that -rp is set.
+ If not given, the value of -rp is used.
+ -shard
+ Optional. If given, -db and -rp are required. Will restore the single shard's data.
`)
}
diff --git a/vendor/github.com/influxdata/influxdb/coordinator/statement_executor.go b/vendor/github.com/influxdata/influxdb/coordinator/statement_executor.go
index 3c77ed23aa..8de99be6f0 100644
--- a/vendor/github.com/influxdata/influxdb/coordinator/statement_executor.go
+++ b/vendor/github.com/influxdata/influxdb/coordinator/statement_executor.go
@@ -330,8 +330,8 @@ func (e *StatementExecutor) executeDeleteSeriesStatement(stmt *influxql.DeleteSe
// Convert "now()" to current time.
stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: time.Now().UTC()})
- // Locally delete the series.
- return e.TSDBStore.DeleteSeries(database, stmt.Sources, stmt.Condition)
+ // Locally delete the series. The series will not be removed from the index.
+ return e.TSDBStore.DeleteSeries(database, stmt.Sources, stmt.Condition, false)
}
func (e *StatementExecutor) executeDropContinuousQueryStatement(q *influxql.DropContinuousQueryStatement) error {
@@ -375,7 +375,7 @@ func (e *StatementExecutor) executeDropSeriesStatement(stmt *influxql.DropSeries
}
// Locally drop the series.
- return e.TSDBStore.DeleteSeries(database, stmt.Sources, stmt.Condition)
+ return e.TSDBStore.DeleteSeries(database, stmt.Sources, stmt.Condition, true)
}
func (e *StatementExecutor) executeDropShardStatement(stmt *influxql.DropShardStatement) error {
@@ -1375,7 +1375,7 @@ type TSDBStore interface {
DeleteDatabase(name string) error
DeleteMeasurement(database, name string) error
DeleteRetentionPolicy(database, name string) error
- DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error
+ DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr, removeIndex bool) error
DeleteShard(id uint64) error
MeasurementNames(auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error)
diff --git a/vendor/github.com/influxdata/influxdb/internal/tsdb_store.go b/vendor/github.com/influxdata/influxdb/internal/tsdb_store.go
index 3054c8cf61..95a79a5b59 100644
--- a/vendor/github.com/influxdata/influxdb/internal/tsdb_store.go
+++ b/vendor/github.com/influxdata/influxdb/internal/tsdb_store.go
@@ -14,6 +14,7 @@ import (
// TSDBStoreMock is a mockable implementation of tsdb.Store.
type TSDBStoreMock struct {
BackupShardFn func(id uint64, since time.Time, w io.Writer) error
+ BackupSeriesFileFn func(database string, w io.Writer) error
ExportShardFn func(id uint64, ExportStart time.Time, ExportEnd time.Time, w io.Writer) error
CloseFn func() error
CreateShardFn func(database, policy string, shardID uint64, enabled bool) error
@@ -22,7 +23,7 @@ type TSDBStoreMock struct {
DeleteDatabaseFn func(name string) error
DeleteMeasurementFn func(database, name string) error
DeleteRetentionPolicyFn func(database, name string) error
- DeleteSeriesFn func(database string, sources []influxql.Source, condition influxql.Expr) error
+ DeleteSeriesFn func(database string, sources []influxql.Source, condition influxql.Expr, removeIndex bool) error
DeleteShardFn func(id uint64) error
DiskSizeFn func() (int64, error)
ExpandSourcesFn func(sources influxql.Sources) (influxql.Sources, error)
@@ -51,6 +52,9 @@ type TSDBStoreMock struct {
func (s *TSDBStoreMock) BackupShard(id uint64, since time.Time, w io.Writer) error {
return s.BackupShardFn(id, since, w)
}
+func (s *TSDBStoreMock) BackupSeriesFile(database string, w io.Writer) error {
+ return s.BackupSeriesFileFn(database, w)
+}
func (s *TSDBStoreMock) ExportShard(id uint64, ExportStart time.Time, ExportEnd time.Time, w io.Writer) error {
return s.ExportShardFn(id, ExportStart, ExportEnd, w)
}
@@ -73,8 +77,8 @@ func (s *TSDBStoreMock) DeleteMeasurement(database string, name string) error {
func (s *TSDBStoreMock) DeleteRetentionPolicy(database string, name string) error {
return s.DeleteRetentionPolicyFn(database, name)
}
-func (s *TSDBStoreMock) DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error {
- return s.DeleteSeriesFn(database, sources, condition)
+func (s *TSDBStoreMock) DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr, removeIndex bool) error {
+ return s.DeleteSeriesFn(database, sources, condition, removeIndex)
}
func (s *TSDBStoreMock) DeleteShard(shardID uint64) error {
return s.DeleteShardFn(shardID)
diff --git a/vendor/github.com/influxdata/influxdb/man/influx.txt b/vendor/github.com/influxdata/influxdb/man/influx.txt
index 59cfba0249..3a0bb62971 100644
--- a/vendor/github.com/influxdata/influxdb/man/influx.txt
+++ b/vendor/github.com/influxdata/influxdb/man/influx.txt
@@ -63,6 +63,9 @@ OPTIONS
-pretty::
Turns on pretty print format for the JSON format.
+-node ::
+ Specifies the data node that should be queried for data. This option is only valid on enterprise clusters.
+
-import::
Import a previous database export from a file. If specified, '-path ' must also be specified.
diff --git a/vendor/github.com/influxdata/influxdb/man/influxd-restore.txt b/vendor/github.com/influxdata/influxdb/man/influxd-restore.txt
index 2ded24785e..20584f00f6 100644
--- a/vendor/github.com/influxdata/influxdb/man/influxd-restore.txt
+++ b/vendor/github.com/influxdata/influxdb/man/influxd-restore.txt
@@ -3,11 +3,15 @@ influxd-restore(1)
NAME
----
-influxd-restore - Restores the metastore, databases, retention policies, or specific shards
+influxd-restore - Uses backups from the PATH to restore the metastore, databases, retention policies, or specific
+ shards. Default mode requires the instance to be stopped before running, and will wipe all databases from the
+ system (e.g., for disaster recovery). The improved online and enterprise modes require the instance to be running,
+ and the database name used must not already exist.
+
SYNOPSIS
--------
-'influxd restore' [options] PATH
+'influxd restore' [-enterprise] [flags] PATH
DESCRIPTION
-----------
@@ -15,20 +19,52 @@ Uses backups from the PATH to restore the metastore, databases, retention polici
OPTIONS
-------
+The default mode consumes files in an OSS-only file format. PATH is a directory containing the backup data.
+
-metadir ::
- If set, the metastore will be recovered to the given path. Optional.
+ Optional. If set the metastore will be recovered to the given path.
-datadir ::
- If set, the restore process will recover the specified database, retention policy, or shard to the given directory. Optional.
+ Optional. If set, the restore process will recover the specified
+ database, retention policy, or shard to the given directory.
-database ::
- Will restore the database TSM files. Required if no metadir is given. Optional.
+ Optional. Required if no metadir given. Will restore a single database's data.
-retention ::
- Will restore the retention policy's TSM files. If given, database is required. Optional.
+ Optional. If given, -database is required. Will restore the retention policy's
+ data.
+
+-shard ::
+ Optional. If given, -database and -retention are required. Will restore the shard's
+ data.
+
+-online::
+ Optional. If given, the restore will be done using the new process, detailed below. All other arguments
+ above should be omitted.
+
+The -enterprise restore mode consumes files in an improved format that includes a file manifest.
+
+Options:
+-host ::
+ The host to connect to and perform the restore on. Defaults to 'localhost:8088'.
+
+-db ::
+ Identifies the database from the backup that will be restored.
+
+-newdb ::
+ The name of the database into which the archived data will be imported on the target system.
+ If not given, then the value of -db is used. The new database name must be unique to the target system.
+
+-rp ::
+ Identifies the retention policy from the backup that will be restored. Requires that -db is set.
+
+-newrp ::
+ The name of the retention policy that will be created on the target system. Requires that -rp is set.
+ If not given, the value of -rp is used.
-shard ::
- Will restore the shard's TSM files. If given, database and retention are required. Optional.
+ Optional. If given, -db and -rp are required. Will restore the single shard's data.
SEE ALSO
--------
diff --git a/vendor/github.com/influxdata/influxdb/models/points.go b/vendor/github.com/influxdata/influxdb/models/points.go
index 508d73a6ba..ad80a816bf 100644
--- a/vendor/github.com/influxdata/influxdb/models/points.go
+++ b/vendor/github.com/influxdata/influxdb/models/points.go
@@ -281,8 +281,8 @@ func ParseKeyBytes(buf []byte) ([]byte, Tags) {
return buf[:i], tags
}
-func ParseTags(buf []byte) (Tags, error) {
- return parseTags(buf), nil
+func ParseTags(buf []byte) Tags {
+ return parseTags(buf)
}
func ParseName(buf []byte) ([]byte, error) {
@@ -1528,9 +1528,12 @@ func parseTags(buf []byte) Tags {
return nil
}
- tags := make(Tags, 0, bytes.Count(buf, []byte(",")))
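+ // Preallocate one Tag per comma in the key and fill the entries in place to
+ // avoid the allocations incurred by repeated appends.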
+ tags := make(Tags, bytes.Count(buf, []byte(",")))
+ p := 0
walkTags(buf, func(key, value []byte) bool {
- tags = append(tags, NewTag(key, value))
+ tags[p].Key = key
+ tags[p].Value = value
+ p++
return true
})
return tags
diff --git a/vendor/github.com/influxdata/influxdb/models/points_test.go b/vendor/github.com/influxdata/influxdb/models/points_test.go
index 6234fd0203..ca1552101c 100644
--- a/vendor/github.com/influxdata/influxdb/models/points_test.go
+++ b/vendor/github.com/influxdata/influxdb/models/points_test.go
@@ -2381,6 +2381,13 @@ func BenchmarkEscapeString_QuotesAndBackslashes(b *testing.B) {
}
}
+func BenchmarkParseTags(b *testing.B) {
+ tags := []byte("cpu,tag0=value0,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5")
+ for i := 0; i < b.N; i++ {
+ models.ParseTags(tags)
+ }
+}
+
func init() {
// Force uint support to be enabled for testing.
models.EnableUintSupport()
diff --git a/vendor/github.com/influxdata/influxdb/pkg/binaryutil/binaryutil.go b/vendor/github.com/influxdata/influxdb/pkg/binaryutil/binaryutil.go
new file mode 100644
index 0000000000..b1d5f2ad06
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/binaryutil/binaryutil.go
@@ -0,0 +1,22 @@
+package binaryutil
+
+// VarintSize returns the number of bytes to varint encode x.
+// This code is copied from encoding/binary.PutVarint() with the buffer removed.
+func VarintSize(x int64) int {
+ ux := uint64(x) << 1
+ if x < 0 {
+ ux = ^ux
+ }
+ return UvarintSize(ux)
+}
+
+// UvarintSize returns the number of bytes to uvarint encode x.
+// This code is copied from encoding/binary.PutUvarint() with the buffer removed.
+func UvarintSize(x uint64) int {
+ i := 0
+ for x >= 0x80 {
+ x >>= 7
+ i++
+ }
+ return i + 1
+}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/bloom/bloom_test.go b/vendor/github.com/influxdata/influxdb/pkg/bloom/bloom_test.go
index 528ee87ffa..a93165b81a 100644
--- a/vendor/github.com/influxdata/influxdb/pkg/bloom/bloom_test.go
+++ b/vendor/github.com/influxdata/influxdb/pkg/bloom/bloom_test.go
@@ -3,6 +3,7 @@ package bloom_test
import (
"encoding/binary"
"fmt"
+ "os"
"testing"
"github.com/influxdata/influxdb/pkg/bloom"
@@ -10,6 +11,10 @@ import (
// Ensure filter can insert values and verify they exist.
func TestFilter_InsertContains(t *testing.T) {
+ if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" {
+ t.Skip("Skipping test in short, race and appveyor mode.")
+ }
+
// Short, less comprehensive test.
testShortFilter_InsertContains(t)
diff --git a/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil.go b/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil.go
index 3e80c794c1..c51b56532c 100644
--- a/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil.go
+++ b/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil.go
@@ -11,12 +11,57 @@ func Sort(a [][]byte) {
sort.Sort(byteSlices(a))
}
+// SortDedup sorts the byte slices in a and removes any duplicates. It returns
+// a (possibly shorter) subslice of a containing only the unique values.
+func SortDedup(a [][]byte) [][]byte {
+ if len(a) < 2 {
+ return a
+ }
+
+ Sort(a)
+
+ i, j := 0, 1
+ for j < len(a) {
+ if bytes.Compare(a[j-1], a[j]) != 0 {
+ a[i] = a[j-1]
+ i++
+ }
+ j++
+ }
+ a[i] = a[j-1]
+ i++
+ return a[:i]
+}
+
func IsSorted(a [][]byte) bool {
return sort.IsSorted(byteSlices(a))
}
+// SearchBytes performs a binary search for x in the sorted slice a.
func SearchBytes(a [][]byte, x []byte) int {
- return sort.Search(len(a), func(i int) bool { return bytes.Compare(a[i], x) >= 0 })
+ // Define f(i) => bytes.Compare(a[i], x) < 0
+ // Define f(-1) == false and f(n) == true.
+ // Invariant: f(i-1) == false, f(j) == true.
+ i, j := 0, len(a)
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if bytes.Compare(a[h], x) < 0 {
+ i = h + 1 // preserves f(i-1) == false
+ } else {
+ j = h // preserves f(j) == true
+ }
+ }
+ // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
+ return i
+}
+
+// Contains returns true if x is an element of the sorted slice a.
+func Contains(a [][]byte, x []byte) bool {
+ n := SearchBytes(a, x)
+ if n < len(a) {
+ return bytes.Compare(a[n], x) == 0
+ }
+ return false
}
// SearchBytesFixed searches a for x using a binary search. The size of a must be a multiple of
diff --git a/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil_test.go b/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil_test.go
index 8334a2675e..dcc7df52d4 100644
--- a/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil_test.go
+++ b/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil_test.go
@@ -3,8 +3,10 @@ package bytesutil_test
import (
"bytes"
"encoding/binary"
+ "strings"
"testing"
+ "github.com/google/go-cmp/cmp"
"github.com/influxdata/influxdb/pkg/bytesutil"
)
@@ -34,6 +36,94 @@ func TestSearchBytesFixed(t *testing.T) {
}
}
+func TestSearchBytes(t *testing.T) {
+ in := toByteSlices("bbb", "ccc", "eee", "fff", "ggg", "hhh")
+ tests := []struct {
+ name string
+ x string
+ exp int
+ }{
+ {"exists first", "bbb", 0},
+ {"exists middle", "eee", 2},
+ {"exists last", "hhh", 5},
+ {"not exists last", "zzz", 6},
+ {"not exists first", "aaa", 0},
+ {"not exists mid", "ddd", 2},
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ got := bytesutil.SearchBytes(in, []byte(test.x))
+ if got != test.exp {
+ t.Errorf("got %d, expected %d", got, test.exp)
+ }
+ })
+ }
+}
+
+func TestContains(t *testing.T) {
+ in := toByteSlices("bbb", "ccc", "eee", "fff", "ggg", "hhh")
+ tests := []struct {
+ name string
+ x string
+ exp bool
+ }{
+ {"exists first", "bbb", true},
+ {"exists middle", "eee", true},
+ {"exists last", "hhh", true},
+ {"not exists last", "zzz", false},
+ {"not exists first", "aaa", false},
+ {"not exists mid", "ddd", false},
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ got := bytesutil.Contains(in, []byte(test.x))
+ if got != test.exp {
+ t.Errorf("got %t, expected %t", got, test.exp)
+ }
+ })
+ }
+}
+
+func toByteSlices(s ...string) [][]byte {
+ r := make([][]byte, len(s))
+ for i, v := range s {
+ r[i] = []byte(v)
+ }
+ return r
+}
+
+func TestSortDedup(t *testing.T) {
+ tests := []struct {
+ name string
+ in [][]byte
+ exp [][]byte
+ }{
+ {
+ name: "mixed dupes",
+ in: toByteSlices("bbb", "aba", "bbb", "aba", "ccc", "bbb", "aba"),
+ exp: toByteSlices("aba", "bbb", "ccc"),
+ },
+ {
+ name: "no dupes",
+ in: toByteSlices("bbb", "ccc", "ddd"),
+ exp: toByteSlices("bbb", "ccc", "ddd"),
+ },
+ {
+ name: "dupe at end",
+ in: toByteSlices("ccc", "ccc", "aaa"),
+ exp: toByteSlices("aaa", "ccc"),
+ },
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ out := bytesutil.SortDedup(test.in)
+ if !cmp.Equal(out, test.exp) {
+ t.Error("invalid result")
+ }
+ })
+ }
+}
+
func TestPack_WidthOne_One(t *testing.T) {
a := make([]byte, 8)
@@ -130,3 +220,56 @@ func TestPack_WidthOne_LastFill(t *testing.T) {
}
}
}
+
+func BenchmarkSortDedup(b *testing.B) {
+ data := toByteSlices("bbb", "aba", "bbb", "aba", "ccc", "bbb", "aba")
+ in := append([][]byte{}, data...)
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ x := in
+ x = bytesutil.SortDedup(x)
+ copy(in, data)
+ }
+}
+
+func BenchmarkContains_True(b *testing.B) {
+ var in [][]byte
+ for i := 'a'; i <= 'z'; i++ {
+ in = append(in, []byte(strings.Repeat(string(i), 3)))
+ }
+ for i := 0; i < b.N; i++ {
+ bytesutil.Contains(in, []byte("xxx"))
+ }
+}
+
+func BenchmarkContains_False(b *testing.B) {
+ var in [][]byte
+ for i := 'a'; i <= 'z'; i++ {
+ in = append(in, []byte(strings.Repeat(string(i), 3)))
+ }
+ for i := 0; i < b.N; i++ {
+ bytesutil.Contains(in, []byte("a"))
+ }
+}
+
+func BenchmarkSearchBytes_Exists(b *testing.B) {
+ var in [][]byte
+ for i := 'a'; i <= 'z'; i++ {
+ in = append(in, []byte(strings.Repeat(string(i), 3)))
+ }
+ for i := 0; i < b.N; i++ {
+ bytesutil.SearchBytes(in, []byte("xxx"))
+ }
+}
+
+func BenchmarkSearchBytes_NotExists(b *testing.B) {
+ var in [][]byte
+ for i := 'a'; i <= 'z'; i++ {
+ in = append(in, []byte(strings.Repeat(string(i), 3)))
+ }
+ for i := 0; i < b.N; i++ {
+ bytesutil.SearchBytes(in, []byte("a"))
+ }
+}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_solaris.go b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_solaris.go
index 49985bffeb..4a406db377 100644
--- a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_solaris.go
+++ b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_solaris.go
@@ -9,7 +9,7 @@ import (
"golang.org/x/sys/unix"
)
-func Map(path string) ([]byte, error) {
+func Map(path string, sz int64) ([]byte, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
@@ -23,7 +23,12 @@ func Map(path string) ([]byte, error) {
return nil, nil
}
- data, err := unix.Mmap(int(f.Fd()), 0, int(fi.Size()), syscall.PROT_READ, syscall.MAP_SHARED)
+ // Use file size if map size is not passed in.
+ if sz == 0 {
+ sz = fi.Size()
+ }
+
+ data, err := unix.Mmap(int(f.Fd()), 0, int(sz), syscall.PROT_READ, syscall.MAP_SHARED)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_test.go b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_test.go
index 91aecf4119..a182219a7c 100644
--- a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_test.go
+++ b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_test.go
@@ -9,7 +9,7 @@ import (
)
func TestMap(t *testing.T) {
- data, err := mmap.Map("mmap_test.go")
+ data, err := mmap.Map("mmap_test.go", 0)
if err != nil {
t.Fatalf("Open: %v", err)
}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_unix.go b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_unix.go
index 173ceed446..c9faafb8d4 100644
--- a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_unix.go
+++ b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_unix.go
@@ -10,10 +10,11 @@ package mmap
import (
"os"
"syscall"
+ "unsafe"
)
// Map memory-maps a file.
-func Map(path string) ([]byte, error) {
+func Map(path string, sz int64) ([]byte, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
@@ -27,10 +28,21 @@ func Map(path string) ([]byte, error) {
return nil, nil
}
- data, err := syscall.Mmap(int(f.Fd()), 0, int(fi.Size()), syscall.PROT_READ, syscall.MAP_SHARED)
+ // Use file size if map size is not passed in.
+ if sz == 0 {
+ sz = fi.Size()
+ }
+
+ data, err := syscall.Mmap(int(f.Fd()), 0, int(sz), syscall.PROT_READ, syscall.MAP_SHARED)
if err != nil {
return nil, err
}
+
+ if _, _, err := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&data[0])), uintptr(len(data)), uintptr(syscall.MADV_RANDOM)); err != 0 {
+ Unmap(data)
+ return nil, err
+ }
+
return data, nil
}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_windows.go b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_windows.go
index 3eee592faa..8efe48daf5 100644
--- a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_windows.go
+++ b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_windows.go
@@ -7,7 +7,7 @@ import (
)
// Map memory-maps a file.
-func Map(path string) ([]byte, error) {
+func Map(path string, sz int64) ([]byte, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
@@ -17,22 +17,29 @@ func Map(path string) ([]byte, error) {
fi, err := f.Stat()
if err != nil {
return nil, err
- } else if fi.Size() == 0 {
+ }
+
+ // TODO(edd): honor a caller-supplied map size once it has been tested; for
+ // now the file size is always used for the mapping.
+ // if sz == 0 {
+ // }
+ sz = fi.Size()
+ if fi.Size() == 0 {
return nil, nil
}
- lo, hi := uint32(fi.Size()), uint32(fi.Size()>>32)
+ lo, hi := uint32(sz), uint32(sz>>32)
fmap, err := syscall.CreateFileMapping(syscall.Handle(f.Fd()), nil, syscall.PAGE_READONLY, hi, lo, nil)
if err != nil {
return nil, err
}
defer syscall.CloseHandle(fmap)
- ptr, err := syscall.MapViewOfFile(fmap, syscall.FILE_MAP_READ, 0, 0, uintptr(fi.Size()))
+ ptr, err := syscall.MapViewOfFile(fmap, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
if err != nil {
return nil, err
}
- data := (*[1 << 30]byte)(unsafe.Pointer(ptr))[:fi.Size()]
+ data := (*[1 << 30]byte)(unsafe.Pointer(ptr))[:sz]
return data, nil
}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/pprofutil/pprofutil.go b/vendor/github.com/influxdata/influxdb/pkg/pprofutil/pprofutil.go
new file mode 100644
index 0000000000..d73352c6ea
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/pprofutil/pprofutil.go
@@ -0,0 +1,36 @@
+package pprofutil
+
+import (
+ "os"
+ "runtime/pprof"
+)
+
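+// Profile wraps a pprof.Profile with the output path and debug level used
+// when the profile is stopped.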
+type Profile struct {
+ *pprof.Profile
+
+ Path string
+ Debug int
+}
+
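+// NewProfile registers a new named pprof profile that is written to path with
+// the given debug level when Stop is called.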
+func NewProfile(name, path string, debug int) *Profile {
+ p := &Profile{Profile: pprof.NewProfile(name), Path: path, Debug: debug}
+ return p
+}
+
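+// Stop writes the profile to Path, panicking if the file cannot be created or
+// the profile cannot be written.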
+func (p *Profile) Stop() {
+ f, err := os.Create(p.Path)
+ if err != nil {
+ panic(err)
+ }
+ defer f.Close()
+
+ if err := p.WriteTo(f, p.Debug); err != nil {
+ panic(err)
+ }
+
+ if err := f.Close(); err != nil {
+ panic(err)
+ }
+
+ println("pprof profile written:", p.Path)
+}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/rhh/rhh.go b/vendor/github.com/influxdata/influxdb/pkg/rhh/rhh.go
index 29ab3be589..0530715900 100644
--- a/vendor/github.com/influxdata/influxdb/pkg/rhh/rhh.go
+++ b/vendor/github.com/influxdata/influxdb/pkg/rhh/rhh.go
@@ -2,6 +2,7 @@ package rhh
import (
"bytes"
+ "encoding/binary"
"sort"
"github.com/cespare/xxhash"
@@ -241,6 +242,13 @@ func HashKey(key []byte) int64 {
return h
}
+// HashUint64 computes a hash of a uint64. Hash is always non-zero.
+func HashUint64(key uint64) int64 {
+ buf := make([]byte, 8)
+ binary.BigEndian.PutUint64(buf, key)
+ return HashKey(buf)
+}
+
// Dist returns the probe distance for a hash in a slot index.
// NOTE: Capacity must be a power of 2.
func Dist(hash, i, capacity int64) int64 {
diff --git a/vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go b/vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go
new file mode 100644
index 0000000000..304c2de8d9
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go
@@ -0,0 +1,398 @@
+// Generated by tmpl
+// https://github.com/benbjohnson/tmpl
+//
+// DO NOT EDIT!
+// Source: merge.gen.go.tmpl
+
+package slices
+
+import "bytes"
+
+// MergeSortedFloats uses a k-way merge to merge n collections of sorted float64 slices.
+//
+// The resulting slice is returned in ascending order, with any duplicate values
+// removed.
+func MergeSortedFloats(n ...[]float64) []float64 {
+ var result []float64
+ if len(n) == 0 {
+ return nil
+ } else if len(n) == 1 {
+ // Special case. Merge single slice with a nil slice, to remove any
+ // duplicates from the single slice.
+ return MergeSortedFloats(n[0], nil)
+ }
+
+ var maxSize int
+ for _, a := range n {
+ if len(a) > maxSize {
+ maxSize = len(a)
+ }
+ }
+ result = make([]float64, 0, maxSize) // This will likely be too small but it's a start.
+
+ idxs := make([]int, len(n)) // Indexes we've processed.
+ var j int // Index we currently think is minimum.
+
+ for {
+ j = -1
+
+ // Find the smallest minimum in all slices.
+ for i := 0; i < len(n); i++ {
+ if idxs[i] >= len(n[i]) {
+ continue // We have completely drained all values in this slice.
+ } else if j == -1 {
+ // We haven't picked the minimum value yet. Pick this one.
+ j = i
+ continue
+ }
+
+ // If this value is lower than the candidate, it becomes the new candidate.
+
+ if n[i][idxs[i]] < n[j][idxs[j]] {
+ j = i
+ } else if n[i][idxs[i]] == n[j][idxs[j]] {
+ // Duplicate value. Throw it away.
+ idxs[i]++
+ }
+
+ }
+
+ // We could have drained all of the values and be done...
+ if j == -1 {
+ break
+ }
+
+ // First value to just append it and move on.
+ if len(result) == 0 {
+ result = append(result, n[j][idxs[j]])
+ idxs[j]++
+ continue
+ }
+
+ // Append the minimum value to results if it's not a duplicate of
+ // the existing one.
+
+ if result[len(result)-1] < n[j][idxs[j]] {
+ result = append(result, n[j][idxs[j]])
+ } else if result[len(result)-1] == n[j][idxs[j]] {
+ // Duplicate so drop it.
+ } else {
+ panic("value being merged out of order.")
+ }
+
+ idxs[j]++
+ }
+ return result
+}
+
+// MergeSortedInts uses a k-way merge to merge n collections of sorted int64 slices.
+//
+// The resulting slice is returned in ascending order, with any duplicate values
+// removed.
+func MergeSortedInts(n ...[]int64) []int64 {
+ var result []int64
+ if len(n) == 0 {
+ return nil
+ } else if len(n) == 1 {
+ // Special case. Merge single slice with a nil slice, to remove any
+ // duplicates from the single slice.
+ return MergeSortedInts(n[0], nil)
+ }
+
+ var maxSize int
+ for _, a := range n {
+ if len(a) > maxSize {
+ maxSize = len(a)
+ }
+ }
+ result = make([]int64, 0, maxSize) // This will likely be too small but it's a start.
+
+ idxs := make([]int, len(n)) // Indexes we've processed.
+ var j int // Index we currently think is minimum.
+
+ for {
+ j = -1
+
+ // Find the smallest minimum in all slices.
+ for i := 0; i < len(n); i++ {
+ if idxs[i] >= len(n[i]) {
+ continue // We have completely drained all values in this slice.
+ } else if j == -1 {
+ // We haven't picked the minimum value yet. Pick this one.
+ j = i
+ continue
+ }
+
+ // If this value is lower than the candidate, it becomes the new candidate.
+
+ if n[i][idxs[i]] < n[j][idxs[j]] {
+ j = i
+ } else if n[i][idxs[i]] == n[j][idxs[j]] {
+ // Duplicate value. Throw it away.
+ idxs[i]++
+ }
+
+ }
+
+ // We could have drained all of the values and be done...
+ if j == -1 {
+ break
+ }
+
+ // First value to just append it and move on.
+ if len(result) == 0 {
+ result = append(result, n[j][idxs[j]])
+ idxs[j]++
+ continue
+ }
+
+ // Append the minimum value to results if it's not a duplicate of
+ // the existing one.
+
+ if result[len(result)-1] < n[j][idxs[j]] {
+ result = append(result, n[j][idxs[j]])
+ } else if result[len(result)-1] == n[j][idxs[j]] {
+ // Duplicate so drop it.
+ } else {
+ panic("value being merged out of order.")
+ }
+
+ idxs[j]++
+ }
+ return result
+}
+
+// MergeSortedUInts uses a k-way merge to merge n collections of sorted uint64 slices.
+//
+// The resulting slice is returned in ascending order, with any duplicate values
+// removed.
+func MergeSortedUInts(n ...[]uint64) []uint64 {
+ var result []uint64
+ if len(n) == 0 {
+ return nil
+ } else if len(n) == 1 {
+ // Special case. Merge single slice with a nil slice, to remove any
+ // duplicates from the single slice.
+ return MergeSortedUInts(n[0], nil)
+ }
+
+ var maxSize int
+ for _, a := range n {
+ if len(a) > maxSize {
+ maxSize = len(a)
+ }
+ }
+ result = make([]uint64, 0, maxSize) // This will likely be too small but it's a start.
+
+ idxs := make([]int, len(n)) // Indexes we've processed.
+ var j int // Index we currently think is minimum.
+
+ for {
+ j = -1
+
+ // Find the smallest minimum in all slices.
+ for i := 0; i < len(n); i++ {
+ if idxs[i] >= len(n[i]) {
+ continue // We have completely drained all values in this slice.
+ } else if j == -1 {
+ // We haven't picked the minimum value yet. Pick this one.
+ j = i
+ continue
+ }
+
+ // If this value is lower than the candidate, it becomes the new candidate.
+
+ if n[i][idxs[i]] < n[j][idxs[j]] {
+ j = i
+ } else if n[i][idxs[i]] == n[j][idxs[j]] {
+ // Duplicate value. Throw it away.
+ idxs[i]++
+ }
+
+ }
+
+ // We could have drained all of the values and be done...
+ if j == -1 {
+ break
+ }
+
+ // First value to just append it and move on.
+ if len(result) == 0 {
+ result = append(result, n[j][idxs[j]])
+ idxs[j]++
+ continue
+ }
+
+ // Append the minimum value to results if it's not a duplicate of
+ // the existing one.
+
+ if result[len(result)-1] < n[j][idxs[j]] {
+ result = append(result, n[j][idxs[j]])
+ } else if result[len(result)-1] == n[j][idxs[j]] {
+ // Duplicate so drop it.
+ } else {
+ panic("value being merged out of order.")
+ }
+
+ idxs[j]++
+ }
+ return result
+}
+
+// MergeSortedStrings uses a k-way merge to merge n collections of sorted string slices.
+//
+// The resulting slice is returned in ascending order, with any duplicate values
+// removed.
+func MergeSortedStrings(n ...[]string) []string {
+ var result []string
+ if len(n) == 0 {
+ return nil
+ } else if len(n) == 1 {
+ // Special case. Merge single slice with a nil slice, to remove any
+ // duplicates from the single slice.
+ return MergeSortedStrings(n[0], nil)
+ }
+
+ var maxSize int
+ for _, a := range n {
+ if len(a) > maxSize {
+ maxSize = len(a)
+ }
+ }
+ result = make([]string, 0, maxSize) // This will likely be too small but it's a start.
+
+ idxs := make([]int, len(n)) // Indexes we've processed.
+ var j int // Index we currently think is minimum.
+
+ for {
+ j = -1
+
+ // Find the smallest minimum in all slices.
+ for i := 0; i < len(n); i++ {
+ if idxs[i] >= len(n[i]) {
+ continue // We have completely drained all values in this slice.
+ } else if j == -1 {
+ // We haven't picked the minimum value yet. Pick this one.
+ j = i
+ continue
+ }
+
+ // If this value is lower than the candidate, it becomes the new candidate.
+
+ if n[i][idxs[i]] < n[j][idxs[j]] {
+ j = i
+ } else if n[i][idxs[i]] == n[j][idxs[j]] {
+ // Duplicate value. Throw it away.
+ idxs[i]++
+ }
+
+ }
+
+ // We could have drained all of the values and be done...
+ if j == -1 {
+ break
+ }
+
+ // First value to just append it and move on.
+ if len(result) == 0 {
+ result = append(result, n[j][idxs[j]])
+ idxs[j]++
+ continue
+ }
+
+ // Append the minimum value to results if it's not a duplicate of
+ // the existing one.
+
+ if result[len(result)-1] < n[j][idxs[j]] {
+ result = append(result, n[j][idxs[j]])
+ } else if result[len(result)-1] == n[j][idxs[j]] {
+ // Duplicate so drop it.
+ } else {
+ panic("value being merged out of order.")
+ }
+
+ idxs[j]++
+ }
+ return result
+}
+
+// MergeSortedBytes uses a k-way merge to merge n collections of sorted byte slices.
+//
+// The resulting slice is returned in ascending order, with any duplicate values
+// removed.
+func MergeSortedBytes(n ...[][]byte) [][]byte {
+ var result [][]byte
+ if len(n) == 0 {
+ return nil
+ } else if len(n) == 1 {
+ // Special case. Merge single slice with a nil slice, to remove any
+ // duplicates from the single slice.
+ return MergeSortedBytes(n[0], nil)
+ }
+
+ var maxSize int
+ for _, a := range n {
+ if len(a) > maxSize {
+ maxSize = len(a)
+ }
+ }
+ result = make([][]byte, 0, maxSize) // This will likely be too small but it's a start.
+
+ idxs := make([]int, len(n)) // Indexes we've processed.
+ var j int // Index we currently think is minimum.
+
+ var cmp int // Result of comparing most recent value.
+
+ for {
+ j = -1
+
+ // Find the smallest minimum in all slices.
+ for i := 0; i < len(n); i++ {
+ if idxs[i] >= len(n[i]) {
+ continue // We have completely drained all values in this slice.
+ } else if j == -1 {
+ // We haven't picked the minimum value yet. Pick this one.
+ j = i
+ continue
+ }
+
+			// If this value is lower than the candidate, it becomes the new minimum.
+
+ cmp = bytes.Compare(n[i][idxs[i]], n[j][idxs[j]])
+ if cmp == -1 {
+ j = i
+ } else if cmp == 0 {
+ // Duplicate value. Throw it away.
+ idxs[i]++
+ }
+
+ }
+
+ // We could have drained all of the values and be done...
+ if j == -1 {
+ break
+ }
+
+		// First value; just append it and move on.
+ if len(result) == 0 {
+ result = append(result, n[j][idxs[j]])
+ idxs[j]++
+ continue
+ }
+
+ // Append the minimum value to results if it's not a duplicate of
+ // the existing one.
+
+ cmp = bytes.Compare(result[len(result)-1], n[j][idxs[j]])
+ if cmp == -1 {
+ result = append(result, n[j][idxs[j]])
+ } else if cmp == 0 {
+ // Duplicate so drop it.
+ } else {
+ panic("value being merged out of order.")
+ }
+
+ idxs[j]++
+ }
+ return result
+}
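As a quick orientation for the merge helpers added above, here is a minimal, hypothetical usage sketch (not part of the diff). It assumes each input slice is already sorted in ascending order, which is what the k-way merge relies on:

    package main

    import (
        "fmt"

        "github.com/influxdata/influxdb/pkg/slices"
    )

    func main() {
        // Each input slice must already be sorted ascending; duplicates within
        // and across inputs are dropped by the merge.
        a := []uint64{1, 3, 5}
        b := []uint64{2, 3, 8}
        fmt.Println(slices.MergeSortedUInts(a, b)) // [1 2 3 5 8]
    }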
diff --git a/vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go.tmpl b/vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go.tmpl
new file mode 100644
index 0000000000..8e40a656d6
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go.tmpl
@@ -0,0 +1,104 @@
+package slices
+
+import "bytes"
+
+{{with $types := .}}{{range $k := $types}}
+
+// MergeSorted{{$k.Name}} uses a k-way merge to merge n collections of sorted {{$k.Type}} values.
+//
+// The resulting slice is returned in ascending order, with any duplicate values
+// removed.
+func MergeSorted{{$k.Name}}(n ...[]{{$k.Type}}) []{{$k.Type}} {
+ var result []{{$k.Type}}
+ if len(n) == 0 {
+ return nil
+ } else if len(n) == 1 {
+ // Special case. Merge single slice with a nil slice, to remove any
+ // duplicates from the single slice.
+ return MergeSorted{{$k.Name}}(n[0], nil)
+ }
+
+ var maxSize int
+ for _, a := range n {
+ if len(a) > maxSize {
+ maxSize = len(a)
+ }
+ }
+ result = make([]{{$k.Type}}, 0, maxSize) // This will likely be too small but it's a start.
+
+ idxs := make([]int, len(n)) // Indexes we've processed.
+ var j int // Index we currently think is minimum.
+{{if eq $k.Name "Bytes" }}
+ var cmp int // Result of comparing most recent value.
+{{end}}
+ for {
+ j = -1
+
+ // Find the smallest minimum in all slices.
+ for i := 0; i < len(n); i++ {
+ if idxs[i] >= len(n[i]) {
+ continue // We have completely drained all values in this slice.
+ } else if j == -1 {
+ // We haven't picked the minimum value yet. Pick this one.
+ j = i
+ continue
+ }
+
+			// If this value is lower than the candidate, it becomes the new minimum.
+{{if eq $k.Name "Bytes" }}
+ cmp = bytes.Compare(n[i][idxs[i]], n[j][idxs[j]])
+ if cmp == -1 {
+ j = i
+ } else if cmp == 0 {
+ // Duplicate value. Throw it away.
+ idxs[i]++
+ }
+{{else}}
+ if n[i][idxs[i]] < n[j][idxs[j]] {
+ j = i
+ } else if n[i][idxs[i]] == n[j][idxs[j]] {
+ // Duplicate value. Throw it away.
+ idxs[i]++
+ }
+{{end}}
+ }
+
+ // We could have drained all of the values and be done...
+ if j == -1 {
+ break
+ }
+
+		// First value; just append it and move on.
+ if len(result) == 0 {
+ result = append(result, n[j][idxs[j]])
+ idxs[j]++
+ continue
+ }
+
+ // Append the minimum value to results if it's not a duplicate of
+ // the existing one.
+{{if eq $k.Name "Bytes" }}
+ cmp = bytes.Compare(result[len(result)-1], n[j][idxs[j]])
+ if cmp == -1 {
+ result = append(result, n[j][idxs[j]])
+ } else if cmp == 0 {
+ // Duplicate so drop it.
+ } else {
+ panic("value being merged out of order.")
+ }
+{{else}}
+ if result[len(result)-1] < n[j][idxs[j]] {
+ result = append(result, n[j][idxs[j]])
+ } else if result[len(result)-1] == n[j][idxs[j]] {
+ // Duplicate so drop it.
+ } else {
+ panic("value being merged out of order.")
+ }
+{{end}}
+ idxs[j]++
+ }
+ return result
+}
+
+
+{{end}}{{end}}
\ No newline at end of file
diff --git a/vendor/github.com/influxdata/influxdb/pkg/slices/merge_test.go b/vendor/github.com/influxdata/influxdb/pkg/slices/merge_test.go
new file mode 100644
index 0000000000..55f97deff6
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/slices/merge_test.go
@@ -0,0 +1,101 @@
+package slices_test
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/influxdata/influxdb/pkg/slices"
+)
+
+func TestMergeSortedBytes(t *testing.T) {
+ cases := []struct {
+ Inputs [][][]byte
+ Out [][]byte
+ }{
+ {Inputs: [][][]byte{}},
+ {Inputs: [][][]byte{toBytes(0)}, Out: toBytes(0)},
+ {
+ Inputs: [][][]byte{toBytes(2), [][]byte(nil), toBytes(2)},
+ Out: toBytes(2),
+ },
+ {
+ Inputs: [][][]byte{toBytes(9), toBytes(1, 16, 16), toBytes(5, 10)},
+ Out: toBytes(1, 5, 9, 10, 16),
+ },
+ {
+ Inputs: [][][]byte{toBytes(20), toBytes(16), toBytes(10)},
+ Out: toBytes(10, 16, 20),
+ },
+ {
+ Inputs: [][][]byte{toBytes(2, 2, 2, 2, 2, 2, 2, 2)},
+ Out: toBytes(2),
+ },
+ {
+ Inputs: [][][]byte{toBytes(2, 2, 2, 2, 2, 2, 2, 2), [][]byte(nil), [][]byte(nil), [][]byte(nil)},
+ Out: toBytes(2),
+ },
+ {
+ Inputs: [][][]byte{toBytes(1, 2, 3, 4, 5), toBytes(1, 2, 3, 4, 5), toBytes(1, 2, 3, 4, 5)},
+ Out: toBytes(1, 2, 3, 4, 5),
+ },
+ }
+
+ for i, c := range cases {
+ t.Run(fmt.Sprintf("Example %d", i+1), func(t *testing.T) {
+ if got, exp := slices.MergeSortedBytes(c.Inputs...), c.Out; !reflect.DeepEqual(got, exp) {
+ t.Fatalf("got %v, expected %v", got, exp)
+ }
+ })
+ }
+}
+
+func toBytes(a ...int) [][]byte {
+ var result [][]byte
+ for _, v := range a {
+ result = append(result, []byte{byte(v)})
+ }
+ return result
+}
+
+func TestMergeSortedInts(t *testing.T) {
+ cases := []struct {
+ Inputs [][]int64
+ Out []int64
+ }{
+ {Inputs: [][]int64{}},
+ {Inputs: [][]int64{[]int64{0}}, Out: []int64{0}},
+ {
+ Inputs: [][]int64{[]int64{2}, []int64(nil), []int64{2}},
+ Out: []int64{2},
+ },
+ {
+ Inputs: [][]int64{[]int64{9}, []int64{1, 16, 16}, []int64{5, 10}},
+ Out: []int64{1, 5, 9, 10, 16},
+ },
+ {
+ Inputs: [][]int64{[]int64{20}, []int64{16}, []int64{10}},
+ Out: []int64{10, 16, 20},
+ },
+ {
+ Inputs: [][]int64{[]int64{2, 2, 2, 2, 2, 2, 2, 2}},
+ Out: []int64{2},
+ },
+ {
+ Inputs: [][]int64{[]int64{2, 2, 2, 2, 2, 2, 2, 2}, []int64(nil), []int64(nil), []int64(nil)},
+ Out: []int64{2},
+ },
+ {
+ Inputs: [][]int64{[]int64{1, 2, 3, 4, 5}, []int64{1, 2, 3, 4, 5}, []int64{1, 2, 3, 4, 5}},
+ Out: []int64{1, 2, 3, 4, 5},
+ },
+ }
+
+ for i, c := range cases {
+ t.Run(fmt.Sprintf("Example %d", i+1), func(t *testing.T) {
+ if got, exp := slices.MergeSortedInts(c.Inputs...), c.Out; !reflect.DeepEqual(got, exp) {
+ t.Fatalf("got %v, expected %v", got, exp)
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/slices/tmpldata b/vendor/github.com/influxdata/influxdb/pkg/slices/tmpldata
new file mode 100644
index 0000000000..f4786858a5
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/slices/tmpldata
@@ -0,0 +1,22 @@
+[
+ {
+ "Name":"Floats",
+ "Type":"float64"
+ },
+ {
+ "Name":"Ints",
+ "Type":"int64"
+ },
+ {
+ "Name":"UInts",
+ "Type":"uint64"
+ },
+ {
+ "Name":"Strings",
+ "Type":"string"
+ },
+ {
+ "Name":"Bytes",
+ "Type":"[]byte"
+ }
+]
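For context (an assumption, not something shown in this diff): merge.gen.go above is presumably produced by rendering merge.gen.go.tmpl against this tmpldata file, in the style influxdb uses elsewhere with the benbjohnson/tmpl tool. A hedged sketch of the go:generate directive that would wire this up:

    package slices

    //go:generate tmpl -data=@tmpldata merge.gen.go.tmpl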
diff --git a/vendor/github.com/influxdata/influxdb/pkg/tar/file_unix.go b/vendor/github.com/influxdata/influxdb/pkg/tar/file_unix.go
new file mode 100644
index 0000000000..6ae43ce47a
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/tar/file_unix.go
@@ -0,0 +1,20 @@
+// +build !windows
+
+package tar
+
+import "os"
+
+func syncDir(dirName string) error {
+ // fsync the dir to flush the rename
+ dir, err := os.OpenFile(dirName, os.O_RDONLY, os.ModeDir)
+ if err != nil {
+ return err
+ }
+ defer dir.Close()
+ return dir.Sync()
+}
+
+// renameFile renames the file at oldpath to newpath.
+func renameFile(oldpath, newpath string) error {
+ return os.Rename(oldpath, newpath)
+}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/tar/file_windows.go b/vendor/github.com/influxdata/influxdb/pkg/tar/file_windows.go
new file mode 100644
index 0000000000..2402d127d2
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/tar/file_windows.go
@@ -0,0 +1,19 @@
+package tar
+
+import "os"
+
+func syncDir(dirName string) error {
+ return nil
+}
+
+// renameFile renames the file at oldpath to newpath.
+// If newpath already exists, it will be removed before renaming.
+func renameFile(oldpath, newpath string) error {
+ if _, err := os.Stat(newpath); err == nil {
+ if err = os.Remove(newpath); nil != err {
+ return err
+ }
+ }
+
+ return os.Rename(oldpath, newpath)
+}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/tar/stream.go b/vendor/github.com/influxdata/influxdb/pkg/tar/stream.go
new file mode 100644
index 0000000000..11f1e6ed0d
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/pkg/tar/stream.go
@@ -0,0 +1,160 @@
+package tar
+
+import (
+ "archive/tar"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+// Stream is a convenience function for creating a tar of a shard dir. It walks over the directory and subdirs,
+// possibly writing each file to a tar writer stream. By default StreamFile is used, which will result in all files
+// being written. A custom writeFunc can be passed so that each file may be written, modified+written, or skipped
+// depending on the custom logic.
+func Stream(w io.Writer, dir, relativePath string, writeFunc func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error) error {
+ tw := tar.NewWriter(w)
+ defer tw.Close()
+
+ if writeFunc == nil {
+ writeFunc = StreamFile
+ }
+
+ return filepath.Walk(dir, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Skip adding an entry for the root dir
+ if dir == path && f.IsDir() {
+ return nil
+ }
+
+		// Figure out the full relative path, including any sub-dirs.
+ subDir, _ := filepath.Split(path)
+ subDir, err = filepath.Rel(dir, subDir)
+ if err != nil {
+ return err
+ }
+
+ return writeFunc(f, filepath.Join(relativePath, subDir), path, tw)
+ })
+}
+
+// SinceFilterTarFile generates a filtering function for Stream that checks an incoming file and only writes the file to
+// the stream if its mod time is later than since. Example: to tar only files newer than a certain datetime, use
+// tar.Stream(w, dir, relativePath, SinceFilterTarFile(datetime)).
+func SinceFilterTarFile(since time.Time) func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error {
+ return func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error {
+ if f.ModTime().After(since) {
+ return StreamFile(f, shardRelativePath, fullPath, tw)
+ }
+ return nil
+ }
+}
+
+// StreamFile streams a single file to tw, extending the header name using the shardRelativePath.
+func StreamFile(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error {
+ h, err := tar.FileInfoHeader(f, f.Name())
+ if err != nil {
+ return err
+ }
+ h.Name = filepath.ToSlash(filepath.Join(shardRelativePath, f.Name()))
+
+ if err := tw.WriteHeader(h); err != nil {
+ return err
+ }
+
+ if !f.Mode().IsRegular() {
+ return nil
+ }
+
+ fr, err := os.Open(fullPath)
+ if err != nil {
+ return err
+ }
+
+ defer fr.Close()
+
+ _, err = io.CopyN(tw, fr, h.Size)
+
+ return err
+}
+
+// Restore reads a tar archive from r and extracts all of its files into dir,
+// using only the base name of each file.
+func Restore(r io.Reader, dir string) error {
+ tr := tar.NewReader(r)
+ for {
+ if err := extractFile(tr, dir); err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+ }
+
+ return syncDir(dir)
+}
+
+// extractFile copies the next file from tr into dir, using the file's base name.
+func extractFile(tr *tar.Reader, dir string) error {
+ // Read next archive file.
+ hdr, err := tr.Next()
+ if err != nil {
+ return err
+ }
+
+ // The hdr.Name is the relative path of the file from the root data dir.
+	// e.g. (db/rp/1/xxxxx.tsm or db/rp/1/index/xxxxxx.tsi)
+ sections := strings.Split(filepath.FromSlash(hdr.Name), string(filepath.Separator))
+ if len(sections) < 3 {
+ return fmt.Errorf("invalid archive path: %s", hdr.Name)
+ }
+
+ relativePath := filepath.Join(sections[3:]...)
+
+ subDir, _ := filepath.Split(relativePath)
+	// If this is a directory entry (usually just `index` for tsi), create it and move on.
+ if hdr.Typeflag == tar.TypeDir {
+ if err := os.MkdirAll(filepath.Join(dir, subDir), os.FileMode(hdr.Mode).Perm()); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // Make sure the dir we need to write into exists. It should, but just double check in
+ // case we get a slightly invalid tarball.
+ if subDir != "" {
+ if err := os.MkdirAll(filepath.Join(dir, subDir), 0755); err != nil {
+ return err
+ }
+ }
+
+ destPath := filepath.Join(dir, relativePath)
+ tmp := destPath + ".tmp"
+
+ // Create new file on disk.
+ f, err := os.OpenFile(tmp, os.O_CREATE|os.O_RDWR, os.FileMode(hdr.Mode).Perm())
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Copy from archive to the file.
+ if _, err := io.CopyN(f, tr, hdr.Size); err != nil {
+ return err
+ }
+
+ // Sync to disk & close.
+ if err := f.Sync(); err != nil {
+ return err
+ }
+
+ if err := f.Close(); err != nil {
+ return err
+ }
+
+ return renameFile(tmp, destPath)
+}
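To illustrate how the Stream and SinceFilterTarFile pieces above compose, a minimal sketch with hypothetical paths (not part of the diff):

    package main

    import (
        "os"
        "time"

        "github.com/influxdata/influxdb/pkg/tar"
    )

    func main() {
        // Archive only files in the shard directory modified within the last hour.
        out, err := os.Create("/tmp/shard.tar")
        if err != nil {
            panic(err)
        }
        defer out.Close()

        since := time.Now().Add(-1 * time.Hour)
        if err := tar.Stream(out, "/var/lib/influxdb/data/db/rp/1", "db/rp/1", tar.SinceFilterTarFile(since)); err != nil {
            panic(err)
        }
    }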
diff --git a/vendor/github.com/influxdata/influxdb/prometheus/converters.go b/vendor/github.com/influxdata/influxdb/prometheus/converters.go
index dc65b014ad..eada5bf019 100644
--- a/vendor/github.com/influxdata/influxdb/prometheus/converters.go
+++ b/vendor/github.com/influxdata/influxdb/prometheus/converters.go
@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"math"
+ "regexp"
"time"
"github.com/influxdata/influxdb/models"
@@ -93,6 +94,8 @@ func ReadRequestToInfluxQLQuery(req *remote.ReadRequest, db, rp string) (*influx
// condFromMatcher converts a Prometheus LabelMatcher into an equivalent InfluxQL BinaryExpr
func condFromMatcher(m *remote.LabelMatcher) (*influxql.BinaryExpr, error) {
var op influxql.Token
+ var rhs influxql.Expr
+
switch m.Type {
case remote.MatchType_EQUAL:
op = influxql.EQ
@@ -106,10 +109,22 @@ func condFromMatcher(m *remote.LabelMatcher) (*influxql.BinaryExpr, error) {
return nil, fmt.Errorf("unknown match type %v", m.Type)
}
+ if op == influxql.EQREGEX || op == influxql.NEQREGEX {
+ re, err := regexp.Compile(m.Value)
+ if err != nil {
+ return nil, err
+ }
+
+ // Convert regex values to InfluxDB format.
+ rhs = &influxql.RegexLiteral{Val: re}
+ } else {
+ rhs = &influxql.StringLiteral{Val: m.Value}
+ }
+
return &influxql.BinaryExpr{
Op: op,
LHS: &influxql.VarRef{Val: m.Name},
- RHS: &influxql.StringLiteral{Val: m.Value},
+ RHS: rhs,
}, nil
}
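The net effect of this change is that regex matchers are rendered as InfluxQL regex literals rather than quoted strings. A standalone sketch of the new right-hand side (the matcher name and pattern are illustrative):

    package main

    import (
        "fmt"
        "regexp"

        "github.com/influxdata/influxql"
    )

    func main() {
        // A regex matcher now becomes an InfluxQL regex literal (=~ /.../).
        re := regexp.MustCompile("c.*")
        expr := &influxql.BinaryExpr{
            Op:  influxql.EQREGEX,
            LHS: &influxql.VarRef{Val: "region"},
            RHS: &influxql.RegexLiteral{Val: re},
        }
        fmt.Println(expr.String()) // region =~ /c.*/
    }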
diff --git a/vendor/github.com/influxdata/influxdb/prometheus/converters_test.go b/vendor/github.com/influxdata/influxdb/prometheus/converters_test.go
new file mode 100644
index 0000000000..308e57c945
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/prometheus/converters_test.go
@@ -0,0 +1,97 @@
+package prometheus_test
+
+import (
+ "errors"
+ "reflect"
+ "testing"
+
+ "github.com/influxdata/influxdb/prometheus"
+ "github.com/influxdata/influxdb/prometheus/remote"
+ "github.com/influxdata/influxql"
+)
+
+func TestReadRequestToInfluxQLQuery(t *testing.T) {
+ examples := []struct {
+ name string
+ queries []*remote.Query
+ expQuery string
+ expError error
+ }{
+ {
+ name: "too many queries",
+ queries: []*remote.Query{{}, {}}, // Multiple queries
+ expError: errors.New("Prometheus read endpoint currently only supports one query at a time"),
+ },
+ {
+ name: "single condition",
+ queries: []*remote.Query{{
+ StartTimestampMs: 1,
+ EndTimestampMs: 100,
+ Matchers: []*remote.LabelMatcher{
+ {Name: "region", Value: "west", Type: remote.MatchType_EQUAL},
+ },
+ }},
+ expQuery: "SELECT f64 FROM db0.rp0._ WHERE region = 'west' AND time >= '1970-01-01T00:00:00.001Z' AND time <= '1970-01-01T00:00:00.1Z' GROUP BY *",
+ },
+ {
+ name: "multiple conditions",
+ queries: []*remote.Query{{
+ StartTimestampMs: 1,
+ EndTimestampMs: 100,
+ Matchers: []*remote.LabelMatcher{
+ {Name: "region", Value: "west", Type: remote.MatchType_EQUAL},
+ {Name: "host", Value: "serverA", Type: remote.MatchType_NOT_EQUAL},
+ },
+ }},
+ expQuery: "SELECT f64 FROM db0.rp0._ WHERE region = 'west' AND host != 'serverA' AND time >= '1970-01-01T00:00:00.001Z' AND time <= '1970-01-01T00:00:00.1Z' GROUP BY *",
+ },
+ {
+ name: "rewrite regex",
+ queries: []*remote.Query{{
+ StartTimestampMs: 1,
+ EndTimestampMs: 100,
+ Matchers: []*remote.LabelMatcher{
+ {Name: "region", Value: "c.*", Type: remote.MatchType_REGEX_MATCH},
+ {Name: "host", Value: `\d`, Type: remote.MatchType_REGEX_NO_MATCH},
+ },
+ }},
+ expQuery: `SELECT f64 FROM db0.rp0._ WHERE region =~ /c.*/ AND host !~ /\d/ AND time >= '1970-01-01T00:00:00.001Z' AND time <= '1970-01-01T00:00:00.1Z' GROUP BY *`,
+ },
+ {
+ name: "escape regex",
+ queries: []*remote.Query{{
+ StartTimestampMs: 1,
+ EndTimestampMs: 100,
+ Matchers: []*remote.LabelMatcher{
+ {Name: "test_type", Value: "a/b", Type: remote.MatchType_REGEX_MATCH},
+ },
+ }},
+ expQuery: `SELECT f64 FROM db0.rp0._ WHERE test_type =~ /a\/b/ AND time >= '1970-01-01T00:00:00.001Z' AND time <= '1970-01-01T00:00:00.1Z' GROUP BY *`,
+ },
+ }
+
+ for _, example := range examples {
+ t.Run(example.name, func(t *testing.T) {
+ readRequest := &remote.ReadRequest{Queries: example.queries}
+ query, err := prometheus.ReadRequestToInfluxQLQuery(readRequest, "db0", "rp0")
+ if !reflect.DeepEqual(err, example.expError) {
+ t.Errorf("got error %v, expected %v", err, example.expError)
+ }
+
+ var queryString string
+ if query != nil {
+ queryString = query.String()
+ }
+
+ if queryString != example.expQuery {
+ t.Errorf("got query %v, expected %v", queryString, example.expQuery)
+ }
+
+ if queryString != "" {
+ if _, err := influxql.ParseStatement(queryString); err != nil {
+ t.Error(err)
+ }
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/influxdata/influxdb/query/internal/internal.pb.go b/vendor/github.com/influxdata/influxdb/query/internal/internal.pb.go
index 0b2ba473db..242245ec85 100644
--- a/vendor/github.com/influxdata/influxdb/query/internal/internal.pb.go
+++ b/vendor/github.com/influxdata/influxdb/query/internal/internal.pb.go
@@ -554,7 +554,7 @@ func init() { proto.RegisterFile("internal/internal.proto", fileDescriptorIntern
var fileDescriptorInternal = []byte{
// 796 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x6d, 0x6f, 0xe3, 0x44,
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x55, 0x6d, 0x6f, 0xe3, 0x44,
0x10, 0x96, 0xe3, 0x3a, 0x8d, 0x27, 0xcd, 0xf5, 0x58, 0x4a, 0x59, 0xa1, 0x13, 0xb2, 0x2c, 0x40,
0x16, 0xa0, 0x22, 0xf5, 0x13, 0x9f, 0x90, 0x72, 0xf4, 0x8a, 0x2a, 0xdd, 0xb5, 0xa7, 0x4d, 0xe9,
0xf7, 0x25, 0x9e, 0x5a, 0x2b, 0x39, 0xeb, 0xb0, 0x5e, 0xa3, 0xe4, 0x07, 0xf4, 0x87, 0xf1, 0x13,
diff --git a/vendor/github.com/influxdata/influxdb/query/query_executor.go b/vendor/github.com/influxdata/influxdb/query/query_executor.go
index 885566a260..08ae6b673c 100644
--- a/vendor/github.com/influxdata/influxdb/query/query_executor.go
+++ b/vendor/github.com/influxdata/influxdb/query/query_executor.go
@@ -508,6 +508,8 @@ func (q *QueryTask) monitor(fn QueryMonitorFunc) {
func (q *QueryTask) close() {
q.mu.Lock()
if q.status != KilledTask {
+ // Set the status to killed to prevent closing the channel twice.
+ q.status = KilledTask
close(q.closing)
}
q.mu.Unlock()
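The added line is the usual mark-then-close guard that makes close idempotent under the mutex; a reduced, self-contained sketch of the same pattern (illustrative names, not the actual QueryTask type):

    package main

    import "sync"

    type task struct {
        mu      sync.Mutex
        killed  bool
        closing chan struct{}
    }

    // close marks the task killed before closing the channel, so a second call
    // never closes the already-closed channel and panics.
    func (t *task) close() {
        t.mu.Lock()
        defer t.mu.Unlock()
        if t.killed {
            return
        }
        t.killed = true
        close(t.closing)
    }

    func main() {
        t := &task{closing: make(chan struct{})}
        t.close()
        t.close() // safe: the second call is a no-op
    }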
diff --git a/vendor/github.com/influxdata/influxdb/services/httpd/handler_test.go b/vendor/github.com/influxdata/influxdb/services/httpd/handler_test.go
index 18e73e7b37..fa929498e5 100644
--- a/vendor/github.com/influxdata/influxdb/services/httpd/handler_test.go
+++ b/vendor/github.com/influxdata/influxdb/services/httpd/handler_test.go
@@ -615,7 +615,7 @@ func TestHandler_PromRead(t *testing.T) {
h := NewHandler(false)
h.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx query.ExecutionContext) error {
- if stmt.String() != `SELECT f64 FROM foo.._ WHERE eq = 'a' AND neq != 'b' AND regex =~ 'c' AND neqregex !~ 'd' AND time >= '1970-01-01T00:00:00.001Z' AND time <= '1970-01-01T00:00:00.002Z' GROUP BY *` {
+ if stmt.String() != `SELECT f64 FROM foo.._ WHERE eq = 'a' AND neq != 'b' AND regex =~ /c/ AND neqregex !~ /d/ AND time >= '1970-01-01T00:00:00.001Z' AND time <= '1970-01-01T00:00:00.002Z' GROUP BY *` {
t.Fatalf("unexpected query: %s", stmt.String())
} else if ctx.Database != `foo` {
t.Fatalf("unexpected db: %s", ctx.Database)
diff --git a/vendor/github.com/influxdata/influxdb/services/meta/data.go b/vendor/github.com/influxdata/influxdb/services/meta/data.go
index d3e191d1da..b553a2b17b 100644
--- a/vendor/github.com/influxdata/influxdb/services/meta/data.go
+++ b/vendor/github.com/influxdata/influxdb/services/meta/data.go
@@ -10,6 +10,7 @@ import (
"time"
"unicode"
+ "fmt"
"github.com/gogo/protobuf/proto"
"github.com/influxdata/influxdb"
"github.com/influxdata/influxdb/models"
@@ -783,6 +784,106 @@ func (data *Data) hasAdminUser() bool {
return false
}
+// ImportData imports selected data into the current metadata.
+// If non-empty, backupDBName, restoreDBName, backupRPName, and restoreRPName can be used to select DB metadata from other
+// and to assign a new name to the imported data. It returns a map of shard IDs in the old metadata to new shard IDs
+// in the new metadata, along with a list of new databases created, both of which can assist in the import of existing
+// shard data during a database restore.
+func (data *Data) ImportData(other Data, backupDBName, restoreDBName, backupRPName, restoreRPName string) (map[uint64]uint64, []string, error) {
+ shardIDMap := make(map[uint64]uint64)
+ if backupDBName != "" {
+ dbName, err := data.importOneDB(other, backupDBName, restoreDBName, backupRPName, restoreRPName, shardIDMap)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return shardIDMap, []string{dbName}, nil
+ }
+
+	// If no backupDBName is given, try to import all the DBs. If one of them fails, mark the whole
+	// operation a failure and return an error.
+ var newDBs []string
+ for _, dbi := range other.Databases {
+ if dbi.Name == "_internal" {
+ continue
+ }
+ dbName, err := data.importOneDB(other, dbi.Name, "", "", "", shardIDMap)
+ if err != nil {
+ return nil, nil, err
+ }
+ newDBs = append(newDBs, dbName)
+ }
+ return shardIDMap, newDBs, nil
+}
+
+// importOneDB imports a single database/rp from an external metadata object, renaming them if new names are provided.
+func (data *Data) importOneDB(other Data, backupDBName, restoreDBName, backupRPName, restoreRPName string, shardIDMap map[uint64]uint64) (string, error) {
+
+ dbPtr := other.Database(backupDBName)
+ if dbPtr == nil {
+		return "", fmt.Errorf("imported metadata does not have database named %s", backupDBName)
+ }
+
+ if restoreDBName == "" {
+ restoreDBName = backupDBName
+ }
+
+ if data.Database(restoreDBName) != nil {
+ return "", errors.New("database already exists")
+ }
+
+ // change the names if we want/need to
+ err := data.CreateDatabase(restoreDBName)
+ if err != nil {
+ return "", err
+ }
+ dbImport := data.Database(restoreDBName)
+
+ if backupRPName != "" {
+ rpPtr := dbPtr.RetentionPolicy(backupRPName)
+
+ if rpPtr != nil {
+ rpImport := rpPtr.clone()
+ if restoreRPName == "" {
+ restoreRPName = backupRPName
+ }
+ rpImport.Name = restoreRPName
+ dbImport.RetentionPolicies = []RetentionPolicyInfo{rpImport}
+ dbImport.DefaultRetentionPolicy = restoreRPName
+ } else {
+			return "", fmt.Errorf("retention policy not found in meta backup: %s.%s", backupDBName, backupRPName)
+ }
+
+ } else { // import all RP's without renaming
+ dbImport.DefaultRetentionPolicy = dbPtr.DefaultRetentionPolicy
+ if dbPtr.RetentionPolicies != nil {
+ dbImport.RetentionPolicies = make([]RetentionPolicyInfo, len(dbPtr.RetentionPolicies))
+ for i := range dbPtr.RetentionPolicies {
+ dbImport.RetentionPolicies[i] = dbPtr.RetentionPolicies[i].clone()
+ }
+ }
+
+ }
+
+ // renumber the shard groups and shards for the new retention policy(ies)
+ for _, rpImport := range dbImport.RetentionPolicies {
+ for j, sgImport := range rpImport.ShardGroups {
+ data.MaxShardGroupID++
+ rpImport.ShardGroups[j].ID = data.MaxShardGroupID
+			for k := range sgImport.Shards {
+ data.MaxShardID++
+ shardIDMap[sgImport.Shards[k].ID] = data.MaxShardID
+ sgImport.Shards[k].ID = data.MaxShardID
+ // OSS doesn't use Owners but if we are importing this from Enterprise, we'll want to clear it out
+ // to avoid any issues if they ever export this DB again to bring back to Enterprise.
+ sgImport.Shards[k].Owners = []ShardOwner{}
+ }
+ }
+ }
+
+ return restoreDBName, nil
+}
+
// NodeInfo represents information about a single node in the cluster.
type NodeInfo struct {
ID uint64
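A hedged sketch of how ImportData is typically driven from restore code (the database names are placeholders and the helper itself is hypothetical, not part of the diff):

    package restoreexample

    import "github.com/influxdata/influxdb/services/meta"

    // remapShards imports one database from backup metadata under a new name and
    // returns the old-to-new shard ID mapping used to place restored shard files.
    func remapShards(live *meta.Data, backup meta.Data) (map[uint64]uint64, []string, error) {
        // Empty RP names mean "import every retention policy unchanged".
        return live.ImportData(backup, "telegraf", "telegraf_restored", "", "")
    }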
diff --git a/vendor/github.com/influxdata/influxdb/services/meta/internal/meta.pb.go b/vendor/github.com/influxdata/influxdb/services/meta/internal/meta.pb.go
index cd9130ef71..403b868317 100644
--- a/vendor/github.com/influxdata/influxdb/services/meta/internal/meta.pb.go
+++ b/vendor/github.com/influxdata/influxdb/services/meta/internal/meta.pb.go
@@ -745,6 +745,7 @@ var E_CreateNodeCommand_Command = &proto.ExtensionDesc{
Field: 101,
Name: "meta.CreateNodeCommand.command",
Tag: "bytes,101,opt,name=command",
+ Filename: "internal/meta.proto",
}
type DeleteNodeCommand struct {
@@ -778,6 +779,7 @@ var E_DeleteNodeCommand_Command = &proto.ExtensionDesc{
Field: 102,
Name: "meta.DeleteNodeCommand.command",
Tag: "bytes,102,opt,name=command",
+ Filename: "internal/meta.proto",
}
type CreateDatabaseCommand struct {
@@ -811,6 +813,7 @@ var E_CreateDatabaseCommand_Command = &proto.ExtensionDesc{
Field: 103,
Name: "meta.CreateDatabaseCommand.command",
Tag: "bytes,103,opt,name=command",
+ Filename: "internal/meta.proto",
}
type DropDatabaseCommand struct {
@@ -836,6 +839,7 @@ var E_DropDatabaseCommand_Command = &proto.ExtensionDesc{
Field: 104,
Name: "meta.DropDatabaseCommand.command",
Tag: "bytes,104,opt,name=command",
+ Filename: "internal/meta.proto",
}
type CreateRetentionPolicyCommand struct {
@@ -871,6 +875,7 @@ var E_CreateRetentionPolicyCommand_Command = &proto.ExtensionDesc{
Field: 105,
Name: "meta.CreateRetentionPolicyCommand.command",
Tag: "bytes,105,opt,name=command",
+ Filename: "internal/meta.proto",
}
type DropRetentionPolicyCommand struct {
@@ -904,6 +909,7 @@ var E_DropRetentionPolicyCommand_Command = &proto.ExtensionDesc{
Field: 106,
Name: "meta.DropRetentionPolicyCommand.command",
Tag: "bytes,106,opt,name=command",
+ Filename: "internal/meta.proto",
}
type SetDefaultRetentionPolicyCommand struct {
@@ -939,6 +945,7 @@ var E_SetDefaultRetentionPolicyCommand_Command = &proto.ExtensionDesc{
Field: 107,
Name: "meta.SetDefaultRetentionPolicyCommand.command",
Tag: "bytes,107,opt,name=command",
+ Filename: "internal/meta.proto",
}
type UpdateRetentionPolicyCommand struct {
@@ -998,6 +1005,7 @@ var E_UpdateRetentionPolicyCommand_Command = &proto.ExtensionDesc{
Field: 108,
Name: "meta.UpdateRetentionPolicyCommand.command",
Tag: "bytes,108,opt,name=command",
+ Filename: "internal/meta.proto",
}
type CreateShardGroupCommand struct {
@@ -1039,6 +1047,7 @@ var E_CreateShardGroupCommand_Command = &proto.ExtensionDesc{
Field: 109,
Name: "meta.CreateShardGroupCommand.command",
Tag: "bytes,109,opt,name=command",
+ Filename: "internal/meta.proto",
}
type DeleteShardGroupCommand struct {
@@ -1080,6 +1089,7 @@ var E_DeleteShardGroupCommand_Command = &proto.ExtensionDesc{
Field: 110,
Name: "meta.DeleteShardGroupCommand.command",
Tag: "bytes,110,opt,name=command",
+ Filename: "internal/meta.proto",
}
type CreateContinuousQueryCommand struct {
@@ -1123,6 +1133,7 @@ var E_CreateContinuousQueryCommand_Command = &proto.ExtensionDesc{
Field: 111,
Name: "meta.CreateContinuousQueryCommand.command",
Tag: "bytes,111,opt,name=command",
+ Filename: "internal/meta.proto",
}
type DropContinuousQueryCommand struct {
@@ -1156,6 +1167,7 @@ var E_DropContinuousQueryCommand_Command = &proto.ExtensionDesc{
Field: 112,
Name: "meta.DropContinuousQueryCommand.command",
Tag: "bytes,112,opt,name=command",
+ Filename: "internal/meta.proto",
}
type CreateUserCommand struct {
@@ -1197,6 +1209,7 @@ var E_CreateUserCommand_Command = &proto.ExtensionDesc{
Field: 113,
Name: "meta.CreateUserCommand.command",
Tag: "bytes,113,opt,name=command",
+ Filename: "internal/meta.proto",
}
type DropUserCommand struct {
@@ -1222,6 +1235,7 @@ var E_DropUserCommand_Command = &proto.ExtensionDesc{
Field: 114,
Name: "meta.DropUserCommand.command",
Tag: "bytes,114,opt,name=command",
+ Filename: "internal/meta.proto",
}
type UpdateUserCommand struct {
@@ -1255,6 +1269,7 @@ var E_UpdateUserCommand_Command = &proto.ExtensionDesc{
Field: 115,
Name: "meta.UpdateUserCommand.command",
Tag: "bytes,115,opt,name=command",
+ Filename: "internal/meta.proto",
}
type SetPrivilegeCommand struct {
@@ -1296,6 +1311,7 @@ var E_SetPrivilegeCommand_Command = &proto.ExtensionDesc{
Field: 116,
Name: "meta.SetPrivilegeCommand.command",
Tag: "bytes,116,opt,name=command",
+ Filename: "internal/meta.proto",
}
type SetDataCommand struct {
@@ -1321,6 +1337,7 @@ var E_SetDataCommand_Command = &proto.ExtensionDesc{
Field: 117,
Name: "meta.SetDataCommand.command",
Tag: "bytes,117,opt,name=command",
+ Filename: "internal/meta.proto",
}
type SetAdminPrivilegeCommand struct {
@@ -1354,6 +1371,7 @@ var E_SetAdminPrivilegeCommand_Command = &proto.ExtensionDesc{
Field: 118,
Name: "meta.SetAdminPrivilegeCommand.command",
Tag: "bytes,118,opt,name=command",
+ Filename: "internal/meta.proto",
}
type UpdateNodeCommand struct {
@@ -1387,6 +1405,7 @@ var E_UpdateNodeCommand_Command = &proto.ExtensionDesc{
Field: 119,
Name: "meta.UpdateNodeCommand.command",
Tag: "bytes,119,opt,name=command",
+ Filename: "internal/meta.proto",
}
type CreateSubscriptionCommand struct {
@@ -1444,6 +1463,7 @@ var E_CreateSubscriptionCommand_Command = &proto.ExtensionDesc{
Field: 121,
Name: "meta.CreateSubscriptionCommand.command",
Tag: "bytes,121,opt,name=command",
+ Filename: "internal/meta.proto",
}
type DropSubscriptionCommand struct {
@@ -1485,6 +1505,7 @@ var E_DropSubscriptionCommand_Command = &proto.ExtensionDesc{
Field: 122,
Name: "meta.DropSubscriptionCommand.command",
Tag: "bytes,122,opt,name=command",
+ Filename: "internal/meta.proto",
}
type RemovePeerCommand struct {
@@ -1518,6 +1539,7 @@ var E_RemovePeerCommand_Command = &proto.ExtensionDesc{
Field: 123,
Name: "meta.RemovePeerCommand.command",
Tag: "bytes,123,opt,name=command",
+ Filename: "internal/meta.proto",
}
type CreateMetaNodeCommand struct {
@@ -1559,6 +1581,7 @@ var E_CreateMetaNodeCommand_Command = &proto.ExtensionDesc{
Field: 124,
Name: "meta.CreateMetaNodeCommand.command",
Tag: "bytes,124,opt,name=command",
+ Filename: "internal/meta.proto",
}
type CreateDataNodeCommand struct {
@@ -1592,6 +1615,7 @@ var E_CreateDataNodeCommand_Command = &proto.ExtensionDesc{
Field: 125,
Name: "meta.CreateDataNodeCommand.command",
Tag: "bytes,125,opt,name=command",
+ Filename: "internal/meta.proto",
}
type UpdateDataNodeCommand struct {
@@ -1633,6 +1657,7 @@ var E_UpdateDataNodeCommand_Command = &proto.ExtensionDesc{
Field: 126,
Name: "meta.UpdateDataNodeCommand.command",
Tag: "bytes,126,opt,name=command",
+ Filename: "internal/meta.proto",
}
type DeleteMetaNodeCommand struct {
@@ -1658,6 +1683,7 @@ var E_DeleteMetaNodeCommand_Command = &proto.ExtensionDesc{
Field: 127,
Name: "meta.DeleteMetaNodeCommand.command",
Tag: "bytes,127,opt,name=command",
+ Filename: "internal/meta.proto",
}
type DeleteDataNodeCommand struct {
@@ -1683,6 +1709,7 @@ var E_DeleteDataNodeCommand_Command = &proto.ExtensionDesc{
Field: 128,
Name: "meta.DeleteDataNodeCommand.command",
Tag: "bytes,128,opt,name=command",
+ Filename: "internal/meta.proto",
}
type Response struct {
@@ -1759,6 +1786,7 @@ var E_SetMetaNodeCommand_Command = &proto.ExtensionDesc{
Field: 129,
Name: "meta.SetMetaNodeCommand.command",
Tag: "bytes,129,opt,name=command",
+ Filename: "internal/meta.proto",
}
type DropShardCommand struct {
@@ -1784,6 +1812,7 @@ var E_DropShardCommand_Command = &proto.ExtensionDesc{
Field: 130,
Name: "meta.DropShardCommand.command",
Tag: "bytes,130,opt,name=command",
+ Filename: "internal/meta.proto",
}
func init() {
@@ -1865,107 +1894,118 @@ func init() {
func init() { proto.RegisterFile("internal/meta.proto", fileDescriptorMeta) }
var fileDescriptorMeta = []byte{
- // 1617 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x58, 0x5b, 0x6f, 0x1b, 0xc5,
- 0x17, 0xd7, 0xda, 0x6b, 0xc7, 0x7b, 0x62, 0x27, 0xf6, 0x38, 0x97, 0x4d, 0x9b, 0xa4, 0xee, 0xe8,
- 0x7f, 0xf1, 0xff, 0x2f, 0x51, 0x24, 0x2b, 0x15, 0x42, 0x5c, 0xdb, 0xb8, 0xa5, 0x11, 0x4a, 0x1a,
- 0x62, 0x17, 0xde, 0xaa, 0x6e, 0xed, 0x49, 0xb3, 0x60, 0xef, 0x9a, 0xdd, 0x75, 0xd3, 0x50, 0x68,
- 0x03, 0x12, 0x42, 0x20, 0x21, 0xc1, 0x0b, 0x2f, 0x3c, 0xf1, 0xc6, 0x37, 0x40, 0x3c, 0xf0, 0x29,
- 0xf8, 0x42, 0x68, 0x66, 0xf6, 0x32, 0xbb, 0x3b, 0xb3, 0x69, 0xfb, 0x66, 0xcf, 0x39, 0x73, 0x7e,
- 0xbf, 0x39, 0xb7, 0x39, 0xb3, 0xd0, 0xb6, 0x9d, 0x80, 0x78, 0x8e, 0x35, 0x79, 0x7d, 0x4a, 0x02,
- 0xeb, 0xda, 0xcc, 0x73, 0x03, 0x17, 0xe9, 0xf4, 0x37, 0xfe, 0xad, 0x04, 0x7a, 0xdf, 0x0a, 0x2c,
- 0x54, 0x07, 0x7d, 0x48, 0xbc, 0xa9, 0xa9, 0x75, 0x4a, 0x5d, 0x1d, 0x35, 0xa0, 0xb2, 0xe7, 0x8c,
- 0xc9, 0x13, 0xb3, 0xc4, 0xfe, 0xb6, 0xc0, 0xd8, 0x9d, 0xcc, 0xfd, 0x80, 0x78, 0x7b, 0x7d, 0xb3,
- 0xcc, 0x96, 0xb6, 0xa0, 0x72, 0xe0, 0x8e, 0x89, 0x6f, 0xea, 0x9d, 0x72, 0x77, 0xb1, 0xb7, 0x74,
- 0x8d, 0x99, 0xa6, 0x4b, 0x7b, 0xce, 0xb1, 0x8b, 0xfe, 0x0d, 0x06, 0x35, 0xfb, 0xd0, 0xf2, 0x89,
- 0x6f, 0x56, 0x98, 0x0a, 0xe2, 0x2a, 0xd1, 0x32, 0x53, 0xdb, 0x82, 0xca, 0x3d, 0x9f, 0x78, 0xbe,
- 0x59, 0x15, 0xad, 0xd0, 0x25, 0x26, 0x6e, 0x81, 0xb1, 0x6f, 0x3d, 0x61, 0x46, 0xfb, 0xe6, 0x02,
- 0xc3, 0x5d, 0x87, 0xe5, 0x7d, 0xeb, 0xc9, 0xe0, 0xc4, 0xf2, 0xc6, 0x1f, 0x78, 0xee, 0x7c, 0xb6,
- 0xd7, 0x37, 0x6b, 0x4c, 0x80, 0x00, 0x22, 0xc1, 0x5e, 0xdf, 0x34, 0xd8, 0xda, 0x55, 0xce, 0x82,
- 0x13, 0x05, 0x29, 0xd1, 0xab, 0x60, 0xec, 0x93, 0x48, 0x65, 0x51, 0xa6, 0x82, 0xaf, 0x43, 0x2d,
- 0x56, 0x07, 0x28, 0xed, 0xf5, 0x43, 0x27, 0xd5, 0x41, 0xbf, 0xe3, 0xfa, 0x01, 0xf3, 0x91, 0x81,
- 0x96, 0x61, 0x61, 0xb8, 0x7b, 0xc8, 0x16, 0xca, 0x1d, 0xad, 0x6b, 0xe0, 0xdf, 0x35, 0xa8, 0xa7,
- 0x0e, 0x5b, 0x07, 0xfd, 0xc0, 0x9a, 0x12, 0xb6, 0xdb, 0x40, 0xdb, 0xb0, 0xd6, 0x27, 0xc7, 0xd6,
- 0x7c, 0x12, 0x1c, 0x91, 0x80, 0x38, 0x81, 0xed, 0x3a, 0x87, 0xee, 0xc4, 0x1e, 0x9d, 0x85, 0xf6,
- 0x76, 0xa0, 0x95, 0x16, 0xd8, 0xc4, 0x37, 0xcb, 0x8c, 0xe0, 0x06, 0x27, 0x98, 0xd9, 0xc7, 0x30,
- 0x76, 0xa0, 0xb5, 0xeb, 0x3a, 0x81, 0xed, 0xcc, 0xdd, 0xb9, 0xff, 0xd1, 0x9c, 0x78, 0x76, 0x1c,
- 0xa2, 0x70, 0x57, 0x5a, 0xcc, 0x76, 0xe1, 0x11, 0xb4, 0x33, 0xc6, 0x06, 0x33, 0x32, 0x12, 0x08,
- 0x6b, 0x5d, 0x03, 0x35, 0xa1, 0xd6, 0x9f, 0x7b, 0x16, 0xd5, 0x31, 0x4b, 0x1d, 0xad, 0x5b, 0x46,
- 0x97, 0x00, 0x25, 0x81, 0x88, 0x65, 0x65, 0x26, 0x6b, 0x42, 0xed, 0x88, 0xcc, 0x26, 0xf6, 0xc8,
- 0x3a, 0x30, 0xf5, 0x8e, 0xd6, 0x6d, 0xe0, 0xbf, 0xb4, 0x1c, 0x8a, 0xc4, 0x2d, 0x69, 0x94, 0x52,
- 0x01, 0x4a, 0x29, 0x87, 0x52, 0xea, 0x36, 0xd0, 0xff, 0x60, 0x31, 0xd1, 0x8e, 0x52, 0x6f, 0x85,
- 0x1f, 0x5d, 0xc8, 0x1a, 0x0a, 0xfc, 0x1a, 0x34, 0x06, 0xf3, 0x87, 0xfe, 0xc8, 0xb3, 0x67, 0xd4,
- 0x64, 0x94, 0x84, 0x6b, 0xa1, 0xb2, 0x20, 0x62, 0x4e, 0xfa, 0x5e, 0x83, 0xa5, 0x8c, 0x05, 0x31,
- 0x1b, 0x5a, 0x60, 0x0c, 0x02, 0xcb, 0x0b, 0x86, 0xf6, 0x94, 0x84, 0xcc, 0x97, 0x61, 0xe1, 0x96,
- 0x33, 0x66, 0x0b, 0x9c, 0x6e, 0x0b, 0x8c, 0x3e, 0x99, 0x90, 0x80, 0x8c, 0x6f, 0x04, 0x8c, 0x6f,
- 0x19, 0x5d, 0x81, 0x2a, 0x33, 0x1a, 0x51, 0x5d, 0x16, 0xa8, 0x32, 0x8c, 0x36, 0x2c, 0x0e, 0xbd,
- 0xb9, 0x33, 0xb2, 0xf8, 0xae, 0x2a, 0xf5, 0x2e, 0xbe, 0x0b, 0x46, 0xa2, 0x21, 0xb2, 0x58, 0x81,
- 0xda, 0xdd, 0x53, 0x87, 0xd6, 0xa9, 0x6f, 0x96, 0x3a, 0xe5, 0xae, 0x7e, 0xb3, 0x64, 0x6a, 0xa8,
- 0x03, 0x55, 0xb6, 0x1a, 0x25, 0x50, 0x53, 0x00, 0x61, 0x02, 0xdc, 0x87, 0x66, 0xf6, 0xc0, 0x99,
- 0xc0, 0xd4, 0x41, 0xdf, 0x77, 0xc7, 0x24, 0xcc, 0xce, 0x15, 0xa8, 0xf7, 0x89, 0x1f, 0xd8, 0x8e,
- 0xc5, 0x5d, 0x47, 0xed, 0x1a, 0x78, 0x13, 0x20, 0xb1, 0x89, 0x96, 0xa0, 0x1a, 0x96, 0x2e, 0xe3,
- 0x86, 0x7b, 0xd0, 0x96, 0x24, 0x5f, 0x06, 0xa6, 0x01, 0x15, 0x26, 0xe2, 0x38, 0xf8, 0x3e, 0xd4,
- 0xe2, 0x6e, 0x90, 0xe3, 0x73, 0xc7, 0xf2, 0x4f, 0x42, 0x3e, 0x0d, 0xa8, 0xdc, 0x18, 0x4f, 0x6d,
- 0x9e, 0x17, 0x35, 0xf4, 0x5f, 0x80, 0x43, 0xcf, 0x7e, 0x6c, 0x4f, 0xc8, 0xa3, 0x38, 0xff, 0xdb,
- 0x49, 0x73, 0x89, 0x65, 0x78, 0x07, 0x1a, 0xa9, 0x05, 0x96, 0x7f, 0x61, 0xd1, 0x86, 0x40, 0x2d,
- 0x30, 0x62, 0x31, 0x43, 0xab, 0xe0, 0xbf, 0xab, 0xb0, 0xb0, 0xeb, 0x4e, 0xa7, 0x96, 0x33, 0x46,
- 0x1d, 0xd0, 0x83, 0xb3, 0x19, 0x57, 0x5e, 0x8a, 0x9a, 0x5c, 0x28, 0xbc, 0x36, 0x3c, 0x9b, 0x11,
- 0xfc, 0x6b, 0x15, 0x74, 0xfa, 0x03, 0xad, 0x42, 0x6b, 0xd7, 0x23, 0x56, 0x40, 0xa8, 0x5b, 0x42,
- 0x95, 0xa6, 0x46, 0x97, 0x79, 0x56, 0x88, 0xcb, 0x25, 0xb4, 0x01, 0xab, 0x5c, 0x3b, 0xe2, 0x13,
- 0x89, 0xca, 0x68, 0x1d, 0xda, 0x7d, 0xcf, 0x9d, 0x65, 0x05, 0x3a, 0xea, 0xc0, 0x26, 0xdf, 0x93,
- 0x29, 0xb4, 0x48, 0xa3, 0x82, 0xb6, 0xe1, 0x12, 0xdd, 0xaa, 0x90, 0x57, 0xd1, 0xbf, 0xa0, 0x33,
- 0x20, 0x81, 0xbc, 0x33, 0x45, 0x5a, 0x0b, 0x14, 0xe7, 0xde, 0x6c, 0xac, 0xc6, 0xa9, 0xa1, 0xcb,
- 0xb0, 0xce, 0x99, 0x24, 0x25, 0x13, 0x09, 0x0d, 0x2a, 0xe4, 0x27, 0xce, 0x0b, 0x21, 0x39, 0x43,
- 0x26, 0x59, 0x22, 0x8d, 0xc5, 0xe8, 0x0c, 0x0a, 0x79, 0x3d, 0xf1, 0x33, 0x0d, 0x6d, 0xb4, 0xdc,
- 0x40, 0x6d, 0x58, 0xa6, 0xdb, 0xc4, 0xc5, 0x25, 0xaa, 0xcb, 0x4f, 0x22, 0x2e, 0x2f, 0x53, 0x0f,
- 0x0f, 0x48, 0x10, 0xc7, 0x3d, 0x12, 0x34, 0x11, 0x82, 0x25, 0xea, 0x1f, 0x2b, 0xb0, 0xa2, 0xb5,
- 0x16, 0xda, 0x04, 0x73, 0x40, 0x02, 0x96, 0x7f, 0xb9, 0x1d, 0x28, 0x41, 0x10, 0xc3, 0xdb, 0x46,
- 0x5b, 0xb0, 0x11, 0x3a, 0x48, 0xa8, 0xbb, 0x48, 0xbc, 0xca, 0x5c, 0xe4, 0xb9, 0x33, 0x99, 0x70,
- 0x8d, 0x9a, 0x3c, 0x22, 0x53, 0xf7, 0x31, 0x39, 0x24, 0x09, 0xe9, 0xf5, 0x24, 0x63, 0xa2, 0x1b,
- 0x2d, 0x12, 0x99, 0xe9, 0x64, 0x12, 0x45, 0x1b, 0x54, 0xc4, 0xf9, 0x65, 0x45, 0x97, 0xa8, 0x88,
- 0xc7, 0x29, 0x6b, 0xf0, 0x72, 0x22, 0xca, 0xee, 0xda, 0x44, 0x6b, 0x80, 0x06, 0x24, 0xc8, 0x6e,
- 0xd9, 0x42, 0x2b, 0xd0, 0x64, 0x47, 0xa2, 0x31, 0x8f, 0x56, 0xb7, 0xff, 0x5f, 0xab, 0x8d, 0x9b,
- 0xe7, 0xe7, 0xe7, 0xe7, 0x25, 0x7c, 0x22, 0x29, 0x8f, 0xf8, 0x92, 0x8d, 0x8b, 0xfe, 0xc8, 0x72,
- 0xc6, 0x7c, 0x2c, 0xe9, 0xbd, 0x01, 0x0b, 0xa3, 0x50, 0xad, 0x91, 0xaa, 0x3b, 0x93, 0x74, 0xb4,
- 0xee, 0x62, 0x6f, 0x3d, 0x5c, 0xcc, 0x1a, 0xc5, 0x8f, 0x24, 0x15, 0x97, 0x6a, 0xa3, 0x0d, 0xa8,
- 0xdc, 0x76, 0xbd, 0x11, 0xaf, 0xf7, 0x5a, 0x01, 0xd0, 0xb1, 0x08, 0x94, 0xb3, 0x89, 0x7f, 0xd1,
- 0x14, 0x45, 0x9c, 0x69, 0x66, 0x3d, 0x58, 0xce, 0x4f, 0x01, 0x5a, 0xe1, 0x55, 0xdf, 0x7b, 0x4b,
- 0x49, 0xea, 0x11, 0xdb, 0x7a, 0x59, 0x3c, 0x7d, 0x06, 0x1e, 0xdf, 0x97, 0x76, 0x90, 0x34, 0xab,
- 0xde, 0x9b, 0x4a, 0x84, 0x13, 0x91, 0x9c, 0xc4, 0x10, 0x1d, 0x7e, 0x0a, 0x3b, 0x91, 0xa4, 0xcf,
- 0x4a, 0x7d, 0x50, 0x2a, 0xf6, 0xc1, 0x4d, 0x25, 0x43, 0x9b, 0x31, 0xc4, 0xa2, 0x0f, 0xe4, 0x4c,
- 0xf0, 0xb3, 0xa2, 0x8e, 0x28, 0xe1, 0x19, 0xf9, 0x88, 0x5d, 0x3c, 0xbd, 0xf7, 0x95, 0x0c, 0x3e,
- 0x65, 0x0c, 0x3a, 0x89, 0x8f, 0x14, 0xf8, 0x3f, 0x68, 0x17, 0xb7, 0xdc, 0x0b, 0x69, 0xdc, 0x56,
- 0xd2, 0xf8, 0x8c, 0xd1, 0xf8, 0x4f, 0x78, 0xe3, 0x5f, 0x80, 0x83, 0xff, 0xd0, 0x8a, 0x3b, 0xfb,
- 0x45, 0x44, 0xe8, 0xcc, 0x73, 0x40, 0x4e, 0xd9, 0x42, 0x39, 0x37, 0x36, 0xea, 0xb9, 0xd1, 0xb0,
- 0x42, 0x47, 0xc3, 0x82, 0x30, 0x4e, 0xc4, 0x30, 0x16, 0x11, 0xc3, 0x3f, 0x6a, 0xca, 0x1b, 0x47,
- 0x42, 0x7a, 0x09, 0xaa, 0xa9, 0x69, 0xbb, 0x05, 0x06, 0x9d, 0xd3, 0xfc, 0xc0, 0x9a, 0xce, 0xf8,
- 0xb0, 0xd6, 0x7b, 0x47, 0x49, 0x6a, 0xca, 0x48, 0x6d, 0x89, 0xb9, 0x95, 0xc3, 0xc4, 0x3f, 0x69,
- 0xca, 0x4b, 0xee, 0x05, 0xf8, 0xac, 0x40, 0x3d, 0xf5, 0xc6, 0x61, 0x8f, 0xae, 0x02, 0x4a, 0x8e,
- 0x48, 0x49, 0x01, 0x8b, 0x7f, 0xd6, 0x8a, 0xaf, 0xd6, 0x0b, 0x83, 0x1b, 0x0f, 0x67, 0x65, 0x96,
- 0x74, 0xea, 0xb0, 0xb9, 0xf9, 0xea, 0x93, 0x43, 0x46, 0xd5, 0xf7, 0x6a, 0x84, 0x0a, 0xaa, 0x6f,
- 0x96, 0xad, 0x3e, 0x05, 0xfe, 0xa9, 0x64, 0x56, 0x78, 0x89, 0x49, 0xb3, 0xe0, 0x6a, 0xf8, 0x3c,
- 0x7f, 0x07, 0x09, 0x18, 0xf8, 0xe3, 0xdc, 0x34, 0x92, 0xe9, 0xbe, 0xd7, 0x95, 0x96, 0x3d, 0x66,
- 0x79, 0x35, 0x39, 0x9b, 0x68, 0xf7, 0x44, 0x32, 0xd0, 0x14, 0x1d, 0xa8, 0xe0, 0x04, 0xbe, 0x78,
- 0x82, 0x9c, 0x51, 0xfc, 0x9d, 0x26, 0x1d, 0x92, 0x68, 0xd0, 0xa8, 0x9a, 0x93, 0x7e, 0xd4, 0x45,
- 0x61, 0x2c, 0xe5, 0x87, 0x6a, 0xea, 0xc9, 0x4a, 0xc1, 0x6d, 0x13, 0x88, 0xb7, 0x8d, 0x04, 0x11,
- 0x3f, 0xc8, 0x0e, 0x65, 0xc8, 0xe4, 0x9f, 0x35, 0x18, 0xfe, 0x62, 0x0f, 0x92, 0x4f, 0x0f, 0xbd,
- 0x1d, 0x25, 0xcc, 0x9c, 0xc1, 0xac, 0x24, 0x9d, 0x32, 0xb1, 0x87, 0x9f, 0xaa, 0x47, 0x3c, 0xc9,
- 0x79, 0xe3, 0x1c, 0xe1, 0xe3, 0xc3, 0xbb, 0x4a, 0xc8, 0xc7, 0x0c, 0x72, 0x3b, 0x86, 0x94, 0x02,
- 0xe0, 0x63, 0xc9, 0x04, 0xa9, 0xfe, 0x12, 0x51, 0x10, 0xd0, 0xd3, 0x7c, 0x40, 0xc5, 0x69, 0xe5,
- 0x4f, 0xad, 0x60, 0x26, 0x95, 0xbc, 0xd3, 0xd3, 0x21, 0x5d, 0xcf, 0xdf, 0xdf, 0xe5, 0xd4, 0xcb,
- 0x51, 0x97, 0xbe, 0x1c, 0xe9, 0xb3, 0xd7, 0xe8, 0xbd, 0xa7, 0xe4, 0x7c, 0xc6, 0x38, 0x5f, 0x49,
- 0x35, 0xdb, 0x3c, 0x3b, 0xda, 0xdb, 0x54, 0x03, 0xf3, 0x2b, 0x33, 0x2f, 0xe8, 0xb7, 0x5f, 0xa4,
- 0xfa, 0xad, 0x1c, 0x97, 0xc6, 0x2d, 0x37, 0xa6, 0xc7, 0x71, 0xd3, 0x78, 0xdc, 0x6e, 0x8c, 0xc7,
- 0xde, 0x85, 0x71, 0x7b, 0x2a, 0xc6, 0x2d, 0x67, 0x12, 0x7f, 0xab, 0x29, 0x06, 0x7f, 0x7a, 0xd6,
- 0x3b, 0xc3, 0xe1, 0x21, 0x03, 0xd1, 0x84, 0xcf, 0x54, 0x09, 0x6a, 0x3c, 0x52, 0xf3, 0x1b, 0x46,
- 0x3d, 0x54, 0x7e, 0x99, 0x1f, 0x2a, 0x33, 0x68, 0xf8, 0x54, 0xf1, 0xc8, 0x78, 0x01, 0x1a, 0x05,
- 0xc0, 0x5f, 0xc9, 0xa7, 0x59, 0x11, 0xf8, 0xb9, 0xe2, 0x09, 0xf3, 0xa2, 0x9f, 0xeb, 0x8a, 0x09,
- 0x3c, 0x13, 0x09, 0x48, 0x71, 0xf0, 0x03, 0xc5, 0x43, 0x49, 0x24, 0x50, 0x80, 0xf0, 0x5c, 0x44,
- 0x90, 0x1a, 0xc2, 0x96, 0xe2, 0xbd, 0x95, 0x42, 0x78, 0x5b, 0x89, 0x70, 0xae, 0xe5, 0x21, 0xb2,
- 0x87, 0xd8, 0xa1, 0x73, 0x99, 0x3f, 0x73, 0x1d, 0x9f, 0x50, 0xab, 0x77, 0x3f, 0x64, 0x56, 0x6b,
- 0xb4, 0x9b, 0xdd, 0xf2, 0x3c, 0xd7, 0x63, 0x4f, 0x12, 0x23, 0xf9, 0x36, 0x4c, 0xe7, 0x3b, 0x1d,
- 0x9f, 0x6b, 0xb2, 0xe7, 0xde, 0xcb, 0x67, 0x9e, 0xba, 0xfd, 0x7f, 0xcd, 0xb9, 0x9b, 0x71, 0x97,
- 0xcc, 0xfa, 0xe6, 0x93, 0xfc, 0xc3, 0x32, 0xe5, 0x16, 0x75, 0x61, 0x7d, 0xc3, 0x4d, 0xaf, 0x09,
- 0x75, 0x2c, 0x18, 0xf9, 0x27, 0x00, 0x00, 0xff, 0xff, 0x5d, 0xc6, 0xc9, 0x45, 0x39, 0x17, 0x00,
- 0x00,
+ // 1808 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x59, 0x4b, 0x6f, 0x1c, 0xc7,
+ 0x11, 0x46, 0xcf, 0x3e, 0xb8, 0x5b, 0x7c, 0xaa, 0xf9, 0x1a, 0x4a, 0x14, 0xb3, 0x18, 0x08, 0xca,
+ 0x22, 0x08, 0x98, 0x60, 0x03, 0xe8, 0x94, 0x97, 0xc4, 0x95, 0xc4, 0x85, 0xc0, 0x47, 0x66, 0xa9,
+ 0x6b, 0x80, 0x11, 0xb7, 0x25, 0x6e, 0xb2, 0x3b, 0xb3, 0x99, 0x99, 0x95, 0xc4, 0x28, 0x4c, 0x18,
+ 0x5f, 0x7c, 0xb5, 0x61, 0x18, 0x3e, 0xe8, 0x66, 0x1f, 0x7c, 0x34, 0x0c, 0x03, 0x06, 0x0c, 0x9f,
+ 0x7c, 0xf7, 0x1f, 0xf0, 0x7f, 0xb0, 0xcf, 0xbe, 0x1a, 0xdd, 0x3d, 0x3d, 0xdd, 0x33, 0xd3, 0x3d,
+ 0x24, 0x65, 0xf9, 0x36, 0x5d, 0x55, 0xdd, 0xf5, 0x55, 0x75, 0x75, 0x75, 0x55, 0x0f, 0x2c, 0x0f,
+ 0xfd, 0x98, 0x84, 0xbe, 0x37, 0xfa, 0xdd, 0x98, 0xc4, 0xde, 0xf6, 0x24, 0x0c, 0xe2, 0x00, 0x57,
+ 0xe9, 0xb7, 0xf3, 0x5e, 0x05, 0xaa, 0x5d, 0x2f, 0xf6, 0x30, 0x86, 0xea, 0x11, 0x09, 0xc7, 0x36,
+ 0x6a, 0x59, 0xed, 0xaa, 0xcb, 0xbe, 0xf1, 0x0a, 0xd4, 0x7a, 0xfe, 0x80, 0xbc, 0xb4, 0x2d, 0x46,
+ 0xe4, 0x03, 0xbc, 0x09, 0xcd, 0x9d, 0xd1, 0x34, 0x8a, 0x49, 0xd8, 0xeb, 0xda, 0x15, 0xc6, 0x91,
+ 0x04, 0x7c, 0x0b, 0x6a, 0xfb, 0xc1, 0x80, 0x44, 0x76, 0xb5, 0x55, 0x69, 0xcf, 0x76, 0x16, 0xb6,
+ 0x99, 0x4a, 0x4a, 0xea, 0xf9, 0x4f, 0x03, 0x97, 0x33, 0xf1, 0xef, 0xa1, 0x49, 0xb5, 0x3e, 0xf1,
+ 0x22, 0x12, 0xd9, 0x35, 0x26, 0x89, 0xb9, 0xa4, 0x20, 0x33, 0x69, 0x29, 0x44, 0xd7, 0x7d, 0x1c,
+ 0x91, 0x30, 0xb2, 0xeb, 0xea, 0xba, 0x94, 0xc4, 0xd7, 0x65, 0x4c, 0x8a, 0x6d, 0xcf, 0x7b, 0xc9,
+ 0xb4, 0x75, 0xed, 0x19, 0x8e, 0x2d, 0x25, 0xe0, 0x36, 0x2c, 0xee, 0x79, 0x2f, 0xfb, 0x27, 0x5e,
+ 0x38, 0x78, 0x18, 0x06, 0xd3, 0x49, 0xaf, 0x6b, 0x37, 0x98, 0x4c, 0x9e, 0x8c, 0xb7, 0x00, 0x04,
+ 0xa9, 0xd7, 0xb5, 0x9b, 0x4c, 0x48, 0xa1, 0xe0, 0xdf, 0x72, 0xfc, 0xdc, 0x52, 0xd0, 0x5a, 0x2a,
+ 0x05, 0xa8, 0xf4, 0x1e, 0x11, 0xd2, 0xb3, 0x7a, 0xe9, 0x54, 0xc0, 0xd9, 0x85, 0x86, 0x20, 0xe3,
+ 0x05, 0xb0, 0x7a, 0xdd, 0x64, 0x4f, 0xac, 0x5e, 0x97, 0xee, 0xd2, 0x6e, 0x10, 0xc5, 0x6c, 0x43,
+ 0x9a, 0x2e, 0xfb, 0xc6, 0x36, 0xcc, 0x1c, 0xed, 0x1c, 0x32, 0x72, 0xa5, 0x85, 0xda, 0x4d, 0x57,
+ 0x0c, 0x9d, 0xef, 0x11, 0xcc, 0xa9, 0xfe, 0xa4, 0xd3, 0xf7, 0xbd, 0x31, 0x61, 0x0b, 0x36, 0x5d,
+ 0xf6, 0x8d, 0xef, 0xc0, 0x5a, 0x97, 0x3c, 0xf5, 0xa6, 0xa3, 0xd8, 0x25, 0x31, 0xf1, 0xe3, 0x61,
+ 0xe0, 0x1f, 0x06, 0xa3, 0xe1, 0xf1, 0x69, 0xa2, 0xc4, 0xc0, 0xc5, 0x0f, 0xe1, 0x5a, 0x96, 0x34,
+ 0x24, 0x91, 0x5d, 0x61, 0xc6, 0x6d, 0x70, 0xe3, 0x72, 0x33, 0x98, 0x9d, 0xc5, 0x39, 0x74, 0xa1,
+ 0x9d, 0xc0, 0x8f, 0x87, 0xfe, 0x34, 0x98, 0x46, 0x7f, 0x9b, 0x92, 0x70, 0x98, 0x46, 0x4f, 0xb2,
+ 0x50, 0x96, 0x9d, 0x2c, 0x54, 0x98, 0xe3, 0xbc, 0x8f, 0x60, 0x39, 0xa7, 0xb3, 0x3f, 0x21, 0xc7,
+ 0x8a, 0xd5, 0x28, 0xb5, 0xfa, 0x3a, 0x34, 0xba, 0xd3, 0xd0, 0xa3, 0x92, 0xb6, 0xd5, 0x42, 0xed,
+ 0x8a, 0x9b, 0x8e, 0xf1, 0x36, 0x60, 0x19, 0x0c, 0xa9, 0x54, 0x85, 0x49, 0x69, 0x38, 0x74, 0x2d,
+ 0x97, 0x4c, 0x46, 0xc3, 0x63, 0x6f, 0xdf, 0xae, 0xb6, 0x50, 0x7b, 0xde, 0x4d, 0xc7, 0xce, 0xbb,
+ 0x56, 0x01, 0x93, 0x71, 0x27, 0xb2, 0x98, 0xac, 0x4b, 0x61, 0xb2, 0x2e, 0x85, 0xc9, 0x52, 0x31,
+ 0xe1, 0x3b, 0x30, 0x2b, 0x67, 0x88, 0xe3, 0xb7, 0xc2, 0x5d, 0xad, 0x9c, 0x02, 0xea, 0x65, 0x55,
+ 0x10, 0xff, 0x11, 0xe6, 0xfb, 0xd3, 0x27, 0xd1, 0x71, 0x38, 0x9c, 0x50, 0x1d, 0xe2, 0x28, 0xae,
+ 0x25, 0x33, 0x15, 0x16, 0x9b, 0x9b, 0x15, 0x76, 0xbe, 0x41, 0xb0, 0x90, 0x5d, 0xbd, 0x10, 0xdd,
+ 0x9b, 0xd0, 0xec, 0xc7, 0x5e, 0x18, 0x1f, 0x0d, 0xc7, 0x24, 0xf1, 0x80, 0x24, 0xd0, 0x38, 0xbf,
+ 0xef, 0x0f, 0x18, 0x8f, 0xdb, 0x2d, 0x86, 0x74, 0x5e, 0x97, 0x8c, 0x48, 0x4c, 0x06, 0x77, 0x63,
+ 0x66, 0x6d, 0xc5, 0x95, 0x04, 0xfc, 0x6b, 0xa8, 0x33, 0xbd, 0xc2, 0xd2, 0x45, 0xc5, 0x52, 0x06,
+ 0x34, 0x61, 0xe3, 0x16, 0xcc, 0x1e, 0x85, 0x53, 0xff, 0xd8, 0xe3, 0x0b, 0xd5, 0xd9, 0x86, 0xab,
+ 0x24, 0x87, 0x40, 0x33, 0x9d, 0x56, 0x40, 0xbf, 0x05, 0x8d, 0x83, 0x17, 0x3e, 0x4d, 0x82, 0x91,
+ 0x6d, 0xb5, 0x2a, 0xed, 0xea, 0x3d, 0xcb, 0x46, 0x6e, 0x4a, 0xc3, 0x6d, 0xa8, 0xb3, 0x6f, 0x71,
+ 0x4a, 0x96, 0x14, 0x1c, 0x8c, 0xe1, 0x26, 0x7c, 0xe7, 0xef, 0xb0, 0x94, 0xf7, 0xa6, 0x36, 0x60,
+ 0x30, 0x54, 0xf7, 0x82, 0x01, 0x11, 0xd9, 0x80, 0x7e, 0x63, 0x07, 0xe6, 0xba, 0x24, 0x8a, 0x87,
+ 0xbe, 0xc7, 0xf7, 0x88, 0xea, 0x6a, 0xba, 0x19, 0x9a, 0x73, 0x0b, 0x40, 0x6a, 0xc5, 0x6b, 0x50,
+ 0x4f, 0x12, 0x26, 0xb7, 0x25, 0x19, 0x39, 0x7f, 0x81, 0x65, 0xcd, 0xc1, 0xd3, 0x02, 0x59, 0x81,
+ 0x1a, 0x13, 0x48, 0x90, 0xf0, 0x81, 0x73, 0x06, 0x0d, 0x91, 0x9f, 0x4d, 0xf0, 0x77, 0xbd, 0xe8,
+ 0x24, 0x4d, 0x66, 0x5e, 0x74, 0x42, 0x57, 0xba, 0x3b, 0x18, 0x0f, 0x79, 0x68, 0x37, 0x5c, 0x3e,
+ 0xc0, 0x7f, 0x00, 0x38, 0x0c, 0x87, 0xcf, 0x87, 0x23, 0xf2, 0x2c, 0xcd, 0x0d, 0xcb, 0xf2, 0x06,
+ 0x48, 0x79, 0xae, 0x22, 0xe6, 0xf4, 0x60, 0x3e, 0xc3, 0x64, 0xe7, 0x2b, 0xc9, 0x86, 0x09, 0x8e,
+ 0x74, 0x4c, 0x43, 0x28, 0x15, 0x64, 0x80, 0x6a, 0xae, 0x24, 0x38, 0xdf, 0xd5, 0x61, 0x66, 0x27,
+ 0x18, 0x8f, 0x3d, 0x7f, 0x80, 0x6f, 0x43, 0x35, 0x3e, 0x9d, 0xf0, 0x15, 0x16, 0xc4, 0xad, 0x95,
+ 0x30, 0xb7, 0x8f, 0x4e, 0x27, 0xc4, 0x65, 0x7c, 0xe7, 0x75, 0x1d, 0xaa, 0x74, 0x88, 0x57, 0xe1,
+ 0xda, 0x4e, 0x48, 0xbc, 0x98, 0x50, 0xbf, 0x26, 0x82, 0x4b, 0x88, 0x92, 0x79, 0x8c, 0xaa, 0x64,
+ 0x0b, 0x6f, 0xc0, 0x2a, 0x97, 0x16, 0xd0, 0x04, 0xab, 0x82, 0xd7, 0x61, 0xb9, 0x1b, 0x06, 0x93,
+ 0x3c, 0xa3, 0x8a, 0x5b, 0xb0, 0xc9, 0xe7, 0xe4, 0x32, 0x8d, 0x90, 0xa8, 0xe1, 0x2d, 0xb8, 0x4e,
+ 0xa7, 0x1a, 0xf8, 0x75, 0x7c, 0x0b, 0x5a, 0x7d, 0x12, 0xeb, 0x33, 0xbd, 0x90, 0x9a, 0xa1, 0x7a,
+ 0x1e, 0x4f, 0x06, 0x66, 0x3d, 0x0d, 0x7c, 0x03, 0xd6, 0x39, 0x12, 0x79, 0xd2, 0x05, 0xb3, 0x49,
+ 0x99, 0xdc, 0xe2, 0x22, 0x13, 0xa4, 0x0d, 0xb9, 0x98, 0x13, 0x12, 0xb3, 0xc2, 0x06, 0x03, 0x7f,
+ 0x4e, 0xfa, 0x99, 0xee, 0xba, 0x20, 0xcf, 0xe3, 0x65, 0x58, 0xa4, 0xd3, 0x54, 0xe2, 0x02, 0x95,
+ 0xe5, 0x96, 0xa8, 0xe4, 0x45, 0xea, 0xe1, 0x3e, 0x89, 0xd3, 0x7d, 0x17, 0x8c, 0x25, 0x8c, 0x61,
+ 0x81, 0xfa, 0xc7, 0x8b, 0x3d, 0x41, 0xbb, 0x86, 0x37, 0xc1, 0xee, 0x93, 0x98, 0x05, 0x68, 0x61,
+ 0x06, 0x96, 0x1a, 0xd4, 0xed, 0x5d, 0xc6, 0x37, 0x61, 0x23, 0x71, 0x90, 0x72, 0xc0, 0x05, 0x7b,
+ 0x95, 0xb9, 0x28, 0x0c, 0x26, 0x3a, 0xe6, 0x1a, 0x5d, 0xd2, 0x25, 0xe3, 0xe0, 0x39, 0x39, 0x24,
+ 0x12, 0xf4, 0xba, 0x8c, 0x18, 0x51, 0x42, 0x08, 0x96, 0x9d, 0x0d, 0x26, 0x95, 0xb5, 0x41, 0x59,
+ 0x1c, 0x5f, 0x9e, 0x75, 0x9d, 0xb2, 0xf8, 0x3e, 0xe5, 0x17, 0xbc, 0x21, 0x59, 0xf9, 0x59, 0x9b,
+ 0x78, 0x0d, 0x70, 0x9f, 0xc4, 0xf9, 0x29, 0x37, 0xf1, 0x0a, 0x2c, 0x31, 0x93, 0xe8, 0x9e, 0x0b,
+ 0xea, 0xd6, 0x6f, 0x1a, 0x8d, 0xc1, 0xd2, 0xf9, 0xf9, 0xf9, 0xb9, 0xe5, 0x9c, 0x69, 0x8e, 0x47,
+ 0x5a, 0xe7, 0x20, 0xa5, 0xce, 0xc1, 0x50, 0x75, 0x3d, 0x7f, 0x90, 0x14, 0xa3, 0xec, 0xbb, 0xf3,
+ 0x57, 0x98, 0x39, 0x4e, 0xa6, 0xcc, 0x67, 0x4e, 0xa2, 0x4d, 0x5a, 0xa8, 0x3d, 0xdb, 0x59, 0x4f,
+ 0x88, 0x79, 0x05, 0xae, 0x98, 0xe6, 0xbc, 0xd2, 0x1c, 0xc3, 0x42, 0x6a, 0x5f, 0x81, 0xda, 0x83,
+ 0x20, 0x3c, 0xe6, 0x99, 0xa1, 0xe1, 0xf2, 0x41, 0x89, 0xf2, 0xa7, 0xaa, 0xf2, 0xc2, 0xf2, 0x52,
+ 0xf9, 0x97, 0xc8, 0x70, 0xda, 0xb5, 0xf9, 0x72, 0x07, 0x16, 0x8b, 0x25, 0x1a, 0x2a, 0xaf, 0xb7,
+ 0xf2, 0x33, 0x3a, 0x5d, 0x23, 0xe8, 0x67, 0x6c, 0xad, 0x1b, 0xaa, 0xc7, 0x72, 0xa8, 0x24, 0xf0,
+ 0xb1, 0x36, 0x15, 0xe9, 0x50, 0x77, 0xee, 0x19, 0x15, 0x9e, 0xa8, 0xe0, 0x35, 0xcb, 0x49, 0x75,
+ 0xdf, 0xa2, 0xf2, 0x0c, 0x57, 0x9a, 0xda, 0xb5, 0x6e, 0xb3, 0xae, 0xe8, 0xb6, 0x47, 0x46, 0x2b,
+ 0x86, 0xcc, 0x0a, 0x47, 0x75, 0x9b, 0x1e, 0xa4, 0x34, 0xe7, 0x23, 0x54, 0x96, 0x8e, 0x4b, 0x8d,
+ 0x11, 0x1e, 0xb6, 0x14, 0x0f, 0xf7, 0x8c, 0xd8, 0xfe, 0xc1, 0xb0, 0xb5, 0xa4, 0x87, 0x2f, 0x42,
+ 0xf6, 0x09, 0xba, 0xf8, 0x22, 0xb8, 0x32, 0xbe, 0x03, 0x23, 0xbe, 0x7f, 0x32, 0x7c, 0xb7, 0x93,
+ 0x42, 0xe8, 0x02, 0xbd, 0x12, 0xe5, 0x0f, 0xa8, 0xfc, 0x22, 0xba, 0x2a, 0x42, 0x5a, 0x5a, 0xee,
+ 0x93, 0x17, 0x8c, 0x9c, 0xb4, 0x50, 0xc9, 0x30, 0x53, 0x93, 0x57, 0x73, 0x7d, 0x82, 0x5a, 0x63,
+ 0xd7, 0xb2, 0x75, 0x7f, 0x49, 0xbc, 0x8c, 0xd4, 0x78, 0x29, 0xb3, 0x42, 0xda, 0xfb, 0x05, 0x32,
+ 0x5e, 0xab, 0xa5, 0xa6, 0xae, 0x41, 0x3d, 0xd3, 0xca, 0x25, 0x23, 0x5a, 0xec, 0xd0, 0xba, 0x39,
+ 0x8a, 0xbd, 0xf1, 0x24, 0xa9, 0xa5, 0x25, 0xa1, 0xf3, 0xc0, 0x08, 0x7d, 0xcc, 0xa0, 0xdf, 0x54,
+ 0x43, 0xbd, 0x00, 0x48, 0xa2, 0xfe, 0x0a, 0x19, 0xef, 0xfb, 0x37, 0x42, 0xed, 0xc0, 0x5c, 0xa6,
+ 0x75, 0xe7, 0x4f, 0x0f, 0x19, 0x5a, 0x09, 0x76, 0x5f, 0xc5, 0x6e, 0x80, 0x25, 0xb1, 0x7f, 0x8e,
+ 0xca, 0xcb, 0x91, 0x2b, 0x47, 0x58, 0x5a, 0x21, 0x57, 0x94, 0x0a, 0xb9, 0x24, 0x4a, 0x82, 0x62,
+ 0x56, 0xd1, 0x23, 0x29, 0x66, 0x95, 0xb7, 0x83, 0xb8, 0x24, 0xab, 0x4c, 0xf2, 0x59, 0xe5, 0x22,
+ 0x64, 0x1f, 0x20, 0x4d, 0x69, 0xf6, 0xf3, 0x5a, 0x82, 0x92, 0xcb, 0xf7, 0x5f, 0xc5, 0x9b, 0x5f,
+ 0x51, 0x2b, 0x51, 0x91, 0x42, 0x61, 0xa8, 0xbd, 0xbf, 0xfe, 0x6c, 0x54, 0x14, 0x32, 0x45, 0xab,
+ 0xd2, 0x0f, 0x5a, 0x35, 0x67, 0x9a, 0x52, 0xf3, 0xb2, 0xb6, 0x97, 0x58, 0x19, 0xa9, 0x56, 0x16,
+ 0x14, 0x48, 0xf5, 0x9f, 0x21, 0x6d, 0x4d, 0x4b, 0xc3, 0x81, 0xca, 0xfb, 0x12, 0x45, 0x3a, 0xce,
+ 0x84, 0x8a, 0x55, 0xd6, 0x28, 0x55, 0x72, 0x8d, 0x52, 0xc9, 0x65, 0x1f, 0xab, 0x97, 0xbd, 0x06,
+ 0x90, 0x44, 0x1c, 0xe4, 0x6b, 0x6d, 0xbc, 0xc5, 0xdf, 0x28, 0x19, 0xce, 0xd9, 0x0e, 0xc8, 0x87,
+ 0x42, 0x97, 0xd1, 0x3b, 0x7f, 0x32, 0x6a, 0x9d, 0x32, 0xad, 0x2b, 0xf2, 0x82, 0x91, 0xab, 0x4a,
+ 0x85, 0x1f, 0x22, 0x73, 0x25, 0x5f, 0xea, 0xa7, 0x34, 0x32, 0x2d, 0x35, 0x32, 0x1f, 0x1a, 0xd1,
+ 0x3c, 0x67, 0x68, 0xb6, 0x52, 0x34, 0x5a, 0x8d, 0x12, 0xd7, 0xa9, 0xa6, 0x85, 0xb8, 0xcc, 0x8b,
+ 0x60, 0x49, 0xd4, 0xbc, 0x28, 0x46, 0x8d, 0xb6, 0x30, 0xfd, 0x11, 0x95, 0xf4, 0x29, 0xc6, 0xc7,
+ 0x2b, 0x53, 0xcc, 0xb4, 0x8b, 0x15, 0x18, 0x4f, 0x83, 0x79, 0x72, 0xfa, 0xa2, 0x51, 0x2d, 0x79,
+ 0xd1, 0xa8, 0x15, 0x5f, 0x34, 0x3a, 0xbb, 0x46, 0x8b, 0x4f, 0x99, 0xc5, 0xbf, 0xca, 0xdc, 0x59,
+ 0x45, 0x93, 0xa4, 0xe5, 0x5f, 0x23, 0x63, 0x0b, 0xf6, 0xcb, 0xd9, 0x5d, 0x72, 0x6f, 0xfd, 0x3b,
+ 0x73, 0x6f, 0xe9, 0x81, 0x65, 0x42, 0xa6, 0xd0, 0x22, 0xa6, 0x21, 0x83, 0x64, 0xc8, 0xdc, 0x1d,
+ 0x0c, 0x42, 0x11, 0x32, 0xf4, 0xbb, 0x24, 0x64, 0x5e, 0xa9, 0x21, 0x53, 0x58, 0x5c, 0xaa, 0xfe,
+ 0x14, 0x19, 0xfa, 0x50, 0xea, 0xa2, 0xdd, 0xa3, 0xa3, 0x43, 0xa6, 0x33, 0x39, 0x42, 0x62, 0x9c,
+ 0x3c, 0x5e, 0x2b, 0x70, 0xc4, 0x30, 0x6d, 0xf7, 0x2a, 0x4a, 0xbb, 0x67, 0x6e, 0x5e, 0xfe, 0x53,
+ 0x6c, 0x5e, 0x72, 0x30, 0x32, 0xd7, 0x91, 0xbe, 0x2d, 0x7e, 0x33, 0xa4, 0x25, 0xa8, 0xce, 0xf4,
+ 0x2d, 0x95, 0x16, 0xd5, 0x6b, 0x64, 0xe8, 0xc8, 0xaf, 0xfe, 0x13, 0xc0, 0x52, 0x7e, 0x02, 0x94,
+ 0xa0, 0xfb, 0xaf, 0x8a, 0x4e, 0xab, 0x5a, 0x6d, 0xf8, 0xf4, 0x6f, 0x02, 0x79, 0x70, 0x25, 0xea,
+ 0xfe, 0xa7, 0xaa, 0xd3, 0x2e, 0x26, 0xd5, 0xf9, 0x86, 0x77, 0x86, 0x82, 0xba, 0xfb, 0x46, 0x75,
+ 0xe7, 0xa8, 0xa8, 0xcf, 0x68, 0xde, 0x03, 0x5a, 0xca, 0x47, 0x93, 0xc0, 0x8f, 0x08, 0x55, 0x71,
+ 0xf0, 0x88, 0xa9, 0x68, 0xb8, 0xd6, 0xc1, 0x23, 0x9a, 0xe5, 0xef, 0x87, 0x61, 0x10, 0xb2, 0x66,
+ 0xbb, 0xe9, 0xf2, 0x81, 0xfc, 0x37, 0x56, 0x61, 0xe7, 0x8a, 0x0f, 0x9c, 0x8f, 0x91, 0xee, 0x15,
+ 0xe4, 0x2d, 0x9e, 0x00, 0xf3, 0x05, 0xfb, 0x7f, 0x6e, 0xaf, 0x9d, 0xde, 0x2e, 0x46, 0xe7, 0x0e,
+ 0x8a, 0x2f, 0x32, 0x05, 0xbf, 0x9a, 0xf3, 0xc1, 0x3b, 0x5c, 0xcf, 0x9a, 0x92, 0x91, 0x94, 0x85,
+ 0x52, 0x2d, 0x3f, 0x05, 0x00, 0x00, 0xff, 0xff, 0xc7, 0x04, 0x86, 0xd9, 0x75, 0x1c, 0x00, 0x00,
}
diff --git a/vendor/github.com/influxdata/influxdb/services/snapshotter/client.go b/vendor/github.com/influxdata/influxdb/services/snapshotter/client.go
index 2c1e1b8ada..bf49120ff5 100644
--- a/vendor/github.com/influxdata/influxdb/services/snapshotter/client.go
+++ b/vendor/github.com/influxdata/influxdb/services/snapshotter/client.go
@@ -8,8 +8,13 @@ import (
"fmt"
"io"
+ "archive/tar"
"github.com/influxdata/influxdb/services/meta"
"github.com/influxdata/influxdb/tcp"
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
)
// Client provides an API for the snapshotter service.
@@ -22,6 +27,135 @@ func NewClient(host string) *Client {
return &Client{host: host}
}
+// UpdateMeta sends a metadata update request to the snapshotter service over a TCP connection, streams the metadata
+// upload, and returns a mapping of the uploaded metadata shard IDs to the actual shard IDs on the destination system.
+func (c *Client) UpdateMeta(req *Request, upStream io.Reader) (map[uint64]uint64, error) {
+ var err error
+
+ // Connect to snapshotter service.
+ conn, err := tcp.Dial("tcp", c.host, MuxHeader)
+ if err != nil {
+ return nil, err
+ }
+ defer conn.Close()
+
+ if _, err := conn.Write([]byte{byte(req.Type)}); err != nil {
+ return nil, err
+ }
+
+ if err := json.NewEncoder(conn).Encode(req); err != nil {
+ return nil, fmt.Errorf("encode snapshot request: %s", err)
+ }
+
+ if n, err := io.Copy(conn, upStream); (err != nil && err != io.EOF) || n != req.UploadSize {
+ return nil, fmt.Errorf("error uploading file: err=%v, n=%d, uploadSize: %d", err, n, req.UploadSize)
+ }
+
+ resp, err := ioutil.ReadAll(conn)
+ if err != nil || len(resp) == 0 {
+ return nil, fmt.Errorf("updating metadata on influxd service failed: err=%v, n=%d", err, len(resp))
+ }
+
+ if len(resp) < 16 {
+ return nil, fmt.Errorf("response too short to be a metadata update response: %d", len(resp))
+ }
+ header, npairs, err := decodeUintPair(resp[:16])
+ if err != nil {
+ return nil, err
+ }
+
+ if npairs == 0 {
+ return nil, fmt.Errorf("DB metadata not changed. database may already exist")
+ }
+
+ pairs := resp[16:]
+
+ if header != BackupMagicHeader {
+ return nil, fmt.Errorf("Response did not contain the proper header tag.")
+ }
+
+ if uint64(len(pairs)) != npairs*16 {
+ return nil, fmt.Errorf("expected an even number of integer pairs in update meta repsonse")
+ }
+
+ shardIDMap := make(map[uint64]uint64)
+ for i := 0; i < int(npairs); i++ {
+ offset := i * 16
+ k, v, err := decodeUintPair(pairs[offset : offset+16])
+ if err != nil {
+ return nil, err
+ }
+ shardIDMap[k] = v
+ }
+
+ return shardIDMap, nil
+}
+
+func decodeUintPair(bits []byte) (uint64, uint64, error) {
+ if len(bits) != 16 {
+ return 0, 0, errors.New("slice must have exactly 16 bytes")
+ }
+ v1 := binary.BigEndian.Uint64(bits[:8])
+ v2 := binary.BigEndian.Uint64(bits[8:16])
+ return v1, v2, nil
+}
+
+func (c *Client) UploadShard(shardID, newShardID uint64, destinationDatabase, restoreRetention string, tr *tar.Reader) error {
+
+ conn, err := tcp.Dial("tcp", c.host, MuxHeader)
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+
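+ // The upload begins with the RequestShardUpdate type byte followed by the new shard ID as a big-endian uint64.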
+ var shardBytes [9]byte
+ shardBytes[0] = byte(RequestShardUpdate)
+ binary.BigEndian.PutUint64(shardBytes[1:], newShardID)
+ if _, err := conn.Write(shardBytes[:]); err != nil {
+ return err
+ }
+
+ tw := tar.NewWriter(conn)
+ defer tw.Close()
+
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+
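+ // Rewrite the tar entry path so the database, retention policy and shard ID segments point at the restore target.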
+ names := strings.Split(filepath.FromSlash(hdr.Name), string(filepath.Separator))
+
+ if len(names) < 4 {
+ return fmt.Errorf("error parsing file name from shard tarfile: %s", hdr.Name)
+ }
+
+ if destinationDatabase == "" {
+ destinationDatabase = names[0]
+ }
+
+ if restoreRetention == "" {
+ restoreRetention = names[1]
+ }
+
+ filepathArgs := []string{destinationDatabase, restoreRetention, strconv.FormatUint(newShardID, 10)}
+ filepathArgs = append(filepathArgs, names[3:]...)
+ hdr.Name = filepath.ToSlash(filepath.Join(filepathArgs...))
+
+ if err := tw.WriteHeader(hdr); err != nil {
+ return err
+ }
+
+ if _, err := io.Copy(tw, tr); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
// MetastoreBackup returns a snapshot of the meta store.
func (c *Client) MetastoreBackup() (*meta.Data, error) {
req := &Request{
diff --git a/vendor/github.com/influxdata/influxdb/services/snapshotter/service.go b/vendor/github.com/influxdata/influxdb/services/snapshotter/service.go
index 6053e3ed99..2296c4e4c0 100644
--- a/vendor/github.com/influxdata/influxdb/services/snapshotter/service.go
+++ b/vendor/github.com/influxdata/influxdb/services/snapshotter/service.go
@@ -44,6 +44,9 @@ type Service struct {
ExportShard(id uint64, ExportStart time.Time, ExportEnd time.Time, w io.Writer) error
Shard(id uint64) *tsdb.Shard
ShardRelativePath(id uint64) (string, error)
+ SetShardEnabled(shardID uint64, enabled bool) error
+ RestoreShard(id uint64, r io.Reader) error
+ CreateShard(database, retentionPolicy string, shardID uint64, enabled bool) error
}
Listener net.Listener
@@ -89,7 +92,6 @@ func (s *Service) serve() {
for {
// Wait for next connection.
conn, err := s.Listener.Accept()
-
if err != nil && strings.Contains(err.Error(), "connection closed") {
s.Logger.Info("snapshot listener closed")
return
@@ -119,7 +121,11 @@ func (s *Service) handleConn(conn net.Conn) error {
return err
}
- r, err := s.readRequest(conn)
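+ // Shard uploads are framed as raw binary rather than a JSON request, so handle them before decoding.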
+ if RequestType(typ[0]) == RequestShardUpdate {
+ return s.updateShardsLive(conn)
+ }
+
+ r, bytes, err := s.readRequest(conn)
if err != nil {
return fmt.Errorf("read request: %s", err)
}
@@ -141,6 +147,8 @@ func (s *Service) handleConn(conn net.Conn) error {
return s.writeDatabaseInfo(conn, r.BackupDatabase)
case RequestRetentionPolicyInfo:
return s.writeRetentionPolicyInfo(conn, r.BackupDatabase, r.BackupRetentionPolicy)
+ case RequestMetaStoreUpdate:
+ return s.updateMetaStore(conn, bytes, r.BackupDatabase, r.RestoreDatabase, r.BackupRetentionPolicy, r.RestoreRetentionPolicy)
default:
return fmt.Errorf("request type unknown: %v", r.Type)
}
@@ -148,10 +156,109 @@ func (s *Service) handleConn(conn net.Conn) error {
return nil
}
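+// updateShardsLive reads the 8-byte target shard ID from the connection, disables that shard,
+// restores its contents from the remaining stream, and re-enables it on return.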
+func (s *Service) updateShardsLive(conn net.Conn) error {
+ var sidBytes [8]byte
+ _, err := io.ReadFull(conn, sidBytes[:])
+ if err != nil {
+ return err
+ }
+ sid := binary.BigEndian.Uint64(sidBytes[:])
+
+ if err := s.TSDBStore.SetShardEnabled(sid, false); err != nil {
+ return err
+ }
+ defer s.TSDBStore.SetShardEnabled(sid, true)
+
+ if err := s.TSDBStore.RestoreShard(sid, conn); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *Service) updateMetaStore(conn net.Conn, bits []byte, backupDBName, restoreDBName, backupRPName, restoreRPName string) error {
+ md := meta.Data{}
+ err := md.UnmarshalBinary(bits)
+ if err != nil {
+ if err := s.respondIDMap(conn, map[uint64]uint64{}); err != nil {
+ return err
+ }
+ return fmt.Errorf("failed to decode meta: %s", err)
+ }
+
+ data := s.MetaClient.(*meta.Client).Data()
+
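+ // Merge the uploaded metadata into the local metadata; ImportData returns a mapping from backed-up shard IDs to their IDs in the merged metadata, plus any newly added databases.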
+ IDMap, newDBs, err := data.ImportData(md, backupDBName, restoreDBName, backupRPName, restoreRPName)
+ if err != nil {
+ if err := s.respondIDMap(conn, map[uint64]uint64{}); err != nil {
+ return err
+ }
+ return err
+ }
+
+ err = s.MetaClient.(*meta.Client).SetData(&data)
+ if err != nil {
+ return err
+ }
+
+ err = s.createNewDBShards(data, newDBs)
+ if err != nil {
+ return err
+ }
+
+ err = s.respondIDMap(conn, IDMap)
+ return err
+}
+
+// createNewDBShards iterates over the list of databases that should have just been added to the metadata
+// and creates their shards locally. If a database was not created in the metadata, an error is returned.
+// None of the shards should exist on a new database, and CreateShard protects against double-creation.
+func (s *Service) createNewDBShards(data meta.Data, newDBs []string) error {
+ for _, restoreDBName := range newDBs {
+ dbi := data.Database(restoreDBName)
+ if dbi == nil {
+ return fmt.Errorf("db %s not found when creating new db shards", restoreDBName)
+ }
+ for _, rpi := range dbi.RetentionPolicies {
+ for _, sgi := range rpi.ShardGroups {
+ for _, shard := range sgi.Shards {
+ err := s.TSDBStore.CreateShard(restoreDBName, rpi.Name, shard.ID, true)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// respondIDMap sends the mapping from shard IDs in the source server's metadata to shard IDs
+// in this server's metadata. It sends back [BackupMagicHeader, 0] if there are no mapped
+// values, signaling that nothing should be imported.
+func (s *Service) respondIDMap(conn net.Conn, IDMap map[uint64]uint64) error {
+ npairs := len(IDMap)
+ // 16-byte header (magic value and pair count), followed by npairs pairs of 8-byte integers.
+ numBytes := make([]byte, (npairs+1)*16)
+
+ binary.BigEndian.PutUint64(numBytes[:8], BackupMagicHeader)
+ binary.BigEndian.PutUint64(numBytes[8:16], uint64(npairs))
+ next := 16
+ for k, v := range IDMap {
+ binary.BigEndian.PutUint64(numBytes[next:next+8], k)
+ binary.BigEndian.PutUint64(numBytes[next+8:next+16], v)
+ next += 16
+ }
+
+ if _, err := conn.Write(numBytes[:]); err != nil {
+ return err
+ }
+ return nil
+}
+
func (s *Service) writeMetaStore(conn net.Conn) error {
// Retrieve and serialize the current meta data.
metaBlob, err := s.MetaClient.MarshalBinary()
-
if err != nil {
return fmt.Errorf("marshal meta: %s", err)
}
@@ -274,12 +381,39 @@ func (s *Service) writeRetentionPolicyInfo(conn net.Conn, database, retentionPol
}
// readRequest unmarshals a request object from the conn.
-func (s *Service) readRequest(conn net.Conn) (Request, error) {
+func (s *Service) readRequest(conn net.Conn) (Request, []byte, error) {
var r Request
- if err := json.NewDecoder(conn).Decode(&r); err != nil {
- return r, err
+ d := json.NewDecoder(conn)
+
+ if err := d.Decode(&r); err != nil {
+ return r, nil, err
+ }
+
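+ // UploadSize+1 leaves room for the stray byte the client-side JSON encoder leaves in the stream (trimmed off below).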
+ bits := make([]byte, r.UploadSize+1)
+
+ if r.UploadSize > 0 {
+
+ remainder := d.Buffered()
+
+ n, err := remainder.Read(bits)
+ if err != nil && err != io.EOF {
+ return r, bits, err
+ }
+
+ // The JSON decoder may or may not have consumed the bytes that follow the request,
+ // so read any remaining payload directly from the connection.
+ if err != io.EOF && n < int(r.UploadSize+1) {
+ n, err = conn.Read(bits[n:])
+ }
+
+ if err != nil && err != io.EOF {
+ return r, bits, err
+ }
+ // The client-side JSON encoder writes a trailing newline that ends up at the front of the payload, so trim it off.
+ return r, bits[1:], nil
}
- return r, nil
+
+ return r, bits, nil
}
// RequestType indicates the type of snapshot request.
@@ -292,6 +426,9 @@ const (
// RequestMetastoreBackup represents a request to back up the metastore.
RequestMetastoreBackup
+ // RequestSeriesFileBackup represents a request to back up the database series file.
+ RequestSeriesFileBackup
+
// RequestDatabaseInfo represents a request for database info.
RequestDatabaseInfo
diff --git a/vendor/github.com/influxdata/influxdb/services/storage/series_cursor.go b/vendor/github.com/influxdata/influxdb/services/storage/series_cursor.go
index 3f4100f89b..fc9395dce3 100644
--- a/vendor/github.com/influxdata/influxdb/services/storage/series_cursor.go
+++ b/vendor/github.com/influxdata/influxdb/services/storage/series_cursor.go
@@ -157,7 +157,7 @@ RETRY:
keyb := []byte(key)
mm, _ := models.ParseName(keyb)
c.row.measurement = string(mm)
- c.tags, _ = models.ParseTags(keyb)
+ c.tags = models.ParseTags(keyb)
c.filterset = mapValuer{"_name": c.row.measurement}
for _, tag := range c.tags {
diff --git a/vendor/github.com/influxdata/influxdb/test.sh b/vendor/github.com/influxdata/influxdb/test.sh
index 2da83eb265..0da63a96b6 100755
--- a/vendor/github.com/influxdata/influxdb/test.sh
+++ b/vendor/github.com/influxdata/influxdb/test.sh
@@ -26,7 +26,7 @@ OUTPUT_DIR=${OUTPUT_DIR-./test-logs}
# Set default parallelism
PARALLELISM=${PARALLELISM-1}
# Set default timeout
-TIMEOUT=${TIMEOUT-1200s}
+TIMEOUT=${TIMEOUT-1500s}
# Default to deleting the container
DOCKER_RM=${DOCKER_RM-true}
diff --git a/vendor/github.com/influxdata/influxdb/tests/backup_restore_test.go b/vendor/github.com/influxdata/influxdb/tests/backup_restore_test.go
index 7d7962f38c..159c7b4c47 100644
--- a/vendor/github.com/influxdata/influxdb/tests/backup_restore_test.go
+++ b/vendor/github.com/influxdata/influxdb/tests/backup_restore_test.go
@@ -8,6 +8,7 @@ import (
"testing"
"time"
+ "fmt"
"github.com/influxdata/influxdb/cmd/influxd/backup"
"github.com/influxdata/influxdb/cmd/influxd/restore"
)
@@ -15,16 +16,21 @@ import (
func TestServer_BackupAndRestore(t *testing.T) {
config := NewConfig()
config.Data.Engine = "tsm1"
- config.Data.Dir, _ = ioutil.TempDir("", "data_backup")
- config.Meta.Dir, _ = ioutil.TempDir("", "meta_backup")
config.BindAddress = freePort()
- backupDir, _ := ioutil.TempDir("", "backup")
- defer os.RemoveAll(backupDir)
+ fullBackupDir, _ := ioutil.TempDir("", "backup")
+ defer os.RemoveAll(fullBackupDir)
+
+ partialBackupDir, _ := ioutil.TempDir("", "backup")
+ defer os.RemoveAll(partialBackupDir)
+
+ enterpriseBackupDir, _ := ioutil.TempDir("", "backup")
+ defer os.RemoveAll(enterpriseBackupDir)
db := "mydb"
rp := "forever"
- expected := `{"results":[{"statement_id":0,"series":[{"name":"myseries","columns":["time","host","value"],"values":[["1970-01-01T00:00:00.001Z","A",23]]}]}]}`
+ expected := `{"results":[{"statement_id":0,"series":[{"name":"myseries","columns":["time","host","value"],"values":[["1970-01-01T00:00:00.001Z","A",23],["1970-01-01T00:00:00.005Z","B",24],["1970-01-01T00:00:00.009Z","C",25]]}]}]}`
+ partialExpected := `{"results":[{"statement_id":0,"series":[{"name":"myseries","columns":["time","host","value"],"values":[["1970-01-01T00:00:00.001Z","A",23],["1970-01-01T00:00:00.005Z","B",24]]}]}]}`
// set the cache snapshot size low so that a single point will cause TSM file creation
config.Data.CacheSnapshotMemorySize = 1
@@ -37,7 +43,7 @@ func TestServer_BackupAndRestore(t *testing.T) {
t.Skip("Skipping. Cannot modify remote server config")
}
- if err := s.CreateDatabaseAndRetentionPolicy(db, newRetentionPolicySpec(rp, 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy(db, NewRetentionPolicySpec(rp, 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -48,7 +54,23 @@ func TestServer_BackupAndRestore(t *testing.T) {
// wait for the snapshot to write
time.Sleep(time.Second)
- res, err := s.Query(`select * from "mydb"."forever"."myseries"`)
+ if _, err := s.Write(db, rp, "myseries,host=B value=24 5000000", nil); err != nil {
+ t.Fatalf("failed to write: %s", err)
+ }
+
+ // wait for the snapshot to write
+ time.Sleep(time.Second)
+
+ if _, err := s.Write(db, rp, "myseries,host=C value=25 9000000", nil); err != nil {
+ t.Fatalf("failed to write: %s", err)
+ }
+
+ // wait for the snapshot to write
+ time.Sleep(time.Second)
+
+ res, err := s.Query(`show series on mydb; show retention policies on mydb`)
+
+ res, err = s.Query(`select * from "mydb"."forever"."myseries"`)
if err != nil {
t.Fatalf("error querying: %s", err.Error())
}
@@ -63,9 +85,18 @@ func TestServer_BackupAndRestore(t *testing.T) {
t.Fatal(err)
}
hostAddress := net.JoinHostPort("localhost", port)
- if err := cmd.Run("-host", hostAddress, "-database", "mydb", backupDir); err != nil {
+ if err := cmd.Run("-host", hostAddress, "-database", "mydb", fullBackupDir); err != nil {
+ t.Fatalf("error backing up: %s, hostAddress: %s", err.Error(), hostAddress)
+ }
+
+ if err := cmd.Run("-host", hostAddress, "-database", "mydb", "-start", "1970-01-01T00:00:00.001Z", "-end", "1970-01-01T00:00:00.007Z", partialBackupDir); err != nil {
+ t.Fatalf("error backing up: %s, hostAddress: %s", err.Error(), hostAddress)
+ }
+
+ if err := cmd.Run("-enterprise", "-host", hostAddress, "-database", "mydb", "-start", "1970-01-01T00:00:00.001Z", "-end", "1970-01-01T00:00:00.007Z", enterpriseBackupDir); err != nil {
t.Fatalf("error backing up: %s, hostAddress: %s", err.Error(), hostAddress)
}
+
}()
if _, err := os.Stat(config.Meta.Dir); err == nil || !os.IsNotExist(err) {
@@ -76,10 +107,18 @@ func TestServer_BackupAndRestore(t *testing.T) {
t.Fatalf("meta dir should be deleted")
}
+ // In a real restore these directories would already exist on the target server, so create them here.
+ if err := os.MkdirAll(config.Data.Dir, 0777); err != nil {
+ t.Fatalf("error making restore dir: %s", err.Error())
+ }
+ if err := os.MkdirAll(config.Meta.Dir, 0777); err != nil {
+ t.Fatalf("error making restore dir: %s", err.Error())
+ }
+
// restore
cmd := restore.NewCommand()
- if err := cmd.Run("-metadir", config.Meta.Dir, "-datadir", config.Data.Dir, "-database", "mydb", backupDir); err != nil {
+ if err := cmd.Run("-metadir", config.Meta.Dir, "-datadir", config.Data.Dir, "-database", "mydb", fullBackupDir); err != nil {
t.Fatalf("error restoring: %s", err.Error())
}
@@ -93,6 +132,7 @@ func TestServer_BackupAndRestore(t *testing.T) {
s := OpenServer(config)
defer s.Close()
+ // 1. offline restore is correct
res, err := s.Query(`select * from "mydb"."forever"."myseries"`)
if err != nil {
t.Fatalf("error querying: %s", err.Error())
@@ -100,6 +140,94 @@ func TestServer_BackupAndRestore(t *testing.T) {
if res != expected {
t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", expected, res)
}
+
+ _, port, err := net.SplitHostPort(config.BindAddress)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // 2. online restore of a partial backup is correct.
+ hostAddress := net.JoinHostPort("localhost", port)
+ cmd.Run("-host", hostAddress, "-online", "-newdb", "mydbbak", "-db", "mydb", partialBackupDir)
+
+ // wait for the import to finish, and unlock the shard engine.
+ time.Sleep(time.Second)
+
+ res, err = s.Query(`select * from "mydbbak"."forever"."myseries"`)
+ if err != nil {
+ t.Fatalf("error querying: %s", err.Error())
+ }
+
+ if res != partialExpected {
+ t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", partialExpected, res)
+ }
+
+ // 3. online restore of the enterprise-format backup should match the non-enterprise live restore
+ cmd.Run("-host", hostAddress, "-enterprise", "-newdb", "mydbbak2", "-db", "mydb", enterpriseBackupDir)
+
+ // wait for the import to finish, and unlock the shard engine.
+ time.Sleep(time.Second)
+
+ res, err = s.Query(`select * from "mydbbak2"."forever"."myseries"`)
+ if err != nil {
+ t.Fatalf("error querying: %s", err.Error())
+ }
+
+ if res != partialExpected {
+ t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", partialExpected, res)
+ }
+
+ // 4. back up all databases, then drop them, then restore them; the three checks above should pass again.
+ // now backup
+ bCmd := backup.NewCommand()
+
+ if err := bCmd.Run("-enterprise", "-host", hostAddress, enterpriseBackupDir); err != nil {
+ t.Fatalf("error backing up: %s, hostAddress: %s", err.Error(), hostAddress)
+ }
+
+ res, err = s.Query(`drop database mydb; drop database mydbbak; drop database mydbbak2;`)
+ if err != nil {
+ t.Fatalf("Error dropping databases %s", err.Error())
+ }
+
+ // restore everything from the full enterprise backup
+ cmd.Run("-host", hostAddress, "-enterprise", enterpriseBackupDir)
+
+ // wait for the import to finish, and unlock the shard engine.
+ time.Sleep(3 * time.Second)
+
+ res, err = s.Query(`show shards`)
+ if err != nil {
+ t.Fatalf("error querying: %s", err.Error())
+ }
+ fmt.Println(res)
+
+ res, err = s.Query(`select * from "mydbbak"."forever"."myseries"`)
+ if err != nil {
+ t.Fatalf("error querying: %s", err.Error())
+ }
+
+ if res != partialExpected {
+ t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", partialExpected, res)
+ }
+
+ res, err = s.Query(`select * from "mydbbak2"."forever"."myseries"`)
+ if err != nil {
+ t.Fatalf("error querying: %s", err.Error())
+ }
+
+ if res != partialExpected {
+ t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", partialExpected, res)
+ }
+
+ res, err = s.Query(`select * from "mydb"."forever"."myseries"`)
+ if err != nil {
+ t.Fatalf("error querying: %s", err.Error())
+ }
+ if res != expected {
+ t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", expected, res)
+ }
+
}
func freePort() string {
diff --git a/vendor/github.com/influxdata/influxdb/tests/server_helpers.go b/vendor/github.com/influxdata/influxdb/tests/server_helpers.go
index 90891fb122..aeaaadcf9b 100644
--- a/vendor/github.com/influxdata/influxdb/tests/server_helpers.go
+++ b/vendor/github.com/influxdata/influxdb/tests/server_helpers.go
@@ -10,7 +10,9 @@ import (
"net/http"
"net/url"
"os"
+ "path/filepath"
"regexp"
+ "runtime"
"strings"
"sync"
"time"
@@ -24,6 +26,7 @@ import (
var verboseServerLogs bool
var indexType string
+var cleanupData bool
// Server represents a test wrapper for run.Server.
type Server interface {
@@ -162,7 +165,7 @@ func (s *RemoteServer) WritePoints(database, retentionPolicy string, consistency
}
// NewServer returns a new instance of Server.
-func NewServer(c *run.Config) Server {
+func NewServer(c *Config) Server {
buildInfo := &run.BuildInfo{
Version: "testServer",
Commit: "testCommit",
@@ -186,7 +189,7 @@ func NewServer(c *run.Config) Server {
}
// Otherwise create a local server
- srv, _ := run.NewServer(c, buildInfo)
+ srv, _ := run.NewServer(c.Config, buildInfo)
s := LocalServer{
client: &client{},
Server: srv,
@@ -197,7 +200,7 @@ func NewServer(c *run.Config) Server {
}
// OpenServer opens a test server.
-func OpenServer(c *run.Config) Server {
+func OpenServer(c *Config) Server {
s := NewServer(c)
configureLogging(s)
if err := s.Open(); err != nil {
@@ -207,8 +210,8 @@ func OpenServer(c *run.Config) Server {
}
// OpenServerWithVersion opens a test server with a specific version.
-func OpenServerWithVersion(c *run.Config, version string) Server {
- // We can't change the versino of a remote server. The test needs to
+func OpenServerWithVersion(c *Config, version string) Server {
+ // We can't change the version of a remote server. The test needs to
// be skipped if using this func.
if RemoteEnabled() {
panic("OpenServerWithVersion not support with remote server")
@@ -219,7 +222,7 @@ func OpenServerWithVersion(c *run.Config, version string) Server {
Commit: "",
Branch: "",
}
- srv, _ := run.NewServer(c, buildInfo)
+ srv, _ := run.NewServer(c.Config, buildInfo)
s := LocalServer{
client: &client{},
Server: srv,
@@ -236,9 +239,9 @@ func OpenServerWithVersion(c *run.Config, version string) Server {
}
// OpenDefaultServer opens a test server with a default database & retention policy.
-func OpenDefaultServer(c *run.Config) Server {
+func OpenDefaultServer(c *Config) Server {
s := OpenServer(c)
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
panic(err)
}
return s
@@ -250,7 +253,16 @@ type LocalServer struct {
*run.Server
*client
- Config *run.Config
+ Config *Config
+}
+
+// Open opens the server. If running this test on a 32-bit platform it reduces
+// the size of series files so that they can all be addressable in the process.
+func (s *LocalServer) Open() error {
+ if runtime.GOARCH == "386" {
+ s.Server.TSDBStore.SeriesFileMaxSize = 1 << 27 // 128MB
+ }
+ return s.Server.Open()
}
// Close shuts down the server and removes all temporary paths.
@@ -261,12 +273,13 @@ func (s *LocalServer) Close() {
if err := s.Server.Close(); err != nil {
panic(err.Error())
}
- if err := os.RemoveAll(s.Config.Meta.Dir); err != nil {
- panic(err.Error())
- }
- if err := os.RemoveAll(s.Config.Data.Dir); err != nil {
- panic(err.Error())
+
+ if cleanupData {
+ if err := os.RemoveAll(s.Config.rootPath); err != nil {
+ panic(err.Error())
+ }
}
+
// Nil the server so our deadlock detector goroutine can determine if we completed writes
// without timing out
s.Server = nil
@@ -477,17 +490,30 @@ func (s *client) MustWrite(db, rp, body string, params url.Values) string {
return results
}
+// Config is a test wrapper around a run.Config. It also contains a root temp
+// directory, making cleanup easier.
+type Config struct {
+ rootPath string
+ *run.Config
+}
+
// NewConfig returns the default config with temporary paths.
-func NewConfig() *run.Config {
- c := run.NewConfig()
+func NewConfig() *Config {
+ root, err := ioutil.TempDir("", "tests-influxdb-")
+ if err != nil {
+ panic(err)
+ }
+
+ c := &Config{rootPath: root, Config: run.NewConfig()}
c.BindAddress = "127.0.0.1:0"
c.ReportingDisabled = true
c.Coordinator.WriteTimeout = toml.Duration(30 * time.Second)
- c.Meta.Dir = MustTempFile()
+
+ c.Meta.Dir = filepath.Join(c.rootPath, "meta")
c.Meta.LoggingEnabled = verboseServerLogs
- c.Data.Dir = MustTempFile()
- c.Data.WALDir = MustTempFile()
+ c.Data.Dir = filepath.Join(c.rootPath, "data")
+ c.Data.WALDir = filepath.Join(c.rootPath, "wal")
c.Data.QueryLogEnabled = verboseServerLogs
c.Data.TraceLoggingEnabled = verboseServerLogs
c.Data.Index = indexType
@@ -503,7 +529,8 @@ func NewConfig() *run.Config {
return c
}
-func newRetentionPolicySpec(name string, rf int, duration time.Duration) *meta.RetentionPolicySpec {
+// NewRetentionPolicySpec creates a retention policy spec with the given name, replication factor and duration.
+func NewRetentionPolicySpec(name string, rf int, duration time.Duration) *meta.RetentionPolicySpec {
return &meta.RetentionPolicySpec{Name: name, ReplicaN: &rf, Duration: &duration}
}
@@ -547,17 +574,6 @@ func MustReadAll(r io.Reader) []byte {
return b
}
-// MustTempFile returns a path to a temporary file.
-func MustTempFile() string {
- f, err := ioutil.TempFile("", "influxd-")
- if err != nil {
- panic(err)
- }
- f.Close()
- os.Remove(f.Name())
- return f.Name()
-}
-
func RemoteEnabled() bool {
return os.Getenv("URL") != ""
}
@@ -718,7 +734,7 @@ func writeTestData(s Server, t *Test) error {
w.rp = t.retentionPolicy()
}
- if err := s.CreateDatabaseAndRetentionPolicy(w.db, newRetentionPolicySpec(w.rp, 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy(w.db, NewRetentionPolicySpec(w.rp, 1, 0), true); err != nil {
return err
}
if res, err := s.Write(w.db, w.rp, w.data, t.params); err != nil {
diff --git a/vendor/github.com/influxdata/influxdb/tests/server_suite.go b/vendor/github.com/influxdata/influxdb/tests/server_suite.go
index 4db7b982c2..0cad2142f2 100644
--- a/vendor/github.com/influxdata/influxdb/tests/server_suite.go
+++ b/vendor/github.com/influxdata/influxdb/tests/server_suite.go
@@ -295,6 +295,12 @@ func init() {
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverB","uswest",23.2],["2000-01-03T00:00:00Z","serverA","uswest",200]]}]}]}`,
params: url.Values{"db": []string{"db0"}},
},
+ &Query{
+ name: "Make sure other points are deleted",
+ command: `SELECT COUNT(val) FROM cpu WHERE "host" = 'serverA'`,
+ exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`,
+ params: url.Values{"db": []string{"db0"}},
+ },
&Query{
name: "Make sure data wasn't deleted from other database.",
command: `SELECT * FROM cpu`,
diff --git a/vendor/github.com/influxdata/influxdb/tests/server_test.go b/vendor/github.com/influxdata/influxdb/tests/server_test.go
index fef7543f4b..8356ac43c9 100644
--- a/vendor/github.com/influxdata/influxdb/tests/server_test.go
+++ b/vendor/github.com/influxdata/influxdb/tests/server_test.go
@@ -9,6 +9,7 @@ import (
"os"
"strconv"
"strings"
+ "sync"
"testing"
"time"
@@ -21,10 +22,10 @@ import (
var benchServer Server
func TestMain(m *testing.M) {
- vv := flag.Bool("vv", false, "Turn on very verbose server logging.")
+ flag.BoolVar(&verboseServerLogs, "vv", false, "Turn on very verbose server logging.")
+ flag.BoolVar(&cleanupData, "clean", true, "Clean up test data on disk.")
flag.Parse()
- verboseServerLogs = *vv
var r int
for _, indexType = range tsdb.RegisteredIndexes() {
// Setup benchmark server
@@ -101,7 +102,7 @@ func TestServer_Query_DropAndRecreateDatabase(t *testing.T) {
test := tests.load(t, "drop_and_recreate_database")
- if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy(test.database(), NewRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -131,10 +132,10 @@ func TestServer_Query_DropDatabaseIsolated(t *testing.T) {
test := tests.load(t, "drop_database_isolated")
- if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy(test.database(), NewRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
t.Fatal(err)
}
- if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicySpec("rp1", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db1", NewRetentionPolicySpec("rp1", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -164,7 +165,7 @@ func TestServer_Query_DeleteSeries(t *testing.T) {
test := tests.load(t, "delete_series_time")
- if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy(test.database(), NewRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -193,7 +194,7 @@ func TestServer_Query_DeleteSeries_TagFilter(t *testing.T) {
test := tests.load(t, "delete_series_time_tag_filter")
- if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy(test.database(), NewRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -223,7 +224,7 @@ func TestServer_Query_DropAndRecreateSeries(t *testing.T) {
test := tests.load(t, "drop_and_recreate_series")
- if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy(test.database(), NewRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -274,7 +275,7 @@ func TestServer_Query_DropSeriesFromRegex(t *testing.T) {
test := tests.load(t, "drop_series_from_regex")
- if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy(test.database(), NewRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -566,7 +567,7 @@ func TestServer_Write_FieldTypeConflict(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -617,7 +618,7 @@ func TestServer_Write_LineProtocol_Float(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
t.Fatal(err)
}
@@ -642,7 +643,7 @@ func TestServer_Write_LineProtocol_Bool(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
t.Fatal(err)
}
@@ -667,7 +668,7 @@ func TestServer_Write_LineProtocol_String(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
t.Fatal(err)
}
@@ -692,7 +693,7 @@ func TestServer_Write_LineProtocol_Integer(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
t.Fatal(err)
}
@@ -717,7 +718,7 @@ func TestServer_Write_LineProtocol_Unsigned(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
t.Fatal(err)
}
@@ -743,7 +744,7 @@ func TestServer_Write_LineProtocol_Partial(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil {
t.Fatal(err)
}
@@ -3085,7 +3086,7 @@ func TestServer_Query_MergeMany(t *testing.T) {
defer s.Close()
// set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -3146,7 +3147,7 @@ func TestServer_Query_SLimitAndSOffset(t *testing.T) {
defer s.Close()
// set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -3203,7 +3204,7 @@ func TestServer_Query_Regex(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -4199,7 +4200,7 @@ func TestServer_Query_Aggregates_Math(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -4266,7 +4267,7 @@ func TestServer_Query_AggregateSelectors(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -4578,7 +4579,7 @@ func TestServer_Query_ExactTimeRange(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -4633,7 +4634,7 @@ func TestServer_Query_Selectors(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -4712,7 +4713,7 @@ func TestServer_Query_TopBottomInt(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -4966,7 +4967,7 @@ func TestServer_Query_TopBottomWriteTags(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -5035,7 +5036,7 @@ func TestServer_Query_Aggregates_IdenticalTime(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -5101,7 +5102,7 @@ func TestServer_Query_GroupByTimeCutoffs(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -5181,7 +5182,7 @@ func TestServer_Query_MapType(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -5257,7 +5258,7 @@ func TestServer_Query_Subqueries(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -5418,7 +5419,7 @@ func TestServer_Query_SubqueryWithGroupBy(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -5490,7 +5491,7 @@ func TestServer_Query_SubqueryMath(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -5536,7 +5537,7 @@ func TestServer_Query_PercentileDerivative(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -5585,7 +5586,7 @@ func TestServer_Query_UnderscoreMeasurement(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -5630,7 +5631,7 @@ func TestServer_Write_Precision(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -5745,7 +5746,7 @@ func TestServer_Query_Wildcards(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -5836,13 +5837,15 @@ func TestServer_Query_Wildcards(t *testing.T) {
},
}...)
- for i, query := range test.queries {
+ var once sync.Once
+ for _, query := range test.queries {
t.Run(query.name, func(t *testing.T) {
- if i == 0 {
+ once.Do(func() {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
- }
+ })
+
if query.skip {
t.Skipf("SKIP:: %s", query.name)
}
@@ -5861,7 +5864,7 @@ func TestServer_Query_WildcardExpansion(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -5939,7 +5942,7 @@ func TestServer_Query_AcrossShardsAndFields(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -6011,7 +6014,7 @@ func TestServer_Query_OrderedAcrossShards(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -6079,7 +6082,7 @@ func TestServer_Query_Where_Fields(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -6299,7 +6302,7 @@ func TestServer_Query_Where_With_Tags(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -6355,13 +6358,14 @@ func TestServer_Query_Where_With_Tags(t *testing.T) {
},
}...)
- for i, query := range test.queries {
+ var once sync.Once
+ for _, query := range test.queries {
t.Run(query.name, func(t *testing.T) {
- if i == 0 {
+ once.Do(func() {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
- }
+ })
if query.skip {
t.Skipf("SKIP:: %s", query.name)
}
@@ -6379,7 +6383,7 @@ func TestServer_Query_With_EmptyTags(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -6480,7 +6484,7 @@ func TestServer_Query_LimitAndOffset(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -6595,7 +6599,7 @@ func TestServer_Query_Fill(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -6700,7 +6704,7 @@ func TestServer_Query_ImplicitFill(t *testing.T) {
s := OpenServer(config)
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -6754,7 +6758,7 @@ func TestServer_Query_TimeZone(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -6850,7 +6854,7 @@ func TestServer_Query_Chunk(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -6902,10 +6906,10 @@ func TestServer_Query_DropAndRecreateMeasurement(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
- if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db1", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -7064,7 +7068,7 @@ func TestServer_Query_ShowQueries_Future(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -7135,7 +7139,7 @@ func TestServer_Query_ShowSeries(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -7241,13 +7245,14 @@ func TestServer_Query_ShowSeries(t *testing.T) {
},
}...)
- for i, query := range test.queries {
+ var once sync.Once
+ for _, query := range test.queries {
t.Run(query.name, func(t *testing.T) {
- if i == 0 {
+ once.Do(func() {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
- }
+ })
if query.skip {
t.Skipf("SKIP:: %s", query.name)
}
@@ -7269,7 +7274,7 @@ func TestServer_Query_ShowSeriesCardinalityEstimation(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -7339,7 +7344,7 @@ func TestServer_Query_ShowSeriesExactCardinality(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -7475,7 +7480,7 @@ func TestServer_Query_ShowStats(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -7517,7 +7522,7 @@ func TestServer_Query_ShowMeasurements(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -7638,15 +7643,14 @@ func TestServer_Query_ShowMeasurementCardinalityEstimation(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
test := NewTest("db0", "rp0")
test.writes = make(Writes, 0, 10)
- // Add 1,000,000 series.
for j := 0; j < cap(test.writes); j++ {
- writes := make([]string, 0, 50000)
+ writes := make([]string, 0, 10000)
for i := 0; i < cap(writes); i++ {
writes = append(writes, fmt.Sprintf(`cpu-%d-s%d v=1 %d`, j, i, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:01Z").UnixNano()))
}
@@ -7696,7 +7700,7 @@ func TestServer_Query_ShowMeasurementCardinalityEstimation(t *testing.T) {
}
cardinality := got.Results[0].Series[0].Values[0][0]
- if cardinality < 450000 || cardinality > 550000 {
+ if cardinality < 50000 || cardinality > 150000 {
t.Errorf("got cardinality %d, which is 10%% or more away from expected estimation of 500,000", cardinality)
}
})
@@ -7708,7 +7712,7 @@ func TestServer_Query_ShowMeasurementExactCardinality(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -7822,14 +7826,11 @@ func TestServer_Query_ShowMeasurementExactCardinality(t *testing.T) {
}
func TestServer_Query_ShowTagKeys(t *testing.T) {
- // TODO(benbjohnson): To be addressed in upcoming PR.
- t.SkipNow()
-
t.Parallel()
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -7941,7 +7942,7 @@ func TestServer_Query_ShowTagValues(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -8095,13 +8096,14 @@ func TestServer_Query_ShowTagValues(t *testing.T) {
},
}...)
- for i, query := range test.queries {
+ var once sync.Once
+ for _, query := range test.queries {
t.Run(query.name, func(t *testing.T) {
- if i == 0 {
+ once.Do(func() {
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
- }
+ })
if query.skip {
t.Skipf("SKIP:: %s", query.name)
}
@@ -8119,7 +8121,7 @@ func TestServer_Query_ShowTagKeyCardinality(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -8319,7 +8321,7 @@ func TestServer_Query_ShowFieldKeys(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -8383,7 +8385,7 @@ func TestServer_Query_ShowFieldKeyCardinality(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -8465,7 +8467,7 @@ func TestServer_ContinuousQuery(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -8586,7 +8588,7 @@ func TestServer_ContinuousQuery_Deadlock(t *testing.T) {
s.Close()
}()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -8657,7 +8659,7 @@ func TestServer_Query_EvilIdentifiers(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -8699,7 +8701,7 @@ func TestServer_Query_OrderByTime(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -8777,7 +8779,7 @@ func TestServer_Query_FieldWithMultiplePeriods(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -8828,7 +8830,7 @@ func TestServer_Query_FieldWithMultiplePeriodsMeasurementPrefixMatch(t *testing.
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -8879,7 +8881,7 @@ func TestServer_Query_IntoTarget(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -8936,7 +8938,7 @@ func TestServer_Query_IntoTarget_Sparse(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -8995,7 +8997,7 @@ func TestServer_Query_DuplicateMeasurements(t *testing.T) {
defer s.Close()
// Create a second database.
- if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db1", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -9096,7 +9098,7 @@ func TestServer_Query_DotProduct(t *testing.T) {
defer s.Close()
// Create a second database.
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -9176,7 +9178,7 @@ func TestServer_WhereTimeInclusive(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -9284,7 +9286,7 @@ func TestServer_Query_ImplicitEndTime(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -9339,7 +9341,7 @@ func TestServer_Query_Sample_Wildcard(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -9384,7 +9386,7 @@ func TestServer_Query_Sample_LimitOffset(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
@@ -9444,7 +9446,7 @@ func TestServer_NestedAggregateWithMathPanics(t *testing.T) {
s := OpenServer(NewConfig())
defer s.Close()
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine.go b/vendor/github.com/influxdata/influxdb/tsdb/engine.go
index c935d4febc..fc781fbfe7 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/engine.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/engine.go
@@ -45,7 +45,7 @@ type Engine interface {
Export(w io.Writer, basePath string, start time.Time, end time.Time) error
Restore(r io.Reader, basePath string) error
Import(r io.Reader, basePath string) error
- Digest() (io.ReadCloser, error)
+ Digest() (io.ReadCloser, int64, error)
CreateIterator(ctx context.Context, measurement string, opt query.IteratorOptions) (query.Iterator, error)
CreateCursor(ctx context.Context, r *CursorRequest) (Cursor, error)
@@ -54,32 +54,23 @@ type Engine interface {
CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error
CreateSeriesListIfNotExists(keys, names [][]byte, tags []models.Tags) error
- DeleteSeriesRange(itr SeriesIterator, min, max int64) error
+ DeleteSeriesRange(itr SeriesIterator, min, max int64, removeIndex bool) error
- SeriesSketches() (estimator.Sketch, estimator.Sketch, error)
MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error)
SeriesN() int64
MeasurementExists(name []byte) (bool, error)
- MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error)
+
MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error)
+ MeasurementFieldSet() *MeasurementFieldSet
MeasurementFields(measurement []byte) *MeasurementFields
ForEachMeasurementName(fn func(name []byte) error) error
DeleteMeasurement(name []byte) error
- MeasurementFieldSet() *MeasurementFieldSet
HasTagKey(name, key []byte) (bool, error)
MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error)
- MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte, key []string, expr influxql.Expr, keysSorted bool) ([][]string, error)
- TagKeyHasAuthorizedSeries(auth query.Authorizer, name []byte, key string) bool
- ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error
TagKeyCardinality(name, key []byte) int
- // InfluxQL iterators
- MeasurementSeriesKeysByExprIterator(name []byte, expr influxql.Expr) (SeriesIterator, error)
- MeasurementSeriesKeysByExpr(name []byte, condition influxql.Expr) ([][]byte, error)
- SeriesPointIterator(opt query.IteratorOptions) (query.Iterator, error)
-
// Statistics will return statistics relevant to this engine.
Statistics(tags map[string]string) []models.Statistic
LastModified() time.Time
@@ -99,7 +90,7 @@ const (
)
// NewEngineFunc creates a new engine.
-type NewEngineFunc func(id uint64, i Index, database, path string, walPath string, options EngineOptions) Engine
+type NewEngineFunc func(id uint64, i Index, database, path string, walPath string, sfile *SeriesFile, options EngineOptions) Engine
// newEngineFuncs is a lookup of engine constructors by name.
var newEngineFuncs = make(map[string]NewEngineFunc)
@@ -124,10 +115,10 @@ func RegisteredEngines() []string {
// NewEngine returns an instance of an engine based on its format.
// If the path does not exist then the DefaultFormat is used.
-func NewEngine(id uint64, i Index, database, path string, walPath string, options EngineOptions) (Engine, error) {
+func NewEngine(id uint64, i Index, database, path string, walPath string, sfile *SeriesFile, options EngineOptions) (Engine, error) {
// Create a new engine
if _, err := os.Stat(path); os.IsNotExist(err) {
- return newEngineFuncs[options.EngineVersion](id, i, database, path, walPath, options), nil
+ return newEngineFuncs[options.EngineVersion](id, i, database, path, walPath, sfile, options), nil
}
// If it's a dir then it's a tsm1 engine
@@ -146,7 +137,7 @@ func NewEngine(id uint64, i Index, database, path string, walPath string, option
return nil, fmt.Errorf("invalid engine format: %q", format)
}
- return fn(id, i, database, path, walPath, options), nil
+ return fn(id, i, database, path, walPath, sfile, options), nil
}
// EngineOptions represents the options used to initialize the engine.
@@ -172,4 +163,4 @@ func NewEngineOptions() EngineOptions {
}
// NewInmemIndex returns a new "inmem" index type.
-var NewInmemIndex func(name string) (interface{}, error)
+var NewInmemIndex func(name string, sfile *SeriesFile) (interface{}, error)
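The interface and constructor changes above thread the shard's shared *SeriesFile through every engine: Digest gains a size return, DeleteSeriesRange gains a removeIndex flag, and both NewEngineFunc and NewInmemIndex now receive the series file. A minimal sketch of how an engine implementation would register itself against the widened constructor signature follows; the "example" engine name and exampleEngine type are illustrative assumptions, not part of this patch.

package example // hypothetical engine package, for illustration only

import "github.com/influxdata/influxdb/tsdb"

type exampleEngine struct {
	tsdb.Engine // embed the interface so this sketch satisfies tsdb.Engine
	id    uint64
	index tsdb.Index
	sfile *tsdb.SeriesFile
}

func init() {
	// RegisterEngine is assumed to accept the new NewEngineFunc shape shown above.
	tsdb.RegisterEngine("example", func(id uint64, i tsdb.Index, database, path, walPath string,
		sfile *tsdb.SeriesFile, opt tsdb.EngineOptions) tsdb.Engine {
		// The series file is injected at construction time so the engine can
		// resolve series IDs during deletes and restores.
		return &exampleEngine{id: id, index: i, sfile: sfile}
	})
}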
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache.go
index f9b340fba2..6506796afa 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache.go
@@ -39,26 +39,14 @@ type entry struct {
// The type of values stored. Read only so doesn't need to be protected by
// mu.
- vtype int
+ vtype byte
}
// newEntryValues returns a new instance of entry with the given values. If the
// values are not valid, an error is returned.
-//
-// newEntryValues takes an optional hint to indicate the initial buffer size.
-// The hint is only respected if it's positive.
-func newEntryValues(values []Value, hint int) (*entry, error) {
- // Ensure we start off with a reasonably sized values slice.
- if hint < 32 {
- hint = 32
- }
-
+func newEntryValues(values []Value) (*entry, error) {
e := &entry{}
- if len(values) > hint {
- e.values = make(Values, 0, len(values))
- } else {
- e.values = make(Values, 0, hint)
- }
+ e.values = make(Values, 0, len(values))
e.values = append(e.values, values...)
// No values, don't check types and ordering
@@ -87,22 +75,19 @@ func (e *entry) add(values []Value) error {
}
// Are any of the new values the wrong type?
- for _, v := range values {
- if e.vtype != valueType(v) {
- return tsdb.ErrFieldTypeConflict
+ if e.vtype != 0 {
+ for _, v := range values {
+ if e.vtype != valueType(v) {
+ return tsdb.ErrFieldTypeConflict
+ }
}
}
// entry currently has no values, so add the new ones and we're done.
e.mu.Lock()
if len(e.values) == 0 {
- // Ensure we start off with a reasonably sized values slice.
- if len(values) < 32 {
- e.values = make(Values, 0, 32)
- e.values = append(e.values, values...)
- } else {
- e.values = values
- }
+ e.values = values
+ e.vtype = valueType(values[0])
e.mu.Unlock()
return nil
}
@@ -777,7 +762,7 @@ func (c *Cache) updateMemSize(b int64) {
atomic.AddInt64(&c.stats.MemSizeBytes, b)
}
-func valueType(v Value) int {
+func valueType(v Value) byte {
switch v.(type) {
case FloatValue:
return 1
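With the change above, an entry's value type is tracked as a single byte and is only assigned once the first values arrive; a zero vtype means "no type yet", so the per-value type check in add is skipped until a type has been established. A test-style sketch of the expected behaviour, assuming it sits alongside cache_test.go in package tsm1, that newEntryValues still records the type of the first value, and that the tsdb package is imported for the error value:

func TestEntry_AddTypeConflict_Sketch(t *testing.T) {
	// The first write establishes the entry's type (float here).
	e, err := newEntryValues([]Value{NewValue(1, 1.1)})
	if err != nil {
		t.Fatal(err)
	}

	// Once vtype is non-zero, a write with a different value type is rejected.
	if err := e.add([]Value{NewValue(2, int64(7))}); err != tsdb.ErrFieldTypeConflict {
		t.Fatalf("expected tsdb.ErrFieldTypeConflict, got %v", err)
	}
}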
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache_test.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache_test.go
index d7edb7ebd0..309b428972 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache_test.go
@@ -956,7 +956,7 @@ func BenchmarkEntry_add(b *testing.B) {
otherValues[i] = NewValue(1, float64(i))
}
- entry, err := newEntryValues(values, 0) // Will use default allocation size.
+ entry, err := newEntryValues(values)
if err != nil {
b.Fatal(err)
}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go
index ad85e44ad8..1174555f0d 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go
@@ -6,6 +6,8 @@
package tsm1
+import "sort"
+
// merge combines the next set of blocks into merged blocks.
func (k *tsmKeyIterator) mergeFloat() {
// No blocks left, or pending merged values, we're done
@@ -13,6 +15,8 @@ func (k *tsmKeyIterator) mergeFloat() {
return
}
+ sort.Stable(k.blocks)
+
dedup := len(k.mergedFloatValues) != 0
if len(k.blocks) > 0 && !dedup {
// If we have more than one block or any partially tombstoned blocks, we may need to dedup
@@ -22,7 +26,7 @@ func (k *tsmKeyIterator) mergeFloat() {
// we need to dedup as there may be duplicate points now
for i := 1; !dedup && i < len(k.blocks); i++ {
dedup = k.blocks[i].partiallyRead() ||
- k.blocks[i].minTime <= k.blocks[i-1].maxTime ||
+ k.blocks[i].overlapsTimeRange(k.blocks[i-1].minTime, k.blocks[i-1].maxTime) ||
len(k.blocks[i].tombstones) > 0
}
@@ -210,6 +214,8 @@ func (k *tsmKeyIterator) mergeInteger() {
return
}
+ sort.Stable(k.blocks)
+
dedup := len(k.mergedIntegerValues) != 0
if len(k.blocks) > 0 && !dedup {
// If we have more than one block or any partially tombstoned blocks, we may need to dedup
@@ -219,7 +225,7 @@ func (k *tsmKeyIterator) mergeInteger() {
// we need to dedup as there may be duplicate points now
for i := 1; !dedup && i < len(k.blocks); i++ {
dedup = k.blocks[i].partiallyRead() ||
- k.blocks[i].minTime <= k.blocks[i-1].maxTime ||
+ k.blocks[i].overlapsTimeRange(k.blocks[i-1].minTime, k.blocks[i-1].maxTime) ||
len(k.blocks[i].tombstones) > 0
}
@@ -407,6 +413,8 @@ func (k *tsmKeyIterator) mergeUnsigned() {
return
}
+ sort.Stable(k.blocks)
+
dedup := len(k.mergedUnsignedValues) != 0
if len(k.blocks) > 0 && !dedup {
// If we have more than one block or any partially tombstoned blocks, we may need to dedup
@@ -416,7 +424,7 @@ func (k *tsmKeyIterator) mergeUnsigned() {
// we need to dedup as there may be duplicate points now
for i := 1; !dedup && i < len(k.blocks); i++ {
dedup = k.blocks[i].partiallyRead() ||
- k.blocks[i].minTime <= k.blocks[i-1].maxTime ||
+ k.blocks[i].overlapsTimeRange(k.blocks[i-1].minTime, k.blocks[i-1].maxTime) ||
len(k.blocks[i].tombstones) > 0
}
@@ -604,6 +612,8 @@ func (k *tsmKeyIterator) mergeString() {
return
}
+ sort.Stable(k.blocks)
+
dedup := len(k.mergedStringValues) != 0
if len(k.blocks) > 0 && !dedup {
// If we have more than one block or any partially tombstoned blocks, we may need to dedup
@@ -613,7 +623,7 @@ func (k *tsmKeyIterator) mergeString() {
// we need to dedup as there may be duplicate points now
for i := 1; !dedup && i < len(k.blocks); i++ {
dedup = k.blocks[i].partiallyRead() ||
- k.blocks[i].minTime <= k.blocks[i-1].maxTime ||
+ k.blocks[i].overlapsTimeRange(k.blocks[i-1].minTime, k.blocks[i-1].maxTime) ||
len(k.blocks[i].tombstones) > 0
}
@@ -801,6 +811,8 @@ func (k *tsmKeyIterator) mergeBoolean() {
return
}
+ sort.Stable(k.blocks)
+
dedup := len(k.mergedBooleanValues) != 0
if len(k.blocks) > 0 && !dedup {
// If we have more than one block or any partially tombstoned blocks, we may need to dedup
@@ -810,7 +822,7 @@ func (k *tsmKeyIterator) mergeBoolean() {
// we need to dedup as there may be duplicate points now
for i := 1; !dedup && i < len(k.blocks); i++ {
dedup = k.blocks[i].partiallyRead() ||
- k.blocks[i].minTime <= k.blocks[i-1].maxTime ||
+ k.blocks[i].overlapsTimeRange(k.blocks[i-1].minTime, k.blocks[i-1].maxTime) ||
len(k.blocks[i].tombstones) > 0
}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go.tmpl b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go.tmpl
index 0b710995f3..ee3def4c5a 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go.tmpl
+++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go.tmpl
@@ -1,5 +1,7 @@
package tsm1
+import "sort"
+
{{range .}}
// merge combines the next set of blocks into merged blocks.
@@ -9,6 +11,8 @@ func (k *tsmKeyIterator) merge{{.Name}}() {
return
}
+ sort.Stable(k.blocks)
+
dedup := len(k.merged{{.Name}}Values) != 0
if len(k.blocks) > 0 && !dedup {
// If we have more than one block or any partially tombstoned blocks, we may need to dedup
@@ -18,7 +22,7 @@ func (k *tsmKeyIterator) merge{{.Name}}() {
// we need to dedup as there may be duplicate points now
for i := 1; !dedup && i < len(k.blocks); i++ {
dedup = k.blocks[i].partiallyRead() ||
- k.blocks[i].minTime <= k.blocks[i-1].maxTime ||
+ k.blocks[i].overlapsTimeRange(k.blocks[i-1].minTime, k.blocks[i-1].maxTime) ||
len(k.blocks[i].tombstones) > 0
}
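Both the generated mergers and their template above now stably sort the pending blocks before merging, and they decide whether to deduplicate by testing whether adjacent blocks' time ranges actually overlap, rather than only comparing the next block's minTime against the previous maxTime. The overlap test is the usual closed-interval check; a standalone sketch of the predicate that the overlapsTimeRange method embodies:

// rangesOverlap reports whether the closed intervals [aMin, aMax] and
// [bMin, bMax] share at least one timestamp.
func rangesOverlap(aMin, aMax, bMin, bMax int64) bool {
	return aMin <= bMax && aMax >= bMin
}

// Example: blocks covering [1, 3] and [4, 6] do not overlap, so the merger can
// simply concatenate them after sorting; a block covering [2, 5] overlaps both
// and forces a dedup pass.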
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.go
index 2e58058c53..568c5d9565 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.go
@@ -1270,7 +1270,7 @@ func (a blocks) Len() int { return len(a) }
func (a blocks) Less(i, j int) bool {
cmp := bytes.Compare(a[i].key, a[j].key)
if cmp == 0 {
- return a[i].minTime < a[j].minTime
+ return a[i].minTime < a[j].minTime && a[i].maxTime < a[j].minTime
}
return cmp < 0
}
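The stricter Less above only orders one block ahead of another with the same key when it both starts earlier and ends entirely before the other begins; overlapping blocks therefore compare as equal, and the sort.Stable calls added in the generated mergers keep them in their original order instead of interleaving them. A small self-contained illustration of that comparator on plain time spans:

package main

import (
	"fmt"
	"sort"
)

type span struct{ min, max int64 }

type spans []span

func (s spans) Len() int      { return len(s) }
func (s spans) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// Less mirrors the new blocks.Less for blocks with equal keys: strictly
// earlier and fully non-overlapping.
func (s spans) Less(i, j int) bool {
	return s[i].min < s[j].min && s[i].max < s[j].min
}

func main() {
	b := spans{{4, 6}, {1, 3}, {1, 2}} // later data first, plus two overlapping spans
	sort.Stable(b)
	// Prints [{1 3} {1 2} {4 6}]: the two overlapping spans keep their original
	// relative order, while the non-overlapping span sorts after them.
	fmt.Println(b)
}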
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact_test.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact_test.go
index 617b7567a6..72e2699569 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact_test.go
@@ -347,6 +347,150 @@ func TestCompactor_Compact_OverlappingBlocksMultiple(t *testing.T) {
}
}
+func TestCompactor_Compact_UnsortedBlocks(t *testing.T) {
+ dir := MustTempDir()
+ defer os.RemoveAll(dir)
+
+ // write 2 TSM files with different data and one new point
+ a1 := tsm1.NewValue(4, 1.1)
+ a2 := tsm1.NewValue(5, 1.1)
+ a3 := tsm1.NewValue(6, 1.1)
+
+ writes := map[string][]tsm1.Value{
+ "cpu,host=A#!~#value": []tsm1.Value{a1, a2, a3},
+ }
+ f1 := MustWriteTSM(dir, 1, writes)
+
+ b1 := tsm1.NewValue(1, 1.2)
+ b2 := tsm1.NewValue(2, 1.2)
+ b3 := tsm1.NewValue(3, 1.2)
+
+ writes = map[string][]tsm1.Value{
+ "cpu,host=A#!~#value": []tsm1.Value{b1, b2, b3},
+ }
+ f2 := MustWriteTSM(dir, 2, writes)
+
+ compactor := &tsm1.Compactor{
+ Dir: dir,
+ FileStore: &fakeFileStore{},
+ Size: 2,
+ }
+
+ compactor.Open()
+
+ files, err := compactor.CompactFast([]string{f1, f2})
+ if err != nil {
+ t.Fatalf("unexpected error writing snapshot: %v", err)
+ }
+
+ if got, exp := len(files), 1; got != exp {
+ t.Fatalf("files length mismatch: got %v, exp %v", got, exp)
+ }
+
+ r := MustOpenTSMReader(files[0])
+
+ if got, exp := r.KeyCount(), 1; got != exp {
+ t.Fatalf("keys length mismatch: got %v, exp %v", got, exp)
+ }
+
+ var data = []struct {
+ key string
+ points []tsm1.Value
+ }{
+ {"cpu,host=A#!~#value", []tsm1.Value{b1, b2, b3, a1, a2, a3}},
+ }
+
+ for _, p := range data {
+ values, err := r.ReadAll([]byte(p.key))
+ if err != nil {
+ t.Fatalf("unexpected error reading: %v", err)
+ }
+
+ if got, exp := len(values), len(p.points); got != exp {
+ t.Fatalf("values length mismatch %s: got %v, exp %v", p.key, got, exp)
+ }
+
+ for i, point := range p.points {
+ assertValueEqual(t, values[i], point)
+ }
+ }
+}
+
+func TestCompactor_Compact_UnsortedBlocksOverlapping(t *testing.T) {
+ dir := MustTempDir()
+ defer os.RemoveAll(dir)
+
+ // write 3 TSM files where two blocks overlap and the data arrives in unsorted order
+ a1 := tsm1.NewValue(1, 1.1)
+ a2 := tsm1.NewValue(2, 1.1)
+
+ writes := map[string][]tsm1.Value{
+ "cpu,host=A#!~#value": []tsm1.Value{a1, a2},
+ }
+ f1 := MustWriteTSM(dir, 1, writes)
+
+ b1 := tsm1.NewValue(3, 1.2)
+ b2 := tsm1.NewValue(4, 1.2)
+
+ writes = map[string][]tsm1.Value{
+ "cpu,host=A#!~#value": []tsm1.Value{b1, b2},
+ }
+ f2 := MustWriteTSM(dir, 2, writes)
+
+ c1 := tsm1.NewValue(1, 1.1)
+ c2 := tsm1.NewValue(2, 1.1)
+
+ writes = map[string][]tsm1.Value{
+ "cpu,host=A#!~#value": []tsm1.Value{c1, c2},
+ }
+ f3 := MustWriteTSM(dir, 3, writes)
+
+ compactor := &tsm1.Compactor{
+ Dir: dir,
+ FileStore: &fakeFileStore{},
+ Size: 2,
+ }
+
+ compactor.Open()
+
+ files, err := compactor.CompactFast([]string{f1, f2, f3})
+ if err != nil {
+ t.Fatalf("unexpected error writing snapshot: %v", err)
+ }
+
+ if got, exp := len(files), 1; got != exp {
+ t.Fatalf("files length mismatch: got %v, exp %v", got, exp)
+ }
+
+ r := MustOpenTSMReader(files[0])
+
+ if got, exp := r.KeyCount(), 1; got != exp {
+ t.Fatalf("keys length mismatch: got %v, exp %v", got, exp)
+ }
+
+ var data = []struct {
+ key string
+ points []tsm1.Value
+ }{
+ {"cpu,host=A#!~#value", []tsm1.Value{a1, a2, b1, b2}},
+ }
+
+ for _, p := range data {
+ values, err := r.ReadAll([]byte(p.key))
+ if err != nil {
+ t.Fatalf("unexpected error reading: %v", err)
+ }
+
+ if got, exp := len(values), len(p.points); got != exp {
+ t.Fatalf("values length mismatch %s: got %v, exp %v", p.key, got, exp)
+ }
+
+ for i, point := range p.points {
+ assertValueEqual(t, values[i], point)
+ }
+ }
+}
+
// Ensures that a compaction will properly merge multiple TSM files
func TestCompactor_CompactFull_SkipFullBlocks(t *testing.T) {
dir := MustTempDir()
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine.go
index 9bc17273e2..f1a61baa3d 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine.go
@@ -25,12 +25,12 @@ import (
"github.com/influxdata/influxdb/pkg/estimator"
"github.com/influxdata/influxdb/pkg/limiter"
"github.com/influxdata/influxdb/pkg/metrics"
+ intar "github.com/influxdata/influxdb/pkg/tar"
"github.com/influxdata/influxdb/pkg/tracing"
"github.com/influxdata/influxdb/query"
"github.com/influxdata/influxdb/tsdb"
_ "github.com/influxdata/influxdb/tsdb/index"
"github.com/influxdata/influxdb/tsdb/index/inmem"
- "github.com/influxdata/influxdb/tsdb/index/tsi1"
"github.com/influxdata/influxql"
"go.uber.org/zap"
)
@@ -145,6 +145,7 @@ type Engine struct {
id uint64
database string
path string
+ sfile *tsdb.SeriesFile
logger *zap.Logger // Logger to be used for important messages
traceLogger *zap.Logger // Logger to be used when trace-logging is on.
traceLogging bool
@@ -180,7 +181,7 @@ type Engine struct {
}
// NewEngine returns a new instance of Engine.
-func NewEngine(id uint64, idx tsdb.Index, database, path string, walPath string, opt tsdb.EngineOptions) tsdb.Engine {
+func NewEngine(id uint64, idx tsdb.Index, database, path string, walPath string, sfile *tsdb.SeriesFile, opt tsdb.EngineOptions) tsdb.Engine {
w := NewWAL(walPath)
w.syncDelay = time.Duration(opt.Config.WALFsyncDelay)
@@ -200,6 +201,7 @@ func NewEngine(id uint64, idx tsdb.Index, database, path string, walPath string,
database: database,
path: path,
index: idx,
+ sfile: sfile,
logger: logger,
traceLogger: logger,
traceLogging: opt.Config.TraceLoggingEnabled,
@@ -228,7 +230,7 @@ func NewEngine(id uint64, idx tsdb.Index, database, path string, walPath string,
}
// Digest returns a reader for the shard's digest.
-func (e *Engine) Digest() (io.ReadCloser, error) {
+func (e *Engine) Digest() (io.ReadCloser, int64, error) {
digestPath := filepath.Join(e.path, "digest.tsd")
// See if there's an existing digest file on disk.
@@ -238,16 +240,21 @@ func (e *Engine) Digest() (io.ReadCloser, error) {
fi, err := f.Stat()
if err != nil {
f.Close()
- return nil, err
+ return nil, 0, err
}
if !e.LastModified().After(fi.ModTime()) {
// Existing digest is still fresh so return a reader for it.
- return f, nil
+ fi, err := f.Stat()
+ if err != nil {
+ f.Close()
+ return nil, 0, err
+ }
+ return f, fi.Size(), nil
}
if err := f.Close(); err != nil {
- return nil, err
+ return nil, 0, err
}
}
@@ -257,23 +264,34 @@ func (e *Engine) Digest() (io.ReadCloser, error) {
// Create a tmp file to write the digest to.
tf, err := os.Create(digestPath + ".tmp")
if err != nil {
- return nil, err
+ return nil, 0, err
}
// Write the new digest to the tmp file.
if err := Digest(e.path, tf); err != nil {
tf.Close()
os.Remove(tf.Name())
- return nil, err
+ return nil, 0, err
}
// Rename the temporary digest file to the actual digest file.
if err := renameFile(tf.Name(), digestPath); err != nil {
- return nil, err
+ return nil, 0, err
}
// Create and return a reader for the new digest file.
- return os.Open(digestPath)
+ f, err = os.Open(digestPath)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ fi, err := f.Stat()
+ if err != nil {
+ f.Close()
+ return nil, 0, err
+ }
+
+ return f, fi.Size(), nil
}
// SetEnabled sets whether the engine is enabled.
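Digest now returns the digest file's size alongside the reader, so callers no longer need a second Stat. A hedged sketch of a caller that checksums the digest using the new signature; digestChecksum is an illustrative name and the code assumes it lives in package tsm1 next to the Engine type:

import (
	"crypto/sha256"
	"fmt"
	"io"
)

func digestChecksum(e *Engine) (string, error) {
	r, sz, err := e.Digest()
	if err != nil {
		return "", err
	}
	defer r.Close()

	h := sha256.New()
	// The returned size lets us copy exactly the digest contents without
	// stat-ing the file again.
	if _, err := io.CopyN(h, r, sz); err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", h.Sum(nil)), nil
}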
@@ -475,23 +493,20 @@ func (e *Engine) MeasurementExists(name []byte) (bool, error) {
return e.index.MeasurementExists(name)
}
-func (e *Engine) MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) {
- return e.index.MeasurementNamesByExpr(auth, expr)
-}
-
func (e *Engine) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
return e.index.MeasurementNamesByRegex(re)
}
+// MeasurementFieldSet returns the measurement field set.
+func (e *Engine) MeasurementFieldSet() *tsdb.MeasurementFieldSet {
+ return e.fieldset
+}
+
// MeasurementFields returns the measurement fields for a measurement.
func (e *Engine) MeasurementFields(measurement []byte) *tsdb.MeasurementFields {
return e.fieldset.CreateFieldsIfNotExists(measurement)
}
-func (e *Engine) MeasurementFieldSet() *tsdb.MeasurementFieldSet {
- return e.fieldset
-}
-
func (e *Engine) HasTagKey(name, key []byte) (bool, error) {
return e.index.HasTagKey(name, key)
}
@@ -500,29 +515,6 @@ func (e *Engine) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[
return e.index.MeasurementTagKeysByExpr(name, expr)
}
-// TagKeyHasAuthorizedSeries determines if there exist authorized series for the
-// provided measurement name and tag key.
-func (e *Engine) TagKeyHasAuthorizedSeries(auth query.Authorizer, name []byte, key string) bool {
- return e.index.TagKeyHasAuthorizedSeries(auth, name, key)
-}
-
-// MeasurementTagKeyValuesByExpr returns a set of tag values filtered by an expression.
-//
-// MeasurementTagKeyValuesByExpr relies on the provided tag keys being sorted.
-// The caller can indicate the tag keys have been sorted by setting the
-// keysSorted argument appropriately. Tag values are returned in a slice that
-// is indexible according to the sorted order of the tag keys, e.g., the values
-// for the earliest tag k will be available in index 0 of the returned values
-// slice.
-//
-func (e *Engine) MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error) {
- return e.index.MeasurementTagKeyValuesByExpr(auth, name, keys, expr, keysSorted)
-}
-
-func (e *Engine) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error {
- return e.index.ForEachMeasurementTagKey(name, fn)
-}
-
func (e *Engine) TagKeyCardinality(name, key []byte) int {
return e.index.TagKeyCardinality(name, key)
}
@@ -532,10 +524,6 @@ func (e *Engine) SeriesN() int64 {
return e.index.SeriesN()
}
-func (e *Engine) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {
- return e.index.SeriesSketches()
-}
-
func (e *Engine) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {
return e.index.MeasurementsSketches()
}
@@ -719,9 +707,9 @@ func (e *Engine) LoadMetadataIndex(shardID uint64, index tsdb.Index) error {
}
if err := e.FileStore.WalkKeys(nil, func(key []byte, typ byte) error {
- fieldType, err := tsmFieldTypeToInfluxQLDataType(typ)
- if err != nil {
- return err
+ fieldType := BlockTypeToInfluxQLDataType(typ)
+ if fieldType == influxql.Unknown {
+ return fmt.Errorf("unknown block type: %v", typ)
}
if err := e.addToIndexFromKey(key, fieldType); err != nil {
@@ -789,73 +777,20 @@ func (e *Engine) Backup(w io.Writer, basePath string, since time.Time) error {
if err != nil {
return err
}
-
- tw := tar.NewWriter(w)
- defer tw.Close()
-
// Remove the temporary snapshot dir
defer os.RemoveAll(path)
- // Recursively read all files from path.
- files, err := readDir(path, "")
- if err != nil {
- return err
- }
-
- // Filter paths to only changed files.
- var filtered []string
- for _, file := range files {
- fi, err := os.Stat(filepath.Join(path, file))
- if err != nil {
- return err
- } else if !fi.ModTime().After(since) {
- continue
- }
- filtered = append(filtered, file)
- }
- if len(filtered) == 0 {
- return nil
- }
-
- for _, f := range filtered {
- if err := e.writeFileToBackup(f, basePath, filepath.Join(path, f), tw); err != nil {
- return err
- }
- }
-
- return nil
+ return intar.Stream(w, path, basePath, intar.SinceFilterTarFile(since))
}
-func (e *Engine) Export(w io.Writer, basePath string, start time.Time, end time.Time) error {
- path, err := e.CreateSnapshot()
- if err != nil {
- return err
- }
-
- // Remove the temporary snapshot dir
- defer os.RemoveAll(path)
- if err := e.index.SnapshotTo(path); err != nil {
- return err
- }
-
- tw := tar.NewWriter(w)
- defer tw.Close()
-
- // Recursively read all files from path.
- files, err := readDir(path, "")
- if err != nil {
- return err
- }
-
- for _, file := range files {
- if !strings.HasSuffix(file, ".tsm") {
- if err := e.writeFileToBackup(file, basePath, filepath.Join(path, file), tw); err != nil {
- return err
- }
+func (e *Engine) timeStampFilterTarFile(start, end time.Time) func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error {
+ return func(fi os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error {
+ if !strings.HasSuffix(fi.Name(), ".tsm") {
+ return intar.StreamFile(fi, shardRelativePath, fullPath, tw)
}
var tombstonePath string
- f, err := os.Open(filepath.Join(path, file))
+ f, err := os.Open(fullPath)
if err != nil {
return err
}
@@ -867,6 +802,7 @@ func (e *Engine) Export(w io.Writer, basePath string, start time.Time, end time.
// Grab the tombstone file if one exists.
if r.HasTombstones() {
tombstonePath = filepath.Base(r.TombstoneFiles()[0].Path)
+ return intar.StreamFile(fi, shardRelativePath, tombstonePath, tw)
}
min, max := r.TimeRange()
@@ -877,7 +813,7 @@ func (e *Engine) Export(w io.Writer, basePath string, start time.Time, end time.
if min >= stun && min <= eun && max > eun || // overlap to the right
max >= stun && max <= eun && min < stun || // overlap to the left
min <= stun && max >= eun { // TSM file has a range LARGER than the boundary
- err := e.filterFileToBackup(r, file, basePath, filepath.Join(path, file), start.UnixNano(), end.UnixNano(), tw)
+ err := e.filterFileToBackup(r, fi, shardRelativePath, fullPath, start.UnixNano(), end.UnixNano(), tw)
if err != nil {
if err := r.Close(); err != nil {
return err
@@ -894,24 +830,26 @@ func (e *Engine) Export(w io.Writer, basePath string, start time.Time, end time.
// the TSM file is 100% inside the range, so we can just write it without scanning each block
if min >= start.UnixNano() && max <= end.UnixNano() {
- if err := e.writeFileToBackup(file, basePath, filepath.Join(path, file), tw); err != nil {
- return err
- }
- }
-
- // if this TSM file had a tombstone we'll write out the whole thing too.
- if tombstonePath != "" {
- if err := e.writeFileToBackup(tombstonePath, basePath, filepath.Join(path, tombstonePath), tw); err != nil {
+ if err := intar.StreamFile(fi, shardRelativePath, fullPath, tw); err != nil {
return err
}
}
+ return nil
+ }
+}
+func (e *Engine) Export(w io.Writer, basePath string, start time.Time, end time.Time) error {
+ path, err := e.CreateSnapshot()
+ if err != nil {
+ return err
}
+ // Remove the temporary snapshot dir
+ defer os.RemoveAll(path)
- return nil
+ return intar.Stream(w, path, basePath, e.timeStampFilterTarFile(start, end))
}
-func (e *Engine) filterFileToBackup(r *TSMReader, name, shardRelativePath, fullPath string, start, end int64, tw *tar.Writer) error {
+func (e *Engine) filterFileToBackup(r *TSMReader, fi os.FileInfo, shardRelativePath, fullPath string, start, end int64, tw *tar.Writer) error {
path := fullPath + ".tmp"
out, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
@@ -959,36 +897,7 @@ func (e *Engine) filterFileToBackup(r *TSMReader, name, shardRelativePath, fullP
return err
}
- return e.writeFileToBackup(name, shardRelativePath, path, tw)
-}
-
-// writeFileToBackup copies the file into the tar archive. Files will use the shardRelativePath
-// in their names. This should be the // part of the path.
-func (e *Engine) writeFileToBackup(name string, shardRelativePath, fullPath string, tw *tar.Writer) error {
- f, err := os.Stat(fullPath)
- if err != nil {
- return err
- }
-
- h := &tar.Header{
- Name: filepath.ToSlash(filepath.Join(shardRelativePath, name)),
- ModTime: f.ModTime(),
- Size: f.Size(),
- Mode: int64(f.Mode()),
- }
- if err := tw.WriteHeader(h); err != nil {
- return err
- }
- fr, err := os.Open(fullPath)
- if err != nil {
- return err
- }
-
- defer fr.Close()
-
- _, err = io.CopyN(tw, fr, h.Size)
-
- return err
+ return intar.StreamFile(fi, shardRelativePath, path, tw)
}
// Restore reads a tar archive generated by Backup().
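Backup and Export above now hand the tar plumbing to the pkg/tar helpers: intar.Stream walks the snapshot directory and invokes a per-file filter with the same signature as timeStampFilterTarFile, and intar.StreamFile writes a single file into the archive. As a rough sketch, a since-style filter in the spirit of intar.SinceFilterTarFile (an assumption about its behaviour, not the actual implementation) might look like the following, reusing the archive/tar, os, and time imports already present in this file:

// sinceFilter is an illustrative stand-in for intar.SinceFilterTarFile: it
// drops files whose modification time is not after the cutoff and streams
// everything else into the archive unchanged.
func sinceFilter(since time.Time) func(fi os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error {
	return func(fi os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error {
		if !fi.ModTime().After(since) {
			return nil // unchanged since the cutoff; omit from the backup
		}
		return intar.StreamFile(fi, shardRelativePath, fullPath, tw)
	}
}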
@@ -1050,12 +959,16 @@ func (e *Engine) overlay(r io.Reader, basePath string, asNew bool) error {
readers := make([]chan seriesKey, 0, len(newFiles))
ext := fmt.Sprintf(".%s", TmpTSMFileExtension)
for _, f := range newFiles {
- ch := make(chan seriesKey, 1)
- readers = append(readers, ch)
-
// If asNew is true, the files created from readFileFromBackup will be new ones
// having a temp extension.
f = strings.TrimSuffix(f, ext)
+ if !strings.HasSuffix(f, TSMFileExtension) {
+ // This isn't a .tsm file.
+ continue
+ }
+
+ ch := make(chan seriesKey, 1)
+ readers = append(readers, ch)
fd, err := os.Open(f)
if err != nil {
@@ -1082,9 +995,9 @@ func (e *Engine) overlay(r io.Reader, basePath string, asNew bool) error {
// lock contention on the index.
merged := merge(readers...)
for v := range merged {
- fieldType, err := tsmFieldTypeToInfluxQLDataType(v.typ)
- if err != nil {
- return err
+ fieldType := BlockTypeToInfluxQLDataType(v.typ)
+ if fieldType == influxql.Unknown {
+ return fmt.Errorf("unknown block type: %v", v.typ)
}
if err := e.addToIndexFromKey(v.key, fieldType); err != nil {
@@ -1105,8 +1018,12 @@ func (e *Engine) readFileFromBackup(tr *tar.Reader, shardRelativePath string, as
return "", err
}
- nativeFileName := filepath.FromSlash(hdr.Name)
+ if !strings.HasSuffix(hdr.Name, TSMFileExtension) {
+ // This isn't a .tsm file.
+ return "", nil
+ }
+ nativeFileName := filepath.FromSlash(hdr.Name)
// Skip file if it does not have a matching prefix.
if !filepath.HasPrefix(nativeFileName, shardRelativePath) {
return "", nil
@@ -1116,6 +1033,14 @@ func (e *Engine) readFileFromBackup(tr *tar.Reader, shardRelativePath string, as
return "", err
}
+ // If this is a directory entry (usually just `index` for tsi), create it and move on.
+ if hdr.Typeflag == tar.TypeDir {
+ if err := os.MkdirAll(filepath.Join(e.path, filename), os.FileMode(hdr.Mode).Perm()); err != nil {
+ return "", err
+ }
+ return "", nil
+ }
+
if asNew {
filename = fmt.Sprintf("%09d-%09d.%s", e.FileStore.NextGeneration(), 1, TSMFileExtension)
}
@@ -1152,12 +1077,16 @@ func (e *Engine) addToIndexFromKey(key []byte, fieldType influxql.DataType) erro
return err
}
+ tags := models.ParseTags(seriesKey)
// Build in-memory index, if necessary.
if e.index.Type() == inmem.IndexName {
- tags, _ := models.ParseTags(seriesKey)
if err := e.index.InitializeSeries(seriesKey, name, tags); err != nil {
return err
}
+ } else {
+ if err := e.index.CreateSeriesIfNotExists(seriesKey, name, tags); err != nil {
+ return err
+ }
}
return nil
@@ -1231,19 +1160,19 @@ func (e *Engine) WritePoints(points []models.Point) error {
}
// DeleteSeriesRange removes the values between min and max (inclusive) from all series
-func (e *Engine) DeleteSeriesRange(itr tsdb.SeriesIterator, min, max int64) error {
+func (e *Engine) DeleteSeriesRange(itr tsdb.SeriesIterator, min, max int64, removeIndex bool) error {
var disableOnce bool
- // Ensure that the index does not compact away the measurement or series we're
- // going to delete before we're done with them.
- if tsiIndex, ok := e.index.(*tsi1.Index); ok {
- fs := tsiIndex.RetainFileSet()
- defer fs.Release()
- }
-
var sz int
batch := make([][]byte, 0, 10000)
- for elem := itr.Next(); elem != nil; elem = itr.Next() {
+ for {
+ elem, err := itr.Next()
+ if err != nil {
+ return err
+ } else if elem == nil {
+ break
+ }
+
if elem.Expr() != nil {
if v, ok := elem.Expr().(*influxql.BooleanLiteral); !ok || !v.Val {
return errors.New("fields not supported in WHERE clause during deletion")
@@ -1268,7 +1197,7 @@ func (e *Engine) DeleteSeriesRange(itr tsdb.SeriesIterator, min, max int64) erro
if sz >= deleteFlushThreshold {
// Delete all matching batch.
- if err := e.deleteSeriesRange(batch, min, max); err != nil {
+ if err := e.deleteSeriesRange(batch, min, max, removeIndex); err != nil {
return err
}
batch = batch[:0]
@@ -1278,20 +1207,24 @@ func (e *Engine) DeleteSeriesRange(itr tsdb.SeriesIterator, min, max int64) erro
if len(batch) > 0 {
// Delete all matching batch.
- if err := e.deleteSeriesRange(batch, min, max); err != nil {
+ if err := e.deleteSeriesRange(batch, min, max, removeIndex); err != nil {
return err
}
batch = batch[:0]
}
- e.index.Rebuild()
+ if removeIndex {
+ e.index.Rebuild()
+ }
return nil
}
-// deleteSeriesRange removes the values between min and max (inclusive) from all series. This
-// does not update the index or disable compactions. This should mainly be called by DeleteSeriesRange
-// and not directly.
-func (e *Engine) deleteSeriesRange(seriesKeys [][]byte, min, max int64) error {
+// deleteSeriesRange removes the values between min and max (inclusive) from all
+// series in the TSM engine. If removeIndex is true, then series will also be
+// removed from the index.
+//
+// This should mainly be called by DeleteSeriesRange and not directly.
+func (e *Engine) deleteSeriesRange(seriesKeys [][]byte, min, max int64, removeIndex bool) error {
ts := time.Now().UTC().UnixNano()
if len(seriesKeys) == 0 {
return nil
@@ -1387,7 +1320,7 @@ func (e *Engine) deleteSeriesRange(seriesKeys [][]byte, min, max int64) error {
// exists now. To reconcile the index, we walk the series keys that still exists
// on disk and cross out any keys that match the passed in series. Any series
// left in the slice at the end do not exist and can be deleted from the index.
- // Note: this is inherently racy if writes are occuring to the same measurement/series are
+ // Note: this is inherently racy if writes are occurring to the same measurement/series are
// being removed. A write could occur and exist in the cache at this point, but we
// would delete it from the index.
minKey := seriesKeys[0]
@@ -1431,6 +1364,7 @@ func (e *Engine) deleteSeriesRange(seriesKeys [][]byte, min, max int64) error {
// Have we deleted all values for the series? If so, we need to remove
// the series from the index.
if len(seriesKeys) > 0 {
+ buf := make([]byte, 1024) // For use when accessing series file.
for _, k := range seriesKeys {
// This key was crossed out earlier, skip it
if k == nil {
@@ -1451,13 +1385,27 @@ func (e *Engine) deleteSeriesRange(seriesKeys [][]byte, min, max int64) error {
i++
}
- // Some cache values still exists, leave the series in the index.
- if hasCacheValues {
+ if hasCacheValues || !removeIndex {
continue
}
+ // Remove the series from the series file and index.
+
+ // TODO(edd): we need to first check with all other shards if it's
+ // OK to tombstone the series in the series file.
+ //
+ // Further, in the case of the inmem index, we should only remove
+ // the series from the index if we also tombstone it in the series
+ // file.
+ name, tags := models.ParseKey(k)
+ sid := e.sfile.SeriesID([]byte(name), tags, buf)
+ if sid == 0 {
+ return fmt.Errorf("unable to find id for series key %s during deletion", k)
+ }
+
// Remove the series from the index for this shard
- if err := e.index.UnassignShard(string(k), e.id, ts); err != nil {
+ id := (sid << 32) | e.id
+ if err := e.index.UnassignShard(string(k), id, ts); err != nil {
return err
}
}
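The hunk above resolves each fully-deleted series to its ID in the shard's series file and passes UnassignShard a combined value with the series ID in the upper 32 bits and the shard ID in the lower 32 bits. A small illustration of that packing (the helper names are illustrative only; the real code inlines the expression as id := (sid << 32) | e.id):

// packSeriesShard combines a series ID and a shard ID the same way the delete
// path above does, assuming both values fit in 32 bits.
func packSeriesShard(seriesID, shardID uint64) uint64 {
	return (seriesID << 32) | shardID
}

// unpackSeriesShard recovers the two halves.
func unpackSeriesShard(id uint64) (seriesID, shardID uint64) {
	return id >> 32, id & 0xFFFFFFFF
}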
@@ -1511,13 +1459,16 @@ func (e *Engine) DeleteMeasurement(name []byte) error {
// DeleteMeasurement deletes a measurement and all related series.
func (e *Engine) deleteMeasurement(name []byte) error {
// Attempt to find the series keys.
- itr, err := e.index.MeasurementSeriesKeysByExprIterator(name, nil)
+ indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile}
+ itr, err := indexSet.MeasurementSeriesByExprIterator(name, nil)
if err != nil {
return err
- } else if itr != nil {
- return e.DeleteSeriesRange(itr, math.MinInt64, math.MaxInt64)
+ } else if itr == nil {
+ return nil
}
- return nil
+ defer itr.Close()
+ // Delete all associated series and remove them from the index.
+ return e.DeleteSeriesRange(tsdb.NewSeriesIteratorAdapter(e.sfile, itr), math.MinInt64, math.MaxInt64, true)
}
// ForEachMeasurementName iterates over each measurement name in the engine.
@@ -1525,15 +1476,6 @@ func (e *Engine) ForEachMeasurementName(fn func(name []byte) error) error {
return e.index.ForEachMeasurementName(fn)
}
-func (e *Engine) MeasurementSeriesKeysByExprIterator(name []byte, expr influxql.Expr) (tsdb.SeriesIterator, error) {
- return e.index.MeasurementSeriesKeysByExprIterator(name, expr)
-}
-
-// MeasurementSeriesKeysByExpr returns a list of series keys matching expr.
-func (e *Engine) MeasurementSeriesKeysByExpr(name []byte, expr influxql.Expr) ([][]byte, error) {
- return e.index.MeasurementSeriesKeysByExpr(name, expr)
-}
-
func (e *Engine) CreateSeriesListIfNotExists(keys, names [][]byte, tagsSlice []models.Tags) error {
return e.index.CreateSeriesListIfNotExists(keys, names, tagsSlice)
}
@@ -1617,7 +1559,7 @@ func (e *Engine) CreateSnapshot() (string, error) {
}
// Generate a snapshot of the index.
- return path, e.index.SnapshotTo(path)
+ return path, nil
}
// writeSnapshotAndCommit will write the passed cache to a new TSM file and remove the closed WAL segments.
@@ -2129,7 +2071,8 @@ func (e *Engine) createCallIterator(ctx context.Context, measurement string, cal
}
// Determine tagsets for this measurement based on dimensions and filters.
- tagSets, err := e.index.TagSets([]byte(measurement), opt)
+ indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile}
+ tagSets, err := indexSet.TagSets(e.sfile, []byte(measurement), opt)
if err != nil {
return nil, err
}
@@ -2199,7 +2142,8 @@ func (e *Engine) createVarRefIterator(ctx context.Context, measurement string, o
}
// Determine tagsets for this measurement based on dimensions and filters.
- tagSets, err := e.index.TagSets([]byte(measurement), opt)
+ indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile}
+ tagSets, err := indexSet.TagSets(e.sfile, []byte(measurement), opt)
if err != nil {
return nil, err
}
@@ -2213,7 +2157,6 @@ func (e *Engine) createVarRefIterator(ctx context.Context, measurement string, o
// Calculate tag sets and apply SLIMIT/SOFFSET.
tagSets = query.LimitTagSets(tagSets, opt.SLimit, opt.SOffset)
-
itrs := make([]query.Iterator, 0, len(tagSets))
if err := func() error {
for _, t := range tagSets {
@@ -2661,7 +2604,8 @@ func (e *Engine) IteratorCost(measurement string, opt query.IteratorOptions) (qu
}
// Determine all of the tag sets for this query.
- tagSets, err := e.index.TagSets([]byte(measurement), opt)
+ indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile}
+ tagSets, err := indexSet.TagSets(e.sfile, []byte(measurement), opt)
if err != nil {
return query.IteratorCost{}, err
}
@@ -2723,10 +2667,6 @@ func (e *Engine) seriesCost(seriesKey, field string, tmin, tmax int64) query.Ite
return c
}
-func (e *Engine) SeriesPointIterator(opt query.IteratorOptions) (query.Iterator, error) {
- return e.index.SeriesPointIterator(opt)
-}
-
// SeriesFieldKey combine a series key and field name for a unique string to be hashed to a numeric ID.
func SeriesFieldKey(seriesKey, field string) string {
return seriesKey + keyFieldSeparator + field
@@ -2740,21 +2680,22 @@ func SeriesFieldKeyBytes(seriesKey, field string) []byte {
return b
}
-func tsmFieldTypeToInfluxQLDataType(typ byte) (influxql.DataType, error) {
- switch typ {
- case BlockFloat64:
- return influxql.Float, nil
- case BlockInteger:
- return influxql.Integer, nil
- case BlockUnsigned:
- return influxql.Unsigned, nil
- case BlockBoolean:
- return influxql.Boolean, nil
- case BlockString:
- return influxql.String, nil
- default:
- return influxql.Unknown, fmt.Errorf("unknown block type: %v", typ)
+var (
+ blockToFieldType = []influxql.DataType{
+ BlockFloat64: influxql.Float,
+ BlockInteger: influxql.Integer,
+ BlockBoolean: influxql.Boolean,
+ BlockString: influxql.String,
+ BlockUnsigned: influxql.Unsigned,
}
+)
+
+func BlockTypeToInfluxQLDataType(typ byte) influxql.DataType {
+ if int(typ) < len(blockToFieldType) {
+ return blockToFieldType[typ]
+ }
+
+ return influxql.Unknown
}
// SeriesAndFieldFromCompositeKey returns the series key and the field key extracted from the composite key.
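BlockTypeToInfluxQLDataType replaces the old switch with a slice indexed by block type; any type byte beyond the end of the table degrades to influxql.Unknown, and both call sites above turn that into an "unknown block type" error. A brief usage sketch, assuming package tsm1 and the block type constants referenced in the table:

func exampleBlockTypeLookup() {
	// A known block type maps straight through the lookup table.
	_ = BlockTypeToInfluxQLDataType(BlockFloat64) // influxql.Float

	// An out-of-range type byte (for example from a corrupt index entry)
	// falls back to influxql.Unknown instead of panicking.
	_ = BlockTypeToInfluxQLDataType(0xFF) // influxql.Unknown
}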
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine_test.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine_test.go
index 468eaf8a45..53475c4fbc 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine_test.go
@@ -14,12 +14,14 @@ import (
"path/filepath"
"reflect"
"runtime"
+ "sort"
"strings"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
+ "github.com/influxdata/influxdb/logger"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/pkg/deep"
"github.com/influxdata/influxdb/query"
@@ -29,147 +31,46 @@ import (
"github.com/influxdata/influxql"
)
-/*
-// Ensure engine can load the metadata index after reopening.
-func TestEngine_LoadMetadataIndex(t *testing.T) {
- e := MustOpenEngine()
- defer e.Close()
-
- if err := e.WritePointsString(`cpu,host=A value=1.1 1000000000`); err != nil {
- t.Fatalf("failed to write points: %s", err.Error())
- }
-
- // Ensure we can close and load index from the WAL
- if err := e.Reopen(); err != nil {
- t.Fatal(err)
- }
-
- // Load metadata index.
- index := MustNewDatabaseIndex("db")
- if err := e.LoadMetadataIndex(1, index); err != nil {
- t.Fatal(err)
- }
-
- // Verify index is correct.
- m, err := index.Measurement([]byte("cpu"))
- if err != nil {
- t.Fatal(err)
- } else if m == nil {
- t.Fatal("measurement not found")
- } else if s := m.SeriesByID(1); s.Key != "cpu,host=A" || !reflect.DeepEqual(s.Tags(), models.NewTags(map[string]string{"host": "A"})) {
- t.Fatalf("unexpected series: %q / %#v", s.Key, s.Tags())
- }
-
- // write the snapshot, ensure we can close and load index from TSM
- if err := e.WriteSnapshot(); err != nil {
- t.Fatalf("error writing snapshot: %s", err.Error())
- }
-
- // Ensure we can close and load index from the WAL
- if err := e.Reopen(); err != nil {
- t.Fatal(err)
- }
-
- // Load metadata index.
- index = MustNewDatabaseIndex("db")
- if err := e.LoadMetadataIndex(1, index); err != nil {
- t.Fatal(err)
- }
-
- // Verify index is correct.
- if m, err = index.Measurement([]byte("cpu")); err != nil {
- t.Fatal(err)
- } else if m == nil {
- t.Fatal("measurement not found")
- } else if s := m.SeriesByID(1); s.Key != "cpu,host=A" || !reflect.DeepEqual(s.Tags(), models.NewTags(map[string]string{"host": "A"})) {
- t.Fatalf("unexpected series: %q / %#v", s.Key, s.Tags())
- }
-
- // Write a new point and ensure we can close and load index from TSM and WAL
- if err := e.WritePoints([]models.Point{
- MustParsePointString("cpu,host=B value=1.2 2000000000"),
- }); err != nil {
- t.Fatalf("failed to write points: %s", err.Error())
- }
-
- // Ensure we can close and load index from the TSM & WAL
- if err := e.Reopen(); err != nil {
- t.Fatal(err)
- }
-
- // Load metadata index.
- index = MustNewDatabaseIndex("db")
- if err := e.LoadMetadataIndex(1, index); err != nil {
- t.Fatal(err)
- }
-
- // Verify index is correct.
- if m, err = index.Measurement([]byte("cpu")); err != nil {
- t.Fatal(err)
- } else if m == nil {
- t.Fatal("measurement not found")
- } else if s := m.SeriesByID(1); s.Key != "cpu,host=A" || !reflect.DeepEqual(s.Tags(), models.NewTags(map[string]string{"host": "A"})) {
- t.Fatalf("unexpected series: %q / %#v", s.Key, s.Tags())
- } else if s := m.SeriesByID(2); s.Key != "cpu,host=B" || !reflect.DeepEqual(s.Tags(), models.NewTags(map[string]string{"host": "B"})) {
- t.Fatalf("unexpected series: %q / %#v", s.Key, s.Tags())
- }
-}
-*/
-
// Ensure that deletes only sent to the WAL will clear out the data from the cache on restart
func TestEngine_DeleteWALLoadMetadata(t *testing.T) {
- e := MustOpenDefaultEngine()
- defer e.Close()
+ for _, index := range tsdb.RegisteredIndexes() {
+ t.Run(index, func(t *testing.T) {
+ e := MustOpenEngine(index)
+ defer e.Close()
- if err := e.WritePointsString(
- `cpu,host=A value=1.1 1000000000`,
- `cpu,host=B value=1.2 2000000000`,
- ); err != nil {
- t.Fatalf("failed to write points: %s", err.Error())
- }
+ if err := e.WritePointsString(
+ `cpu,host=A value=1.1 1000000000`,
+ `cpu,host=B value=1.2 2000000000`,
+ ); err != nil {
+ t.Fatalf("failed to write points: %s", err.Error())
+ }
- // Remove series.
- itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}}
- if err := e.DeleteSeriesRange(itr, math.MinInt64, math.MaxInt64); err != nil {
- t.Fatalf("failed to delete series: %s", err.Error())
- }
+ // Remove series.
+ itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}}
+ if err := e.DeleteSeriesRange(itr, math.MinInt64, math.MaxInt64, false); err != nil {
+ t.Fatalf("failed to delete series: %s", err.Error())
+ }
- // Ensure we can close and load index from the WAL
- if err := e.Reopen(); err != nil {
- t.Fatal(err)
- }
+ // Ensure we can close and load index from the WAL
+ if err := e.Reopen(); err != nil {
+ t.Fatal(err)
+ }
- if exp, got := 0, len(e.Cache.Values(tsm1.SeriesFieldKeyBytes("cpu,host=A", "value"))); exp != got {
- t.Fatalf("unexpected number of values: got: %d. exp: %d", got, exp)
- }
+ if exp, got := 0, len(e.Cache.Values(tsm1.SeriesFieldKeyBytes("cpu,host=A", "value"))); exp != got {
+ t.Fatalf("unexpected number of values: got: %d. exp: %d", got, exp)
+ }
- if exp, got := 1, len(e.Cache.Values(tsm1.SeriesFieldKeyBytes("cpu,host=B", "value"))); exp != got {
- t.Fatalf("unexpected number of values: got: %d. exp: %d", got, exp)
+ if exp, got := 1, len(e.Cache.Values(tsm1.SeriesFieldKeyBytes("cpu,host=B", "value"))); exp != got {
+ t.Fatalf("unexpected number of values: got: %d. exp: %d", got, exp)
+ }
+ })
}
}
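The engine tests are being restructured to run once per registered index (inmem and tsi1), wrapping each body in a subtest so failures name the index that produced them. The pattern in isolation, with TestPerIndex_Sketch as a placeholder name and MustOpenEngine taken from this test file's helpers:

func TestPerIndex_Sketch(t *testing.T) {
	for _, index := range tsdb.RegisteredIndexes() {
		t.Run(index, func(t *testing.T) {
			e := MustOpenEngine(index)
			defer e.Close()
			// per-index assertions go here
		})
	}
}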
// Ensure that the engine can write & read shard digest files.
func TestEngine_Digest(t *testing.T) {
- // Create a tmp directory for test files.
- tmpDir, err := ioutil.TempDir("", "TestEngine_Digest")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(tmpDir)
-
- walPath := filepath.Join(tmpDir, "wal")
- os.MkdirAll(walPath, 0777)
-
- idxPath := filepath.Join(tmpDir, "index")
-
- // Create an engine to write a tsm file.
- dbName := "db0"
- opt := tsdb.NewEngineOptions()
- opt.InmemIndex = inmem.NewIndex(dbName)
- idx := tsdb.MustOpenIndex(1, dbName, idxPath, opt)
- defer idx.Close()
-
- e := tsm1.NewEngine(1, idx, dbName, tmpDir, walPath, opt).(*tsm1.Engine)
+ e := MustOpenEngine(inmem.IndexName)
+ defer e.Close()
if err := e.Open(); err != nil {
t.Fatalf("failed to open tsm1 engine: %s", err.Error())
@@ -190,11 +91,15 @@ func TestEngine_Digest(t *testing.T) {
digest := func() ([]span, error) {
// Get a reader for the shard's digest.
- r, err := e.Digest()
+ r, sz, err := e.Digest()
if err != nil {
return nil, err
}
+ if sz <= 0 {
+ t.Fatalf("expected digest size > 0")
+ }
+
// Make sure the digest can be read.
dr, err := tsm1.NewDigestReader(r)
if err != nil {
@@ -307,6 +212,9 @@ type span struct {
// Ensure that the engine will backup any TSM files created since the passed in time
func TestEngine_Backup(t *testing.T) {
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
// Generate temporary file.
f, _ := ioutil.TempFile("", "tsm")
f.Close()
@@ -323,11 +231,11 @@ func TestEngine_Backup(t *testing.T) {
// Write those points to the engine.
db := path.Base(f.Name())
opt := tsdb.NewEngineOptions()
- opt.InmemIndex = inmem.NewIndex(db)
- idx := tsdb.MustOpenIndex(1, db, filepath.Join(f.Name(), "index"), opt)
+ opt.InmemIndex = inmem.NewIndex(db, sfile.SeriesFile)
+ idx := tsdb.MustOpenIndex(1, db, filepath.Join(f.Name(), "index"), tsdb.NewSeriesIDSet(), sfile.SeriesFile, opt)
defer idx.Close()
- e := tsm1.NewEngine(1, idx, db, f.Name(), walPath, opt).(*tsm1.Engine)
+ e := tsm1.NewEngine(1, idx, db, f.Name(), walPath, sfile.SeriesFile, opt).(*tsm1.Engine)
// mock the planner so compactions don't run during the test
e.CompactionPlan = &mockPlanner{}
@@ -424,14 +332,17 @@ func TestEngine_Export(t *testing.T) {
p2 := MustParsePointString("cpu,host=B value=1.2 2000000000")
p3 := MustParsePointString("cpu,host=C value=1.3 3000000000")
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
// Write those points to the engine.
db := path.Base(f.Name())
opt := tsdb.NewEngineOptions()
- opt.InmemIndex = inmem.NewIndex(db)
- idx := tsdb.MustOpenIndex(1, db, filepath.Join(f.Name(), "index"), opt)
+ opt.InmemIndex = inmem.NewIndex(db, sfile.SeriesFile)
+ idx := tsdb.MustOpenIndex(1, db, filepath.Join(f.Name(), "index"), tsdb.NewSeriesIDSet(), sfile.SeriesFile, opt)
defer idx.Close()
- e := tsm1.NewEngine(1, idx, db, f.Name(), walPath, opt).(*tsm1.Engine)
+ e := tsm1.NewEngine(1, idx, db, f.Name(), walPath, sfile.SeriesFile, opt).(*tsm1.Engine)
// mock the planner so compactions don't run during the test
e.CompactionPlan = &mockPlanner{}
@@ -676,52 +587,55 @@ func getExportData(exBuf *bytes.Buffer) (map[string]*bytes.Buffer, error) {
func TestEngine_CreateIterator_Cache_Ascending(t *testing.T) {
t.Parallel()
- e := MustOpenDefaultEngine()
- defer e.Close()
+ for _, index := range tsdb.RegisteredIndexes() {
+ t.Run(index, func(t *testing.T) {
+ e := MustOpenEngine(index)
+ defer e.Close()
- // e.CreateMeasurement("cpu")
- e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
- e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
+ e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
+ e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
- if err := e.WritePointsString(
- `cpu,host=A value=1.1 1000000000`,
- `cpu,host=A value=1.2 2000000000`,
- `cpu,host=A value=1.3 3000000000`,
- ); err != nil {
- t.Fatalf("failed to write points: %s", err.Error())
- }
+ if err := e.WritePointsString(
+ `cpu,host=A value=1.1 1000000000`,
+ `cpu,host=A value=1.2 2000000000`,
+ `cpu,host=A value=1.3 3000000000`,
+ ); err != nil {
+ t.Fatalf("failed to write points: %s", err.Error())
+ }
- itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
- Expr: influxql.MustParseExpr(`value`),
- Dimensions: []string{"host"},
- StartTime: influxql.MinTime,
- EndTime: influxql.MaxTime,
- Ascending: true,
- })
- if err != nil {
- t.Fatal(err)
- }
- fitr := itr.(query.FloatIterator)
+ itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
+ Expr: influxql.MustParseExpr(`value`),
+ Dimensions: []string{"host"},
+ StartTime: influxql.MinTime,
+ EndTime: influxql.MaxTime,
+ Ascending: true,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ fitr := itr.(query.FloatIterator)
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected error(0): %v", err)
- } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
- t.Fatalf("unexpected point(0): %v", p)
- }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected error(1): %v", err)
- } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) {
- t.Fatalf("unexpected point(1): %v", p)
- }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected error(2): %v", err)
- } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
- t.Fatalf("unexpected point(2): %v", p)
- }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("expected eof, got error: %v", err)
- } else if p != nil {
- t.Fatalf("expected eof: %v", p)
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected error(0): %v", err)
+ } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
+ t.Fatalf("unexpected point(0): %v", p)
+ }
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected error(1): %v", err)
+ } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) {
+ t.Fatalf("unexpected point(1): %v", p)
+ }
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected error(2): %v", err)
+ } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
+ t.Fatalf("unexpected point(2): %v", p)
+ }
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("expected eof, got error: %v", err)
+ } else if p != nil {
+ t.Fatalf("expected eof: %v", p)
+ }
+ })
}
}
@@ -729,51 +643,56 @@ func TestEngine_CreateIterator_Cache_Ascending(t *testing.T) {
func TestEngine_CreateIterator_Cache_Descending(t *testing.T) {
t.Parallel()
- e := MustOpenDefaultEngine()
- defer e.Close()
+ for _, index := range tsdb.RegisteredIndexes() {
+ t.Run(index, func(t *testing.T) {
- e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
- e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
+ e := MustOpenEngine(index)
+ defer e.Close()
- if err := e.WritePointsString(
- `cpu,host=A value=1.1 1000000000`,
- `cpu,host=A value=1.2 2000000000`,
- `cpu,host=A value=1.3 3000000000`,
- ); err != nil {
- t.Fatalf("failed to write points: %s", err.Error())
- }
+ e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
+ e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
- itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
- Expr: influxql.MustParseExpr(`value`),
- Dimensions: []string{"host"},
- StartTime: influxql.MinTime,
- EndTime: influxql.MaxTime,
- Ascending: false,
- })
- if err != nil {
- t.Fatal(err)
- }
- fitr := itr.(query.FloatIterator)
+ if err := e.WritePointsString(
+ `cpu,host=A value=1.1 1000000000`,
+ `cpu,host=A value=1.2 2000000000`,
+ `cpu,host=A value=1.3 3000000000`,
+ ); err != nil {
+ t.Fatalf("failed to write points: %s", err.Error())
+ }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected error(0): %v", err)
- } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
- t.Fatalf("unexpected point(0): %v", p)
- }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unepxected error(1): %v", err)
- } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) {
- t.Fatalf("unexpected point(1): %v", p)
- }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected error(2): %v", err)
- } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
- t.Fatalf("unexpected point(2): %v", p)
- }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("expected eof, got error: %v", err)
- } else if p != nil {
- t.Fatalf("expected eof: %v", p)
+ itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
+ Expr: influxql.MustParseExpr(`value`),
+ Dimensions: []string{"host"},
+ StartTime: influxql.MinTime,
+ EndTime: influxql.MaxTime,
+ Ascending: false,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ fitr := itr.(query.FloatIterator)
+
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected error(0): %v", err)
+ } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
+ t.Fatalf("unexpected point(0): %v", p)
+ }
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unepxected error(1): %v", err)
+ } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) {
+ t.Fatalf("unexpected point(1): %v", p)
+ }
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected error(2): %v", err)
+ } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
+ t.Fatalf("unexpected point(2): %v", p)
+ }
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("expected eof, got error: %v", err)
+ } else if p != nil {
+ t.Fatalf("expected eof: %v", p)
+ }
+ })
}
}
@@ -781,52 +700,56 @@ func TestEngine_CreateIterator_Cache_Descending(t *testing.T) {
func TestEngine_CreateIterator_TSM_Ascending(t *testing.T) {
t.Parallel()
- e := MustOpenDefaultEngine()
- defer e.Close()
-
- e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
- e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
+ for _, index := range tsdb.RegisteredIndexes() {
+ t.Run(index, func(t *testing.T) {
+ e := MustOpenEngine(index)
+ defer e.Close()
- if err := e.WritePointsString(
- `cpu,host=A value=1.1 1000000000`,
- `cpu,host=A value=1.2 2000000000`,
- `cpu,host=A value=1.3 3000000000`,
- ); err != nil {
- t.Fatalf("failed to write points: %s", err.Error())
- }
- e.MustWriteSnapshot()
+ e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
+ e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
- itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
- Expr: influxql.MustParseExpr(`value`),
- Dimensions: []string{"host"},
- StartTime: 1000000000,
- EndTime: 3000000000,
- Ascending: true,
- })
- if err != nil {
- t.Fatal(err)
- }
- fitr := itr.(query.FloatIterator)
+ if err := e.WritePointsString(
+ `cpu,host=A value=1.1 1000000000`,
+ `cpu,host=A value=1.2 2000000000`,
+ `cpu,host=A value=1.3 3000000000`,
+ ); err != nil {
+ t.Fatalf("failed to write points: %s", err.Error())
+ }
+ e.MustWriteSnapshot()
+
+ itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
+ Expr: influxql.MustParseExpr(`value`),
+ Dimensions: []string{"host"},
+ StartTime: 1000000000,
+ EndTime: 3000000000,
+ Ascending: true,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ fitr := itr.(query.FloatIterator)
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected error(0): %v", err)
- } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
- t.Fatalf("unexpected point(0): %v", p)
- }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected error(1): %v", err)
- } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) {
- t.Fatalf("unexpected point(1): %v", p)
- }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected error(2): %v", err)
- } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
- t.Fatalf("unexpected point(2): %v", p)
- }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("expected eof, got error: %v", err)
- } else if p != nil {
- t.Fatalf("expected eof: %v", p)
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected error(0): %v", err)
+ } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
+ t.Fatalf("unexpected point(0): %v", p)
+ }
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected error(1): %v", err)
+ } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) {
+ t.Fatalf("unexpected point(1): %v", p)
+ }
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected error(2): %v", err)
+ } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
+ t.Fatalf("unexpected point(2): %v", p)
+ }
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("expected eof, got error: %v", err)
+ } else if p != nil {
+ t.Fatalf("expected eof: %v", p)
+ }
+ })
}
}
@@ -834,52 +757,56 @@ func TestEngine_CreateIterator_TSM_Ascending(t *testing.T) {
func TestEngine_CreateIterator_TSM_Descending(t *testing.T) {
t.Parallel()
- e := MustOpenDefaultEngine()
- defer e.Close()
-
- e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
- e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
+ for _, index := range tsdb.RegisteredIndexes() {
+ t.Run(index, func(t *testing.T) {
+ e := MustOpenEngine(index)
+ defer e.Close()
- if err := e.WritePointsString(
- `cpu,host=A value=1.1 1000000000`,
- `cpu,host=A value=1.2 2000000000`,
- `cpu,host=A value=1.3 3000000000`,
- ); err != nil {
- t.Fatalf("failed to write points: %s", err.Error())
- }
- e.MustWriteSnapshot()
+ e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
+ e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
- itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
- Expr: influxql.MustParseExpr(`value`),
- Dimensions: []string{"host"},
- StartTime: influxql.MinTime,
- EndTime: influxql.MaxTime,
- Ascending: false,
- })
- if err != nil {
- t.Fatal(err)
- }
- fitr := itr.(query.FloatIterator)
+ if err := e.WritePointsString(
+ `cpu,host=A value=1.1 1000000000`,
+ `cpu,host=A value=1.2 2000000000`,
+ `cpu,host=A value=1.3 3000000000`,
+ ); err != nil {
+ t.Fatalf("failed to write points: %s", err.Error())
+ }
+ e.MustWriteSnapshot()
+
+ itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
+ Expr: influxql.MustParseExpr(`value`),
+ Dimensions: []string{"host"},
+ StartTime: influxql.MinTime,
+ EndTime: influxql.MaxTime,
+ Ascending: false,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ fitr := itr.(query.FloatIterator)
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected error(0): %v", err)
- } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
- t.Fatalf("unexpected point(0): %v", p)
- }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected error(1): %v", err)
- } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) {
- t.Fatalf("unexpected point(1): %v", p)
- }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected error(2): %v", err)
- } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
- t.Fatalf("unexpected point(2): %v", p)
- }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("expected eof, got error: %v", err)
- } else if p != nil {
- t.Fatalf("expected eof: %v", p)
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected error(0): %v", err)
+ } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
+ t.Fatalf("unexpected point(0): %v", p)
+ }
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected error(1): %v", err)
+ } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) {
+ t.Fatalf("unexpected point(1): %v", p)
+ }
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected error(2): %v", err)
+ } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
+ t.Fatalf("unexpected point(2): %v", p)
+ }
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("expected eof, got error: %v", err)
+ } else if p != nil {
+ t.Fatalf("expected eof: %v", p)
+ }
+ })
}
}
@@ -887,55 +814,59 @@ func TestEngine_CreateIterator_TSM_Descending(t *testing.T) {
func TestEngine_CreateIterator_Aux(t *testing.T) {
t.Parallel()
- e := MustOpenDefaultEngine()
- defer e.Close()
-
- e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
- e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("F"), influxql.Float)
- e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
+ for _, index := range tsdb.RegisteredIndexes() {
+ t.Run(index, func(t *testing.T) {
+ e := MustOpenEngine(index)
+ defer e.Close()
- if err := e.WritePointsString(
- `cpu,host=A value=1.1 1000000000`,
- `cpu,host=A F=100 1000000000`,
- `cpu,host=A value=1.2 2000000000`,
- `cpu,host=A value=1.3 3000000000`,
- `cpu,host=A F=200 3000000000`,
- ); err != nil {
- t.Fatalf("failed to write points: %s", err.Error())
- }
+ e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
+ e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("F"), influxql.Float)
+ e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
+
+ if err := e.WritePointsString(
+ `cpu,host=A value=1.1 1000000000`,
+ `cpu,host=A F=100 1000000000`,
+ `cpu,host=A value=1.2 2000000000`,
+ `cpu,host=A value=1.3 3000000000`,
+ `cpu,host=A F=200 3000000000`,
+ ); err != nil {
+ t.Fatalf("failed to write points: %s", err.Error())
+ }
- itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
- Expr: influxql.MustParseExpr(`value`),
- Aux: []influxql.VarRef{{Val: "F"}},
- Dimensions: []string{"host"},
- StartTime: influxql.MinTime,
- EndTime: influxql.MaxTime,
- Ascending: true,
- })
- if err != nil {
- t.Fatal(err)
- }
- fitr := itr.(query.FloatIterator)
+ itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
+ Expr: influxql.MustParseExpr(`value`),
+ Aux: []influxql.VarRef{{Val: "F"}},
+ Dimensions: []string{"host"},
+ StartTime: influxql.MinTime,
+ EndTime: influxql.MaxTime,
+ Ascending: true,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ fitr := itr.(query.FloatIterator)
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected error(0): %v", err)
- } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1, Aux: []interface{}{float64(100)}}) {
- t.Fatalf("unexpected point(0): %v", p)
- }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected error(1): %v", err)
- } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2, Aux: []interface{}{(*float64)(nil)}}) {
- t.Fatalf("unexpected point(1): %v", p)
- }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected error(2): %v", err)
- } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3, Aux: []interface{}{float64(200)}}) {
- t.Fatalf("unexpected point(2): %v", p)
- }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("expected eof, got error: %v", err)
- } else if p != nil {
- t.Fatalf("expected eof: %v", p)
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected error(0): %v", err)
+ } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1, Aux: []interface{}{float64(100)}}) {
+ t.Fatalf("unexpected point(0): %v", p)
+ }
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected error(1): %v", err)
+ } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2, Aux: []interface{}{(*float64)(nil)}}) {
+ t.Fatalf("unexpected point(1): %v", p)
+ }
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected error(2): %v", err)
+ } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3, Aux: []interface{}{float64(200)}}) {
+ t.Fatalf("unexpected point(2): %v", p)
+ }
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("expected eof, got error: %v", err)
+ } else if p != nil {
+ t.Fatalf("expected eof: %v", p)
+ }
+ })
}
}
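// The Aux assertions above expect a typed nil, (*float64)(nil), for the field
// that has no value at time 2000000000. In Go an interface holding a typed nil
// pointer is not equal to nil, which is why the expected point spells out the
// typed value. A two-line illustration:
//
//	var missing interface{} = (*float64)(nil)
//	fmt.Println(missing == nil) // prints "false"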
@@ -943,62 +874,149 @@ func TestEngine_CreateIterator_Aux(t *testing.T) {
func TestEngine_CreateIterator_Condition(t *testing.T) {
t.Parallel()
- e := MustOpenDefaultEngine()
- defer e.Close()
-
- e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
- e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("X"), influxql.Float)
- e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("Y"), influxql.Float)
- e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
- e.SetFieldName([]byte("cpu"), "X")
- e.SetFieldName([]byte("cpu"), "Y")
+ for _, index := range tsdb.RegisteredIndexes() {
+ t.Run(index, func(t *testing.T) {
+ e := MustOpenEngine(index)
+ defer e.Close()
- if err := e.WritePointsString(
- `cpu,host=A value=1.1 1000000000`,
- `cpu,host=A X=10 1000000000`,
- `cpu,host=A Y=100 1000000000`,
+ e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
+ e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("X"), influxql.Float)
+ e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("Y"), influxql.Float)
+ e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
+ e.SetFieldName([]byte("cpu"), "X")
+ e.SetFieldName([]byte("cpu"), "Y")
+
+ if err := e.WritePointsString(
+ `cpu,host=A value=1.1 1000000000`,
+ `cpu,host=A X=10 1000000000`,
+ `cpu,host=A Y=100 1000000000`,
+
+ `cpu,host=A value=1.2 2000000000`,
+
+ `cpu,host=A value=1.3 3000000000`,
+ `cpu,host=A X=20 3000000000`,
+ `cpu,host=A Y=200 3000000000`,
+ ); err != nil {
+ t.Fatalf("failed to write points: %s", err.Error())
+ }
- `cpu,host=A value=1.2 2000000000`,
+ itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
+ Expr: influxql.MustParseExpr(`value`),
+ Dimensions: []string{"host"},
+ Condition: influxql.MustParseExpr(`X = 10 OR Y > 150`),
+ StartTime: influxql.MinTime,
+ EndTime: influxql.MaxTime,
+ Ascending: true,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ fitr := itr.(query.FloatIterator)
- `cpu,host=A value=1.3 3000000000`,
- `cpu,host=A X=20 3000000000`,
- `cpu,host=A Y=200 3000000000`,
- ); err != nil {
- t.Fatalf("failed to write points: %s", err.Error())
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected error(0): %v", err)
+ } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
+ t.Fatalf("unexpected point(0): %v", p)
+ }
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected point(1): %v", err)
+ } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
+ t.Fatalf("unexpected point(1): %v", p)
+ }
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("expected eof, got error: %v", err)
+ } else if p != nil {
+ t.Fatalf("expected eof: %v", p)
+ }
+ })
}
+}
- itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
- Expr: influxql.MustParseExpr(`value`),
- Dimensions: []string{"host"},
- Condition: influxql.MustParseExpr(`X = 10 OR Y > 150`),
- StartTime: influxql.MinTime,
- EndTime: influxql.MaxTime,
- Ascending: true,
- })
- if err != nil {
- t.Fatal(err)
- }
- fitr := itr.(query.FloatIterator)
+// Test that series id set gets updated and returned appropriately.
+func TestIndex_SeriesIDSet(t *testing.T) {
+ test := func(index string) error {
+ engine := MustOpenEngine(index)
+ defer engine.Close()
+
+ // Add some series.
+ engine.MustAddSeries("cpu", map[string]string{"host": "a", "region": "west"})
+ engine.MustAddSeries("cpu", map[string]string{"host": "b", "region": "west"})
+ engine.MustAddSeries("cpu", map[string]string{"host": "b"})
+ engine.MustAddSeries("gpu", nil)
+ engine.MustAddSeries("gpu", map[string]string{"host": "b"})
+ engine.MustAddSeries("mem", map[string]string{"host": "z"})
+
+ // Collect series IDs.
+ var ids []uint64
+ var e tsdb.SeriesIDElem
+ var err error
+
+ itr := engine.sfile.SeriesIDIterator()
+ for e, err = itr.Next(); ; e, err = itr.Next() {
+ if err != nil {
+ return err
+ } else if e.SeriesID == 0 {
+ break
+ }
+ ids = append(ids, e.SeriesID)
+ }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected error(0): %v", err)
- } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
- t.Fatalf("unexpected point(0): %v", p)
- }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected point(1): %v", err)
- } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
- t.Fatalf("unexpected point(1): %v", p)
+ for _, id := range ids {
+ if !engine.SeriesIDSet().Contains(id) {
+ return fmt.Errorf("bitmap does not contain ID: %d", id)
+ }
+ }
+
+ // Drop all the series for the gpu measurement; they should no longer
+ // be in the series ID set.
+ if err := engine.DeleteMeasurement([]byte("gpu")); err != nil {
+ return err
+ }
+
+ // Drop the specific mem series
+ ditr := &seriesIterator{keys: [][]byte{[]byte("mem,host=z")}}
+ if err := engine.DeleteSeriesRange(ditr, math.MinInt64, math.MaxInt64, true); err != nil {
+ return err
+ }
+
+ // Since series IDs are added sequentially, the last three would be the
+ // series for the gpu measurement and the dropped mem series...
+ for _, id := range ids {
+ contains := engine.SeriesIDSet().Contains(id)
+ if id < 4 && !contains {
+ return fmt.Errorf("bitmap does not contain ID: %d, but should", id)
+ } else if id >= 4 && contains {
+ return fmt.Errorf("bitmap still contains ID: %d after delete", id)
+ }
+ }
+
+ // Reopen the engine; the remaining series should be re-added to the bitmap.
+ if err := engine.Reopen(); err != nil {
+ return err
+ }
+
+ for _, id := range ids {
+ contains := engine.SeriesIDSet().Contains(id)
+ if id < 4 && !contains {
+ return fmt.Errorf("[after re-open] bitmap does not contain ID: %d, but should", id)
+ } else if id >= 4 && contains {
+ return fmt.Errorf("[after re-open] bitmap still contains ID: %d after delete", id)
+ }
+ }
+ return nil
}
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("expected eof, got error: %v", err)
- } else if p != nil {
- t.Fatalf("expected eof: %v", p)
+
+ for _, index := range tsdb.RegisteredIndexes() {
+ t.Run(index, func(t *testing.T) {
+ if err := test(index); err != nil {
+ t.Error(err)
+ }
+ })
}
}
// Ensures that deleting series from TSM files with multiple fields removes all the
-/// series
+// series from the TSM files but leaves the series in the index intact.
func TestEngine_DeleteSeries(t *testing.T) {
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) {
@@ -1007,16 +1025,19 @@ func TestEngine_DeleteSeries(t *testing.T) {
p2 := MustParsePointString("cpu,host=B value=1.2 2000000000")
p3 := MustParsePointString("cpu,host=A sum=1.3 3000000000")
- e := NewEngine(index)
+ e, err := NewEngine(index)
+ if err != nil {
+ t.Fatal(err)
+ }
+
// mock the planner so compactions don't run during the test
e.CompactionPlan = &mockPlanner{}
-
if err := e.Open(); err != nil {
- panic(err)
+ t.Fatal(err)
}
defer e.Close()
- if err := e.WritePoints([]models.Point{p1, p2, p3}); err != nil {
+ if err := e.writePoints(p1, p2, p3); err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
if err := e.WriteSnapshot(); err != nil {
@@ -1029,7 +1050,7 @@ func TestEngine_DeleteSeries(t *testing.T) {
}
itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}}
- if err := e.DeleteSeriesRange(itr, math.MinInt64, math.MaxInt64); err != nil {
+ if err := e.DeleteSeriesRange(itr, math.MinInt64, math.MaxInt64, false); err != nil {
t.Fatalf("failed to delete series: %v", err)
}
@@ -1042,29 +1063,66 @@ func TestEngine_DeleteSeries(t *testing.T) {
if _, ok := keys[exp]; !ok {
t.Fatalf("wrong series deleted: exp %v, got %v", exp, keys)
}
+
+ // Deleting all the TSM values for a single series should still leave
+ // the series in the index intact.
+ indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile}
+ iter, err := indexSet.MeasurementSeriesIDIterator([]byte("cpu"))
+ if err != nil {
+ t.Fatalf("iterator error: %v", err)
+ } else if iter == nil {
+ t.Fatal("nil iterator")
+ }
+ defer iter.Close()
+
+ var gotKeys []string
+ expKeys := []string{"cpu,host=A", "cpu,host=B"}
+
+ for {
+ elem, err := iter.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if elem.SeriesID == 0 {
+ break
+ }
+
+ // Lookup series.
+ name, tags := e.sfile.Series(elem.SeriesID)
+ gotKeys = append(gotKeys, string(models.MakeKey(name, tags)))
+ }
+
+ if !reflect.DeepEqual(gotKeys, expKeys) {
+ t.Fatalf("got keys %v, expected %v", gotKeys, expKeys)
+ }
})
}
}
+// Ensures that deleting series from TSM files over a range of time deletes the
+// series from the TSM files but leaves the series in the index.
func TestEngine_DeleteSeriesRange(t *testing.T) {
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) {
// Create a few points.
- p1 := MustParsePointString("cpu,host=0 value=1.1 6000000000") // Should not be deleted
+ p1 := MustParsePointString("cpu,host=0 value=1.1 6000000000")
p2 := MustParsePointString("cpu,host=A value=1.2 2000000000")
p3 := MustParsePointString("cpu,host=A value=1.3 3000000000")
- p4 := MustParsePointString("cpu,host=B value=1.3 4000000000") // Should not be deleted
- p5 := MustParsePointString("cpu,host=B value=1.3 5000000000") // Should not be deleted
+ p4 := MustParsePointString("cpu,host=B value=1.3 4000000000")
+ p5 := MustParsePointString("cpu,host=B value=1.3 5000000000")
p6 := MustParsePointString("cpu,host=C value=1.3 1000000000")
- p7 := MustParsePointString("mem,host=C value=1.3 1000000000") // Should not be deleted
- p8 := MustParsePointString("disk,host=C value=1.3 1000000000") // Should not be deleted
+ p7 := MustParsePointString("mem,host=C value=1.3 1000000000")
+ p8 := MustParsePointString("disk,host=C value=1.3 1000000000")
+
+ e, err := NewEngine(index)
+ if err != nil {
+ t.Fatal(err)
+ }
- e := NewEngine(index)
// mock the planner so compactions don't run during the test
e.CompactionPlan = &mockPlanner{}
-
if err := e.Open(); err != nil {
- panic(err)
+ t.Fatal(err)
}
defer e.Close()
@@ -1087,7 +1145,7 @@ func TestEngine_DeleteSeriesRange(t *testing.T) {
}
itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=0"), []byte("cpu,host=A"), []byte("cpu,host=B"), []byte("cpu,host=C")}}
- if err := e.DeleteSeriesRange(itr, 0, 3000000000); err != nil {
+ if err := e.DeleteSeriesRange(itr, 0, 3000000000, false); err != nil {
t.Fatalf("failed to delete series: %v", err)
}
@@ -1101,36 +1159,37 @@ func TestEngine_DeleteSeriesRange(t *testing.T) {
t.Fatalf("wrong series deleted: exp %v, got %v", exp, keys)
}
- // Check that the series still exists in the index
- iter, err := e.MeasurementSeriesKeysByExprIterator([]byte("cpu"), nil)
+ // Deleting TSM values over a time range should still leave all of the
+ // series in the index intact.
+ indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile}
+ iter, err := indexSet.MeasurementSeriesIDIterator([]byte("cpu"))
if err != nil {
t.Fatalf("iterator error: %v", err)
+ } else if iter == nil {
+ t.Fatal("nil iterator")
}
+ defer iter.Close()
- elem := iter.Next()
- if elem == nil {
- t.Fatalf("series index mismatch: got nil, exp 2 series")
- }
+ var gotKeys []string
+ expKeys := []string{"cpu,host=0", "cpu,host=A", "cpu,host=B", "cpu,host=C"}
- if got, exp := elem.Name(), []byte("cpu"); !bytes.Equal(got, exp) {
- t.Fatalf("series mismatch: got %s, exp %s", got, exp)
- }
-
- if got, exp := elem.Tags(), models.NewTags(map[string]string{"host": "0"}); !got.Equal(exp) {
- t.Fatalf("series mismatch: got %s, exp %s", got, exp)
- }
-
- elem = iter.Next()
- if elem == nil {
- t.Fatalf("series index next mismatch: got nil")
- }
+ for {
+ elem, err := iter.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if elem.SeriesID == 0 {
+ break
+ }
- if got, exp := elem.Name(), []byte("cpu"); !bytes.Equal(got, exp) {
- t.Fatalf("series mismatch: got %s, exp %s", got, exp)
+ // Lookup series.
+ name, tags := e.sfile.Series(elem.SeriesID)
+ gotKeys = append(gotKeys, string(models.MakeKey(name, tags)))
}
+ sort.Strings(gotKeys)
- if got, exp := elem.Tags(), models.NewTags(map[string]string{"host": "B"}); !got.Equal(exp) {
- t.Fatalf("series mismatch: got %s, exp %s", got, exp)
+ if !reflect.DeepEqual(gotKeys, expKeys) {
+ t.Fatalf("got keys %v, expected %v", gotKeys, expKeys)
}
})
@@ -1143,12 +1202,15 @@ func TestEngine_DeleteSeriesRange_OutsideTime(t *testing.T) {
// Create a few points.
p1 := MustParsePointString("cpu,host=A value=1.1 1000000000") // Should not be deleted
- e := NewEngine(index)
+ e, err := NewEngine(index)
+ if err != nil {
+ t.Fatal(err)
+ }
+
// mock the planner so compactions don't run during the test
e.CompactionPlan = &mockPlanner{}
-
if err := e.Open(); err != nil {
- panic(err)
+ t.Fatal(err)
}
defer e.Close()
@@ -1171,7 +1233,7 @@ func TestEngine_DeleteSeriesRange_OutsideTime(t *testing.T) {
}
itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}}
- if err := e.DeleteSeriesRange(itr, 0, 0); err != nil {
+ if err := e.DeleteSeriesRange(itr, 0, 0, false); err != nil {
t.Fatalf("failed to delete series: %v", err)
}
@@ -1186,25 +1248,27 @@ func TestEngine_DeleteSeriesRange_OutsideTime(t *testing.T) {
}
// Check that the series still exists in the index
- iter, err := e.MeasurementSeriesKeysByExprIterator([]byte("cpu"), nil)
+ iter, err := e.index.MeasurementSeriesIDIterator([]byte("cpu"))
if err != nil {
t.Fatalf("iterator error: %v", err)
}
+ defer iter.Close()
- if iter == nil {
- t.Fatalf("series iterator nil")
+ elem, err := iter.Next()
+ if err != nil {
+ t.Fatal(err)
}
-
- elem := iter.Next()
- if elem == nil {
- t.Fatalf("series index mismatch: got nil, exp 1 series")
+ if elem.SeriesID == 0 {
+ t.Fatalf("series index mismatch: EOF, exp 1 series")
}
- if got, exp := elem.Name(), []byte("cpu"); !bytes.Equal(got, exp) {
+ // Lookup series.
+ name, tags := e.sfile.Series(elem.SeriesID)
+ if got, exp := name, []byte("cpu"); !bytes.Equal(got, exp) {
t.Fatalf("series mismatch: got %s, exp %s", got, exp)
}
- if got, exp := elem.Tags(), models.NewTags(map[string]string{"host": "A"}); !got.Equal(exp) {
+ if got, exp := tags, models.NewTags(map[string]string{"host": "A"}); !got.Equal(exp) {
t.Fatalf("series mismatch: got %s, exp %s", got, exp)
}
})
@@ -1219,22 +1283,20 @@ func TestEngine_LastModified(t *testing.T) {
p2 := MustParsePointString("cpu,host=B value=1.2 2000000000")
p3 := MustParsePointString("cpu,host=A sum=1.3 3000000000")
- e := NewEngine(index)
+ e, err := NewEngine(index)
+ if err != nil {
+ t.Fatal(err)
+ }
// mock the planner so compactions don't run during the test
e.CompactionPlan = &mockPlanner{}
-
- if lm := e.LastModified(); !lm.IsZero() {
- t.Fatalf("expected zero time, got %v", lm.UTC())
- }
-
e.SetEnabled(false)
if err := e.Open(); err != nil {
- t.Fatalf("failed to open tsm1 engine: %s", err.Error())
+ t.Fatal(err)
}
defer e.Close()
- if err := e.WritePoints([]models.Point{p1, p2, p3}); err != nil {
+ if err := e.writePoints(p1, p2, p3); err != nil {
t.Fatalf("failed to write points: %s", err.Error())
}
@@ -1255,7 +1317,7 @@ func TestEngine_LastModified(t *testing.T) {
}
itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}}
- if err := e.DeleteSeriesRange(itr, math.MinInt64, math.MaxInt64); err != nil {
+ if err := e.DeleteSeriesRange(itr, math.MinInt64, math.MaxInt64, false); err != nil {
t.Fatalf("failed to delete series: %v", err)
}
@@ -1268,6 +1330,9 @@ func TestEngine_LastModified(t *testing.T) {
}
func TestEngine_SnapshotsDisabled(t *testing.T) {
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
// Generate temporary file.
dir, _ := ioutil.TempDir("", "tsm")
walPath := filepath.Join(dir, "wal")
@@ -1277,11 +1342,11 @@ func TestEngine_SnapshotsDisabled(t *testing.T) {
// Create a tsm1 engine.
db := path.Base(dir)
opt := tsdb.NewEngineOptions()
- opt.InmemIndex = inmem.NewIndex(db)
- idx := tsdb.MustOpenIndex(1, db, filepath.Join(dir, "index"), opt)
+ opt.InmemIndex = inmem.NewIndex(db, sfile.SeriesFile)
+ idx := tsdb.MustOpenIndex(1, db, filepath.Join(dir, "index"), tsdb.NewSeriesIDSet(), sfile.SeriesFile, opt)
defer idx.Close()
- e := tsm1.NewEngine(1, idx, db, dir, walPath, opt).(*tsm1.Engine)
+ e := tsm1.NewEngine(1, idx, db, dir, walPath, sfile.SeriesFile, opt).(*tsm1.Engine)
// mock the planner so compactions don't run during the test
e.CompactionPlan = &mockPlanner{}
@@ -1306,48 +1371,53 @@ func TestEngine_SnapshotsDisabled(t *testing.T) {
func TestEngine_CreateCursor_Ascending(t *testing.T) {
t.Parallel()
- e := MustOpenDefaultEngine()
- defer e.Close()
+ for _, index := range tsdb.RegisteredIndexes() {
+ t.Run(index, func(t *testing.T) {
- e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
- e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
+ e := MustOpenEngine(index)
+ defer e.Close()
- if err := e.WritePointsString(
- `cpu,host=A value=1.1 1`,
- `cpu,host=A value=1.2 2`,
- `cpu,host=A value=1.3 3`,
- ); err != nil {
- t.Fatalf("failed to write points: %s", err.Error())
- }
- e.MustWriteSnapshot()
+ e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
+ e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
- if err := e.WritePointsString(
- `cpu,host=A value=10.1 10`,
- `cpu,host=A value=11.2 11`,
- `cpu,host=A value=12.3 12`,
- ); err != nil {
- t.Fatalf("failed to write points: %s", err.Error())
- }
+ if err := e.WritePointsString(
+ `cpu,host=A value=1.1 1`,
+ `cpu,host=A value=1.2 2`,
+ `cpu,host=A value=1.3 3`,
+ ); err != nil {
+ t.Fatalf("failed to write points: %s", err.Error())
+ }
+ e.MustWriteSnapshot()
- cur, err := e.CreateCursor(context.Background(), &tsdb.CursorRequest{
- Measurement: "cpu",
- Series: "cpu,host=A",
- Field: "value",
- Ascending: true,
- StartTime: 2,
- EndTime: 11,
- })
- if err != nil {
- t.Fatal(err)
- }
+ if err := e.WritePointsString(
+ `cpu,host=A value=10.1 10`,
+ `cpu,host=A value=11.2 11`,
+ `cpu,host=A value=12.3 12`,
+ ); err != nil {
+ t.Fatalf("failed to write points: %s", err.Error())
+ }
- fcur := cur.(tsdb.FloatBatchCursor)
- ts, vs := fcur.Next()
- if !cmp.Equal([]int64{2, 3, 10, 11}, ts) {
- t.Fatal("unexpect timestamps")
- }
- if !cmp.Equal([]float64{1.2, 1.3, 10.1, 11.2}, vs) {
- t.Fatal("unexpect timestamps")
+ cur, err := e.CreateCursor(context.Background(), &tsdb.CursorRequest{
+ Measurement: "cpu",
+ Series: "cpu,host=A",
+ Field: "value",
+ Ascending: true,
+ StartTime: 2,
+ EndTime: 11,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fcur := cur.(tsdb.FloatBatchCursor)
+ ts, vs := fcur.Next()
+ if !cmp.Equal([]int64{2, 3, 10, 11}, ts) {
+ t.Fatal("unexpect timestamps")
+ }
+ if !cmp.Equal([]float64{1.2, 1.3, 10.1, 11.2}, vs) {
+ t.Fatal("unexpect timestamps")
+ }
+ })
}
}
@@ -1355,48 +1425,74 @@ func TestEngine_CreateCursor_Ascending(t *testing.T) {
func TestEngine_CreateCursor_Descending(t *testing.T) {
t.Parallel()
- e := MustOpenDefaultEngine()
- defer e.Close()
+ for _, index := range tsdb.RegisteredIndexes() {
+ t.Run(index, func(t *testing.T) {
- e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
- e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
+ e := MustOpenEngine(index)
+ defer e.Close()
- if err := e.WritePointsString(
- `cpu,host=A value=1.1 1`,
- `cpu,host=A value=1.2 2`,
- `cpu,host=A value=1.3 3`,
- ); err != nil {
- t.Fatalf("failed to write points: %s", err.Error())
- }
- e.MustWriteSnapshot()
+ e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
+ e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
- if err := e.WritePointsString(
- `cpu,host=A value=10.1 10`,
- `cpu,host=A value=11.2 11`,
- `cpu,host=A value=12.3 12`,
- ); err != nil {
- t.Fatalf("failed to write points: %s", err.Error())
- }
+ if err := e.WritePointsString(
+ `cpu,host=A value=1.1 1`,
+ `cpu,host=A value=1.2 2`,
+ `cpu,host=A value=1.3 3`,
+ ); err != nil {
+ t.Fatalf("failed to write points: %s", err.Error())
+ }
+ e.MustWriteSnapshot()
- cur, err := e.CreateCursor(context.Background(), &tsdb.CursorRequest{
- Measurement: "cpu",
- Series: "cpu,host=A",
- Field: "value",
- Ascending: false,
- StartTime: 2,
- EndTime: 11,
- })
- if err != nil {
- t.Fatal(err)
+ if err := e.WritePointsString(
+ `cpu,host=A value=10.1 10`,
+ `cpu,host=A value=11.2 11`,
+ `cpu,host=A value=12.3 12`,
+ ); err != nil {
+ t.Fatalf("failed to write points: %s", err.Error())
+ }
+
+ cur, err := e.CreateCursor(context.Background(), &tsdb.CursorRequest{
+ Measurement: "cpu",
+ Series: "cpu,host=A",
+ Field: "value",
+ Ascending: false,
+ StartTime: 2,
+ EndTime: 11,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fcur := cur.(tsdb.FloatBatchCursor)
+ ts, vs := fcur.Next()
+ if !cmp.Equal([]int64{11, 10, 3, 2}, ts) {
+ t.Fatal("unexpect timestamps")
+ }
+ if !cmp.Equal([]float64{11.2, 10.1, 1.3, 1.2}, vs) {
+ t.Fatal("unexpect timestamps")
+ }
+ })
}
+}
- fcur := cur.(tsdb.FloatBatchCursor)
- ts, vs := fcur.Next()
- if !cmp.Equal([]int64{11, 10, 3, 2}, ts) {
- t.Fatal("unexpect timestamps")
+func makeBlockTypeSlice(n int) []byte {
+ r := make([]byte, n)
+ b := tsm1.BlockFloat64
+ m := tsm1.BlockUnsigned + 1
+ for i := 0; i < len(r); i++ {
+ r[i] = b % m
}
- if !cmp.Equal([]float64{11.2, 10.1, 1.3, 1.2}, vs) {
- t.Fatal("unexpect timestamps")
+ return r
+}
+
+var blockType = influxql.Unknown
+
+func BenchmarkBlockTypeToInfluxQLDataType(b *testing.B) {
+ t := makeBlockTypeSlice(100)
+ for i := 0; i < b.N; i++ {
+ for j := 0; j < len(t); j++ {
+ blockType = tsm1.BlockTypeToInfluxQLDataType(t[j])
+ }
}
}
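// The package-level blockType variable above acts as a sink so the compiler
// cannot optimize the conversion being benchmarked away. A generic sketch of
// the same idiom (BenchmarkConversionSketch and sink are illustrative names):
var sink influxql.DataType

func BenchmarkConversionSketch(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sink = tsm1.BlockTypeToInfluxQLDataType(tsm1.BlockFloat64)
	}
}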
@@ -1405,28 +1501,45 @@ func TestEngine_CreateCursor_Descending(t *testing.T) {
func TestEngine_DisableEnableCompactions_Concurrent(t *testing.T) {
t.Parallel()
- e := MustOpenDefaultEngine()
- defer e.Close()
+ for _, index := range tsdb.RegisteredIndexes() {
+ t.Run(index, func(t *testing.T) {
- var wg sync.WaitGroup
- wg.Add(2)
+ e := MustOpenEngine(index)
+ defer e.Close()
- go func() {
- defer wg.Done()
- for i := 0; i < 1000; i++ {
- e.SetCompactionsEnabled(true)
- e.SetCompactionsEnabled(false)
- }
- }()
+ var wg sync.WaitGroup
+ wg.Add(2)
- go func() {
- defer wg.Done()
- for i := 0; i < 1000; i++ {
- e.SetCompactionsEnabled(false)
- e.SetCompactionsEnabled(true)
- }
- }()
- wg.Wait()
+ go func() {
+ defer wg.Done()
+ for i := 0; i < 1000; i++ {
+ e.SetCompactionsEnabled(true)
+ e.SetCompactionsEnabled(false)
+ }
+ }()
+
+ go func() {
+ defer wg.Done()
+ for i := 0; i < 1000; i++ {
+ e.SetCompactionsEnabled(false)
+ e.SetCompactionsEnabled(true)
+ }
+ }()
+
+ done := make(chan struct{})
+ go func() {
+ wg.Wait()
+ close(done)
+ }()
+
+ // Wait for waitgroup or fail if it takes too long.
+ select {
+ case <-time.NewTimer(30 * time.Second).C:
+ t.Fatalf("timed out after 30 seconds waiting for waitgroup")
+ case <-done:
+ }
+ })
+ }
}
func BenchmarkEngine_CreateIterator_Count_1K(b *testing.B) {
@@ -1668,51 +1781,65 @@ func MustInitDefaultBenchmarkEngine(pointN int) *Engine {
// Engine is a test wrapper for tsm1.Engine.
type Engine struct {
*tsm1.Engine
- root string
- index tsdb.Index
+ root string
+ indexPath string
+ indexType string
+ index tsdb.Index
+ seriesIDSet *tsdb.SeriesIDSet
+ sfile *tsdb.SeriesFile
}
// NewEngine returns a new instance of Engine at a temporary location.
-func NewEngine(index string) *Engine {
+func NewEngine(index string) (*Engine, error) {
root, err := ioutil.TempDir("", "tsm1-")
if err != nil {
panic(err)
}
- db := path.Base(root)
+ db := "db0"
+ dbPath := filepath.Join(root, "data", db)
+
+ if err := os.MkdirAll(dbPath, os.ModePerm); err != nil {
+ return nil, err
+ }
+
+ // Setup series file.
+ sfile := tsdb.NewSeriesFile(filepath.Join(dbPath, tsdb.SeriesFileDirectory))
+ sfile.Logger = logger.New(os.Stdout)
+ if err = sfile.Open(); err != nil {
+ return nil, err
+ }
+
opt := tsdb.NewEngineOptions()
opt.IndexVersion = index
if index == "inmem" {
- opt.InmemIndex = inmem.NewIndex(db)
+ opt.InmemIndex = inmem.NewIndex(db, sfile)
}
- idx := tsdb.MustOpenIndex(1, db, filepath.Join(root, "data", "index"), opt)
+ idxPath := filepath.Join(dbPath, "index")
+ seriesIDs := tsdb.NewSeriesIDSet()
+ idx := tsdb.MustOpenIndex(1, db, idxPath, seriesIDs, sfile, opt)
+
+ tsm1Engine := tsm1.NewEngine(1, idx, db, filepath.Join(root, "data"), filepath.Join(root, "wal"), sfile, opt).(*tsm1.Engine)
return &Engine{
- Engine: tsm1.NewEngine(1,
- idx,
- db,
- filepath.Join(root, "data"),
- filepath.Join(root, "wal"),
- opt).(*tsm1.Engine),
- root: root,
- index: idx,
- }
+ Engine: tsm1Engine,
+ root: root,
+ indexPath: idxPath,
+ indexType: index,
+ index: idx,
+ seriesIDSet: seriesIDs,
+ sfile: sfile,
+ }, nil
}
-// MustOpenDefaultEngine returns a new, open instance of Engine using the default
-// index. Useful when the index is not directly under test.
-func MustOpenDefaultEngine() *Engine {
- e := NewEngine(tsdb.DefaultIndex)
- if err := e.Open(); err != nil {
+// MustOpenEngine returns a new, open instance of Engine.
+func MustOpenEngine(index string) *Engine {
+ e, err := NewEngine(index)
+ if err != nil {
panic(err)
}
- return e
-}
-// MustOpenEngine returns a new, open instance of Engine.
-func MustOpenEngine(index string) *Engine {
- e := NewEngine(index)
if err := e.Open(); err != nil {
panic(err)
}
@@ -1721,38 +1848,106 @@ func MustOpenEngine(index string) *Engine {
// Close closes the engine and removes all underlying data.
func (e *Engine) Close() error {
+ return e.close(true)
+}
+
+func (e *Engine) close(cleanup bool) error {
if e.index != nil {
e.index.Close()
}
- defer os.RemoveAll(e.root)
+
+ if e.sfile != nil {
+ e.sfile.Close()
+ }
+
+ defer func() {
+ if cleanup {
+ os.RemoveAll(e.root)
+ }
+ }()
return e.Engine.Close()
}
// Reopen closes and reopens the engine.
func (e *Engine) Reopen() error {
- if err := e.Engine.Close(); err != nil {
+ // Close engine without removing underlying engine data.
+ if err := e.close(false); err != nil {
return err
- } else if e.index.Close(); err != nil {
+ }
+
+ // Re-open series file. Must create a new series file using the same data.
+ e.sfile = tsdb.NewSeriesFile(e.sfile.Path())
+ if err := e.sfile.Open(); err != nil {
return err
}
db := path.Base(e.root)
opt := tsdb.NewEngineOptions()
- opt.InmemIndex = inmem.NewIndex(db)
+ opt.InmemIndex = inmem.NewIndex(db, e.sfile)
- e.index = tsdb.MustOpenIndex(1, db, filepath.Join(e.root, "data", "index"), opt)
+ // Re-open index.
+ e.seriesIDSet = tsdb.NewSeriesIDSet()
+ e.index = tsdb.MustOpenIndex(1, db, e.indexPath, e.seriesIDSet, e.sfile, opt)
- e.Engine = tsm1.NewEngine(1,
- e.index,
- db,
- filepath.Join(e.root, "data"),
- filepath.Join(e.root, "wal"),
- opt).(*tsm1.Engine)
+ // Re-initialize engine.
+ e.Engine = tsm1.NewEngine(1, e.index, db, filepath.Join(e.root, "data"), filepath.Join(e.root, "wal"), e.sfile, opt).(*tsm1.Engine)
+ // Reopen engine
if err := e.Engine.Open(); err != nil {
return err
}
- return nil
+
+ // Reload series data into index (no-op on TSI).
+ return e.LoadMetadataIndex(1, e.index)
+}
+
+// SeriesIDSet provides access to the underlying series id bitset in the engine's
+// index. It will panic if the underlying index does not have a SeriesIDSet
+// method.
+func (e *Engine) SeriesIDSet() *tsdb.SeriesIDSet {
+ return e.index.(interface {
+ SeriesIDSet() *tsdb.SeriesIDSet
+ }).SeriesIDSet()
+}
+
+// AddSeries adds the provided series data to the index and writes a point to
+// the engine with default values for a field and a time of now.
+func (e *Engine) AddSeries(name string, tags map[string]string) error {
+ point, err := models.NewPoint(name, models.NewTags(tags), models.Fields{"v": 1.0}, time.Now())
+ if err != nil {
+ return err
+ }
+ return e.writePoints(point)
+}
+
+// WritePointsString parses a string buffer into points, adds the associated
+// series to the index, and writes the points to the engine.
+func (e *Engine) WritePointsString(ptstr ...string) error {
+ points, err := models.ParsePointsString(strings.Join(ptstr, "\n"))
+ if err != nil {
+ return err
+ }
+ return e.writePoints(points...)
+}
+
+// writePoints adds the series for the provided points to the index, and writes
+// the point data to the engine.
+func (e *Engine) writePoints(points ...models.Point) error {
+ for _, point := range points {
+ // Write into the index.
+ if err := e.Engine.CreateSeriesIfNotExists(point.Key(), point.Name(), point.Tags()); err != nil {
+ return err
+ }
+ }
+ // Write the points into the cache/wal.
+ return e.WritePoints(points)
+}
+
+// MustAddSeries calls AddSeries, panicking if there is an error.
+func (e *Engine) MustAddSeries(name string, tags map[string]string) {
+ if err := e.AddSeries(name, tags); err != nil {
+ panic(err)
+ }
}
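// A hedged sketch of how the wrapper helpers above compose in a test body:
// series are registered in the index via CreateSeriesIfNotExists before the
// points reach the cache/WAL (the measurement, tag, and timestamp values are
// illustrative):
//
//	e := MustOpenEngine("inmem")
//	defer e.Close()
//	e.MustAddSeries("cpu", map[string]string{"host": "a"})
//	if err := e.WritePointsString(`cpu,host=a value=1.0 1000000000`); err != nil {
//		t.Fatal(err)
//	}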
// MustWriteSnapshot forces a snapshot of the engine. Panic on error.
@@ -1762,9 +1957,35 @@ func (e *Engine) MustWriteSnapshot() {
}
}
-// WritePointsString parses a string buffer and writes the points.
-func (e *Engine) WritePointsString(buf ...string) error {
- return e.WritePoints(MustParsePointsString(strings.Join(buf, "\n")))
+// SeriesFile is a test wrapper for tsdb.SeriesFile.
+type SeriesFile struct {
+ *tsdb.SeriesFile
+}
+
+// NewSeriesFile returns a new instance of SeriesFile with a temporary file path.
+func NewSeriesFile() *SeriesFile {
+ dir, err := ioutil.TempDir("", "tsdb-series-file-")
+ if err != nil {
+ panic(err)
+ }
+ return &SeriesFile{SeriesFile: tsdb.NewSeriesFile(dir)}
+}
+
+// MustOpenSeriesFile returns a new, open instance of SeriesFile. Panic on error.
+func MustOpenSeriesFile() *SeriesFile {
+ f := NewSeriesFile()
+ if err := f.Open(); err != nil {
+ panic(err)
+ }
+ return f
+}
+
+// Close closes the series file and removes it from disk.
+func (f *SeriesFile) Close() {
+ defer os.RemoveAll(f.Path())
+ if err := f.SeriesFile.Close(); err != nil {
+ panic(err)
+ }
}
// MustParsePointsString parses points from a string. Panic on error.
@@ -1813,12 +2034,14 @@ func (s series) Tags() models.Tags { return s.tags }
func (s series) Deleted() bool { return s.deleted }
func (s series) Expr() influxql.Expr { return nil }
-func (itr *seriesIterator) Next() tsdb.SeriesElem {
+func (itr *seriesIterator) Close() error { return nil }
+
+func (itr *seriesIterator) Next() (tsdb.SeriesElem, error) {
if len(itr.keys) == 0 {
- return nil
+ return nil, nil
}
name, tags := models.ParseKeyBytes(itr.keys[0])
s := series{name: name, tags: tags}
itr.keys = itr.keys[1:]
- return s
+ return s, nil
}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/ring.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/ring.go
index 1eeeaf4ffe..f78b5d760c 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/ring.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/ring.go
@@ -252,7 +252,7 @@ func (p *partition) write(key []byte, values Values) (bool, error) {
}
// Create a new entry using a preallocated size if we have a hint available.
- e, err := newEntryValues(values, 32)
+ e, err := newEntryValues(values)
if err != nil {
return false, err
}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index.go b/vendor/github.com/influxdata/influxdb/tsdb/index.go
index c5651afbee..05e4eec803 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index.go
@@ -1,12 +1,17 @@
package tsdb
import (
+ "bytes"
+ "errors"
"fmt"
"os"
"regexp"
"sort"
+ "sync"
+ "github.com/davecgh/go-spew/spew"
"github.com/influxdata/influxdb/models"
+ "github.com/influxdata/influxdb/pkg/bytesutil"
"github.com/influxdata/influxdb/pkg/estimator"
"github.com/influxdata/influxdb/query"
"github.com/influxdata/influxql"
@@ -18,8 +23,8 @@ type Index interface {
Close() error
WithLogger(*zap.Logger)
+ Database() string
MeasurementExists(name []byte) (bool, error)
- MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error)
MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error)
DropMeasurement(name []byte) error
ForEachMeasurementName(fn func(name []byte) error) error
@@ -29,57 +34,2222 @@ type Index interface {
CreateSeriesListIfNotExists(keys, names [][]byte, tags []models.Tags) error
DropSeries(key []byte, ts int64) error
- SeriesSketches() (estimator.Sketch, estimator.Sketch, error)
MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error)
SeriesN() int64
HasTagKey(name, key []byte) (bool, error)
- TagSets(name []byte, options query.IteratorOptions) ([]*query.TagSet, error)
+ HasTagValue(name, key, value []byte) (bool, error)
+
MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error)
- MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error)
- TagKeyHasAuthorizedSeries(auth query.Authorizer, name []byte, key string) bool
- ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error
- TagKeyCardinality(name, key []byte) int
+ TagKeyCardinality(name, key []byte) int
+
+ // InfluxQL system iterators
+ MeasurementIterator() (MeasurementIterator, error)
+ TagKeyIterator(name []byte) (TagKeyIterator, error)
+ TagValueIterator(name, key []byte) (TagValueIterator, error)
+ MeasurementSeriesIDIterator(name []byte) (SeriesIDIterator, error)
+ TagKeySeriesIDIterator(name, key []byte) (SeriesIDIterator, error)
+ TagValueSeriesIDIterator(name, key, value []byte) (SeriesIDIterator, error)
+
+ // Sets a shared fieldset from the engine.
+ FieldSet() *MeasurementFieldSet
+ SetFieldSet(fs *MeasurementFieldSet)
+
+ // Size of the index on disk, if applicable.
+ DiskSizeBytes() int64
+
+ // To be removed w/ tsi1.
+ SetFieldName(measurement []byte, name string)
+ AssignShard(k string, shardID uint64)
+ UnassignShard(k string, shardID uint64, ts int64) error
+ RemoveShard(shardID uint64)
+
+ Type() string
+
+ Rebuild()
+}
+
+// SeriesElem represents a generic series element.
+type SeriesElem interface {
+ Name() []byte
+ Tags() models.Tags
+ Deleted() bool
+
+ // InfluxQL expression associated with series during filtering.
+ Expr() influxql.Expr
+}
+
+// SeriesIterator represents an iterator over a list of series.
+type SeriesIterator interface {
+ Close() error
+ Next() (SeriesElem, error)
+}
+
+// NewSeriesIteratorAdapter returns an adapter for converting series ids to series.
+func NewSeriesIteratorAdapter(sfile *SeriesFile, itr SeriesIDIterator) SeriesIterator {
+ return &seriesIteratorAdapter{
+ sfile: sfile,
+ itr: itr,
+ }
+}
+
+type seriesIteratorAdapter struct {
+ sfile *SeriesFile
+ itr SeriesIDIterator
+}
+
+func (itr *seriesIteratorAdapter) Close() error { return itr.itr.Close() }
+
+func (itr *seriesIteratorAdapter) Next() (SeriesElem, error) {
+ elem, err := itr.itr.Next()
+ if err != nil {
+ return nil, err
+ } else if elem.SeriesID == 0 {
+ return nil, nil
+ }
+
+ name, tags := ParseSeriesKey(itr.sfile.SeriesKey(elem.SeriesID))
+ deleted := itr.sfile.IsDeleted(elem.SeriesID)
+
+ return &seriesElemAdapter{
+ name: name,
+ tags: tags,
+ deleted: deleted,
+ expr: elem.Expr,
+ }, nil
+}
+
+type seriesElemAdapter struct {
+ name []byte
+ tags models.Tags
+ deleted bool
+ expr influxql.Expr
+}
+
+func (e *seriesElemAdapter) Name() []byte { return e.name }
+func (e *seriesElemAdapter) Tags() models.Tags { return e.tags }
+func (e *seriesElemAdapter) Deleted() bool { return e.deleted }
+func (e *seriesElemAdapter) Expr() influxql.Expr { return e.expr }
+
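// A hedged usage sketch for the adapter above: callers receive name/tags back
// instead of raw ids, and a nil SeriesElem signals EOF (sfile and idItr are
// assumed to be an open SeriesFile and a SeriesIDIterator):
//
//	sitr := NewSeriesIteratorAdapter(sfile, idItr)
//	defer sitr.Close()
//	for {
//		elem, err := sitr.Next()
//		if err != nil {
//			return err
//		} else if elem == nil {
//			break
//		}
//		fmt.Printf("%s %v\n", elem.Name(), elem.Tags())
//	}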
+// SeriesIDElem represents a single series and optional expression.
+type SeriesIDElem struct {
+ SeriesID uint64
+ Expr influxql.Expr
+}
+
+// SeriesIDElems represents a list of series id elements.
+type SeriesIDElems []SeriesIDElem
+
+func (a SeriesIDElems) Len() int { return len(a) }
+func (a SeriesIDElems) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a SeriesIDElems) Less(i, j int) bool { return a[i].SeriesID < a[j].SeriesID }
+
+// SeriesIDIterator represents an iterator over a list of series ids.
+type SeriesIDIterator interface {
+ Next() (SeriesIDElem, error)
+ Close() error
+}
+
+// ReadAllSeriesIDIterator returns all ids from the iterator.
+func ReadAllSeriesIDIterator(itr SeriesIDIterator) ([]uint64, error) {
+ if itr == nil {
+ return nil, nil
+ }
+
+ var a []uint64
+ for {
+ e, err := itr.Next()
+ if err != nil {
+ return nil, err
+ } else if e.SeriesID == 0 {
+ break
+ }
+ a = append(a, e.SeriesID)
+ }
+ return a, nil
+}
+
+// NewSeriesIDSliceIterator returns a SeriesIDIterator that iterates over a slice.
+func NewSeriesIDSliceIterator(ids []uint64) *SeriesIDSliceIterator {
+ return &SeriesIDSliceIterator{ids: ids}
+}
+
+// SeriesIDSliceIterator iterates over a slice of series ids.
+type SeriesIDSliceIterator struct {
+ ids []uint64
+}
+
+// Next returns the next series id in the slice.
+func (itr *SeriesIDSliceIterator) Next() (SeriesIDElem, error) {
+ if len(itr.ids) == 0 {
+ return SeriesIDElem{}, nil
+ }
+ id := itr.ids[0]
+ itr.ids = itr.ids[1:]
+ return SeriesIDElem{SeriesID: id}, nil
+}
+
+func (itr *SeriesIDSliceIterator) Close() error { return nil }
+
+type SeriesIDIterators []SeriesIDIterator
+
+func (a SeriesIDIterators) Close() (err error) {
+ for i := range a {
+ if e := a[i].Close(); e != nil && err == nil {
+ err = e
+ }
+ }
+ return err
+}
+
+// seriesQueryAdapterIterator adapts a SeriesIDIterator to a query.Iterator.
+type seriesQueryAdapterIterator struct {
+ once sync.Once
+ sfile *SeriesFile
+ itr SeriesIDIterator
+ fieldset *MeasurementFieldSet
+ opt query.IteratorOptions
+
+ point query.FloatPoint // reusable point
+}
+
+// NewSeriesQueryAdapterIterator returns a new instance of SeriesQueryAdapterIterator.
+func NewSeriesQueryAdapterIterator(sfile *SeriesFile, itr SeriesIDIterator, fieldset *MeasurementFieldSet, opt query.IteratorOptions) query.Iterator {
+ return &seriesQueryAdapterIterator{
+ sfile: sfile,
+ itr: itr,
+ fieldset: fieldset,
+ point: query.FloatPoint{
+ Aux: make([]interface{}, len(opt.Aux)),
+ },
+ opt: opt,
+ }
+}
+
+// Stats returns stats about the points processed.
+func (itr *seriesQueryAdapterIterator) Stats() query.IteratorStats { return query.IteratorStats{} }
+
+// Close closes the iterator.
+func (itr *seriesQueryAdapterIterator) Close() error {
+ itr.once.Do(func() {
+ itr.itr.Close()
+ })
+ return nil
+}
+
+// Next emits the next point in the iterator.
+func (itr *seriesQueryAdapterIterator) Next() (*query.FloatPoint, error) {
+ for {
+ // Read next series element.
+ e, err := itr.itr.Next()
+ if err != nil {
+ return nil, err
+ } else if e.SeriesID == 0 {
+ return nil, nil
+ }
+
+ // Convert to a key.
+ name, tags := ParseSeriesKey(itr.sfile.SeriesKey(e.SeriesID))
+ key := string(models.MakeKey(name, tags))
+
+ // Write auxiliary fields.
+ for i, f := range itr.opt.Aux {
+ switch f.Val {
+ case "key":
+ itr.point.Aux[i] = key
+ }
+ }
+ return &itr.point, nil
+ }
+}
+
+// filterUndeletedSeriesIDIterator returns all series which are not deleted.
+type filterUndeletedSeriesIDIterator struct {
+ sfile *SeriesFile
+ itr SeriesIDIterator
+}
+
+// FilterUndeletedSeriesIDIterator returns an iterator which filters all deleted series.
+func FilterUndeletedSeriesIDIterator(sfile *SeriesFile, itr SeriesIDIterator) SeriesIDIterator {
+ if itr == nil {
+ return nil
+ }
+ return &filterUndeletedSeriesIDIterator{sfile: sfile, itr: itr}
+}
+
+func (itr *filterUndeletedSeriesIDIterator) Close() error {
+ return itr.itr.Close()
+}
+
+func (itr *filterUndeletedSeriesIDIterator) Next() (SeriesIDElem, error) {
+ for {
+ e, err := itr.itr.Next()
+ if err != nil {
+ return SeriesIDElem{}, err
+ } else if e.SeriesID == 0 {
+ return SeriesIDElem{}, nil
+ } else if itr.sfile.IsDeleted(e.SeriesID) {
+ continue
+ }
+ return e, nil
+ }
+}
+
+// seriesIDExprIterator is an iterator that attaches an associated expression.
+type seriesIDExprIterator struct {
+ itr SeriesIDIterator
+ expr influxql.Expr
+}
+
+// newSeriesIDExprIterator returns a new instance of seriesIDExprIterator.
+func newSeriesIDExprIterator(itr SeriesIDIterator, expr influxql.Expr) SeriesIDIterator {
+ if itr == nil {
+ return nil
+ }
+
+ return &seriesIDExprIterator{
+ itr: itr,
+ expr: expr,
+ }
+}
+
+func (itr *seriesIDExprIterator) Close() error {
+ return itr.itr.Close()
+}
+
+// Next returns the next element in the iterator.
+func (itr *seriesIDExprIterator) Next() (SeriesIDElem, error) {
+ elem, err := itr.itr.Next()
+ if err != nil {
+ return SeriesIDElem{}, err
+ } else if elem.SeriesID == 0 {
+ return SeriesIDElem{}, nil
+ }
+ elem.Expr = itr.expr
+ return elem, nil
+}
+
+// MergeSeriesIDIterators returns an iterator that merges a set of iterators.
+// Iterators that are first in the list take precedence and a deletion by those
+// early iterators will invalidate elements by later iterators.
+func MergeSeriesIDIterators(itrs ...SeriesIDIterator) SeriesIDIterator {
+ if n := len(itrs); n == 0 {
+ return nil
+ } else if n == 1 {
+ return itrs[0]
+ }
+
+ return &seriesIDMergeIterator{
+ buf: make([]SeriesIDElem, len(itrs)),
+ itrs: itrs,
+ }
+}
+
+// seriesIDMergeIterator is an iterator that merges multiple iterators together.
+type seriesIDMergeIterator struct {
+ buf []SeriesIDElem
+ itrs []SeriesIDIterator
+}
+
+func (itr *seriesIDMergeIterator) Close() error {
+ SeriesIDIterators(itr.itrs).Close()
+ return nil
+}
+
+// Next returns the element with the next lowest series ID across the iterators.
+func (itr *seriesIDMergeIterator) Next() (SeriesIDElem, error) {
+ // Find next lowest id amongst the buffers.
+ var elem SeriesIDElem
+ for i := range itr.buf {
+ buf := &itr.buf[i]
+
+ // Fill buffer.
+ if buf.SeriesID == 0 {
+ elem, err := itr.itrs[i].Next()
+ if err != nil {
+ return SeriesIDElem{}, err
+ } else if elem.SeriesID == 0 {
+ continue
+ }
+ itr.buf[i] = elem
+ }
+
+ if elem.SeriesID == 0 || buf.SeriesID < elem.SeriesID {
+ elem = *buf
+ }
+ }
+
+ // Return EOF if no elements remaining.
+ if elem.SeriesID == 0 {
+ return SeriesIDElem{}, nil
+ }
+
+ // Clear matching buffers.
+ for i := range itr.buf {
+ if itr.buf[i].SeriesID == elem.SeriesID {
+ itr.buf[i].SeriesID = 0
+ }
+ }
+ return elem, nil
+}
+
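// The SeriesIDIterator combinators in this file are all drained the same way:
// read until SeriesID is zero. A minimal sketch, assuming itrA and itrB are
// iterators obtained from two indexes:
//
//	itr := MergeSeriesIDIterators(itrA, itrB)
//	defer itr.Close()
//	for {
//		e, err := itr.Next()
//		if err != nil {
//			return err
//		} else if e.SeriesID == 0 {
//			break // EOF
//		}
//		// use e.SeriesID ...
//	}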
+// IntersectSeriesIDIterators returns an iterator that only returns series which
+// occur in both iterators. If both series have associated expressions then
+// they are combined together.
+func IntersectSeriesIDIterators(itr0, itr1 SeriesIDIterator) SeriesIDIterator {
+ if itr0 == nil || itr1 == nil {
+ if itr0 != nil {
+ itr0.Close()
+ }
+ if itr1 != nil {
+ itr1.Close()
+ }
+ return nil
+ }
+
+ return &seriesIDIntersectIterator{itrs: [2]SeriesIDIterator{itr0, itr1}}
+}
+
+// seriesIDIntersectIterator is an iterator that merges two iterators together.
+type seriesIDIntersectIterator struct {
+ buf [2]SeriesIDElem
+ itrs [2]SeriesIDIterator
+}
+
+func (itr *seriesIDIntersectIterator) Close() (err error) {
+ if e := itr.itrs[0].Close(); e != nil && err == nil {
+ err = e
+ }
+ if e := itr.itrs[1].Close(); e != nil && err == nil {
+ err = e
+ }
+ return err
+}
+
+// Next returns the next element which occurs in both iterators.
+func (itr *seriesIDIntersectIterator) Next() (_ SeriesIDElem, err error) {
+ for {
+ // Fill buffers.
+ if itr.buf[0].SeriesID == 0 {
+ if itr.buf[0], err = itr.itrs[0].Next(); err != nil {
+ return SeriesIDElem{}, err
+ }
+ }
+ if itr.buf[1].SeriesID == 0 {
+ if itr.buf[1], err = itr.itrs[1].Next(); err != nil {
+ return SeriesIDElem{}, err
+ }
+ }
+
+ // Exit if either buffer is still empty.
+ if itr.buf[0].SeriesID == 0 || itr.buf[1].SeriesID == 0 {
+ return SeriesIDElem{}, nil
+ }
+
+ // Skip if both series are not equal.
+ if a, b := itr.buf[0].SeriesID, itr.buf[1].SeriesID; a < b {
+ itr.buf[0].SeriesID = 0
+ continue
+ } else if a > b {
+ itr.buf[1].SeriesID = 0
+ continue
+ }
+
+ // Merge series together if equal.
+ elem := itr.buf[0]
+
+ // Attach expression.
+ expr0 := itr.buf[0].Expr
+ expr1 := itr.buf[1].Expr
+ if expr0 == nil {
+ elem.Expr = expr1
+ } else if expr1 == nil {
+ elem.Expr = expr0
+ } else {
+ elem.Expr = influxql.Reduce(&influxql.BinaryExpr{
+ Op: influxql.AND,
+ LHS: expr0,
+ RHS: expr1,
+ }, nil)
+ }
+
+ itr.buf[0].SeriesID, itr.buf[1].SeriesID = 0, 0
+ return elem, nil
+ }
+}
+
+// UnionSeriesIDIterators returns an iterator that returns series from both
+// iterators. If both series have associated expressions then they are
+// combined together.
+func UnionSeriesIDIterators(itr0, itr1 SeriesIDIterator) SeriesIDIterator {
+ // Return other iterator if either one is nil.
+ if itr0 == nil {
+ return itr1
+ } else if itr1 == nil {
+ return itr0
+ }
+
+ return &seriesIDUnionIterator{itrs: [2]SeriesIDIterator{itr0, itr1}}
+}
+
+// seriesIDUnionIterator is an iterator that unions two iterators together.
+type seriesIDUnionIterator struct {
+ buf [2]SeriesIDElem
+ itrs [2]SeriesIDIterator
+}
+
+func (itr *seriesIDUnionIterator) Close() (err error) {
+ if e := itr.itrs[0].Close(); e != nil && err == nil {
+ err = e
+ }
+ if e := itr.itrs[1].Close(); e != nil && err == nil {
+ err = e
+ }
+ return err
+}
+
+// Next returns the next element which occurs in either iterator.
+func (itr *seriesIDUnionIterator) Next() (_ SeriesIDElem, err error) {
+ // Fill buffers.
+ if itr.buf[0].SeriesID == 0 {
+ if itr.buf[0], err = itr.itrs[0].Next(); err != nil {
+ return SeriesIDElem{}, err
+ }
+ }
+ if itr.buf[1].SeriesID == 0 {
+ if itr.buf[1], err = itr.itrs[1].Next(); err != nil {
+ return SeriesIDElem{}, err
+ }
+ }
+
+ // Return non-zero or lesser series.
+ if a, b := itr.buf[0].SeriesID, itr.buf[1].SeriesID; a == 0 && b == 0 {
+ return SeriesIDElem{}, nil
+ } else if b == 0 || (a != 0 && a < b) {
+ elem := itr.buf[0]
+ itr.buf[0].SeriesID = 0
+ return elem, nil
+ } else if a == 0 || (b != 0 && a > b) {
+ elem := itr.buf[1]
+ itr.buf[1].SeriesID = 0
+ return elem, nil
+ }
+
+ // Both series IDs are equal; use the first element.
+ elem := itr.buf[0]
+
+ // Attach expression.
+ expr0 := itr.buf[0].Expr
+ expr1 := itr.buf[1].Expr
+ if expr0 != nil && expr1 != nil {
+ elem.Expr = influxql.Reduce(&influxql.BinaryExpr{
+ Op: influxql.OR,
+ LHS: expr0,
+ RHS: expr1,
+ }, nil)
+ } else {
+ elem.Expr = nil
+ }
+
+ itr.buf[0].SeriesID, itr.buf[1].SeriesID = 0, 0
+ return elem, nil
+}
+
+// DifferenceSeriesIDIterators returns an iterator that only returns series which
+// occur in the first iterator but not the second iterator.
+func DifferenceSeriesIDIterators(itr0, itr1 SeriesIDIterator) SeriesIDIterator {
+ if itr0 == nil && itr1 == nil {
+ return nil
+ } else if itr1 == nil {
+ return itr0
+ } else if itr0 == nil {
+ itr1.Close()
+ return nil
+ }
+ return &seriesIDDifferenceIterator{itrs: [2]SeriesIDIterator{itr0, itr1}}
+}
+
+// seriesIDDifferenceIterator is an iterator that returns series from the first
+// iterator which do not occur in the second iterator.
+type seriesIDDifferenceIterator struct {
+ buf [2]SeriesIDElem
+ itrs [2]SeriesIDIterator
+}
+
+func (itr *seriesIDDifferenceIterator) Close() (err error) {
+ if e := itr.itrs[0].Close(); e != nil && err == nil {
+ err = e
+ }
+ if e := itr.itrs[1].Close(); e != nil && err == nil {
+ err = e
+ }
+ return err
+}
+
+// Next returns the next element which occurs only in the first iterator.
+func (itr *seriesIDDifferenceIterator) Next() (_ SeriesIDElem, err error) {
+ for {
+ // Fill buffers.
+ if itr.buf[0].SeriesID == 0 {
+ if itr.buf[0], err = itr.itrs[0].Next(); err != nil {
+ return SeriesIDElem{}, err
+ }
+ }
+ if itr.buf[1].SeriesID == 0 {
+ if itr.buf[1], err = itr.itrs[1].Next(); err != nil {
+ return SeriesIDElem{}, err
+ }
+ }
+
+ // Exit if first buffer is still empty.
+ if itr.buf[0].SeriesID == 0 {
+ return SeriesIDElem{}, nil
+ } else if itr.buf[1].SeriesID == 0 {
+ elem := itr.buf[0]
+ itr.buf[0].SeriesID = 0
+ return elem, nil
+ }
+
+ // Return first series if it's less.
+ // If second series is less then skip it.
+ // If both series are equal then skip both.
+ if a, b := itr.buf[0].SeriesID, itr.buf[1].SeriesID; a < b {
+ elem := itr.buf[0]
+ itr.buf[0].SeriesID = 0
+ return elem, nil
+ } else if a > b {
+ itr.buf[1].SeriesID = 0
+ continue
+ } else {
+ itr.buf[0].SeriesID, itr.buf[1].SeriesID = 0, 0
+ continue
+ }
+ }
+}
+
+// seriesPointIterator adapts a series iterator to a query.Iterator.
+type seriesPointIterator struct {
+ once sync.Once
+ indexSet IndexSet
+ fieldset *MeasurementFieldSet
+ mitr MeasurementIterator
+ keys [][]byte
+ opt query.IteratorOptions
+
+ point query.FloatPoint // reusable point
+}
+
+// NewSeriesPointIterator returns a new instance of seriesPointIterator.
+func NewSeriesPointIterator(indexSet IndexSet, fieldset *MeasurementFieldSet, opt query.IteratorOptions) (_ query.Iterator, err error) {
+ // Only tag comparison operators (=, !=, =~, !~) combined with AND/OR are allowed.
+ influxql.WalkFunc(opt.Condition, func(n influxql.Node) {
+ switch n := n.(type) {
+ case *influxql.BinaryExpr:
+ switch n.Op {
+ case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX,
+ influxql.OR, influxql.AND:
+ default:
+ err = errors.New("invalid tag comparison operator")
+ }
+ }
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ mitr, err := indexSet.MeasurementIterator()
+ if err != nil {
+ return nil, err
+ }
+
+ return &seriesPointIterator{
+ indexSet: indexSet,
+ fieldset: fieldset,
+ mitr: mitr,
+ point: query.FloatPoint{
+ Aux: make([]interface{}, len(opt.Aux)),
+ },
+ opt: opt,
+ }, nil
+}
+
+// Stats returns stats about the points processed.
+func (itr *seriesPointIterator) Stats() query.IteratorStats { return query.IteratorStats{} }
+
+// Close closes the iterator.
+func (itr *seriesPointIterator) Close() (err error) {
+ itr.once.Do(func() {
+ if itr.mitr != nil {
+ err = itr.mitr.Close()
+ }
+ })
+ return err
+}
+
+// Next emits the next point in the iterator.
+func (itr *seriesPointIterator) Next() (*query.FloatPoint, error) {
+ for {
+ // Read series keys for next measurement if no more keys remaining.
+ // Exit if there are no measurements remaining.
+ if len(itr.keys) == 0 {
+ m, err := itr.mitr.Next()
+ if err != nil {
+ return nil, err
+ } else if m == nil {
+ return nil, nil
+ }
+
+ if err := itr.readSeriesKeys(m); err != nil {
+ return nil, err
+ }
+ continue
+ }
+
+ name, tags := ParseSeriesKey(itr.keys[0])
+ itr.keys = itr.keys[1:]
+
+ // TODO(edd): It seems to me like this authorisation check should be
+ // further down in the index. At this point we're going to be filtering
+ // series that have already been materialised in the LogFiles and
+ // IndexFiles.
+ if itr.opt.Authorizer != nil && !itr.opt.Authorizer.AuthorizeSeriesRead(itr.indexSet.Database(), name, tags) {
+ continue
+ }
+
+ // Convert to a key.
+ key := string(models.MakeKey(name, tags))
+
+ // Write auxiliary fields.
+ for i, f := range itr.opt.Aux {
+ switch f.Val {
+ case "key":
+ itr.point.Aux[i] = key
+ }
+ }
+
+ return &itr.point, nil
+ }
+}
+
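+// readSeriesKeys fetches all series keys for the provided measurement name,
+// filtered by the iterator's condition, and sorts them into itr.keys.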
+func (itr *seriesPointIterator) readSeriesKeys(name []byte) error {
+ sitr, err := itr.indexSet.MeasurementSeriesByExprIterator(name, itr.opt.Condition)
+ if err != nil {
+ return err
+ } else if sitr == nil {
+ return nil
+ }
+ defer sitr.Close()
+
+ // Slurp all series keys.
+ itr.keys = itr.keys[:0]
+ for {
+ elem, err := sitr.Next()
+ if err != nil {
+ return err
+ } else if elem.SeriesID == 0 {
+ break
+ }
+ itr.keys = append(itr.keys, itr.indexSet.SeriesFile.SeriesKey(elem.SeriesID))
+ }
+
+ // Sort keys.
+ sort.Sort(seriesKeys(itr.keys))
+ return nil
+}
+
+// MeasurementIterator represents an iterator over a list of measurements.
+type MeasurementIterator interface {
+ Close() error
+ Next() ([]byte, error)
+}
+
+type MeasurementIterators []MeasurementIterator
+
+func (a MeasurementIterators) Close() (err error) {
+ for i := range a {
+ if e := a[i].Close(); e != nil && err == nil {
+ err = e
+ }
+ }
+ return err
+}
+
+type measurementSliceIterator struct {
+ names [][]byte
+}
+
+// NewMeasurementSliceIterator returns an iterator over a slice of in-memory measurement names.
+func NewMeasurementSliceIterator(names [][]byte) *measurementSliceIterator {
+ return &measurementSliceIterator{names: names}
+}
+
+func (itr *measurementSliceIterator) Close() (err error) { return nil }
+
+func (itr *measurementSliceIterator) Next() (name []byte, err error) {
+ if len(itr.names) == 0 {
+ return nil, nil
+ }
+ name, itr.names = itr.names[0], itr.names[1:]
+ return name, nil
+}
+
+// MergeMeasurementIterators returns an iterator that merges a set of iterators.
+// Iterators that are first in the list take precedence, and a deletion by those
+// early iterators will invalidate elements from later iterators.
+func MergeMeasurementIterators(itrs ...MeasurementIterator) MeasurementIterator {
+ if len(itrs) == 0 {
+ return nil
+ } else if len(itrs) == 1 {
+ return itrs[0]
+ }
+
+ return &measurementMergeIterator{
+ buf: make([][]byte, len(itrs)),
+ itrs: itrs,
+ }
+}
+
+type measurementMergeIterator struct {
+ buf [][]byte
+ itrs []MeasurementIterator
+}
+
+func (itr *measurementMergeIterator) Close() (err error) {
+ for i := range itr.itrs {
+ if e := itr.itrs[i].Close(); e != nil && err == nil {
+ err = e
+ }
+ }
+ return err
+}
+
+// Next returns the element with the next lowest name across the iterators.
+//
+// If multiple iterators contain the same name then the first is returned
+// and the remaining ones are skipped.
+func (itr *measurementMergeIterator) Next() (_ []byte, err error) {
+ // Find next lowest name amongst the buffers.
+ var name []byte
+ for i, buf := range itr.buf {
+ // Fill buffer if empty.
+ if buf == nil {
+ if buf, err = itr.itrs[i].Next(); err != nil {
+ return nil, err
+ } else if buf != nil {
+ itr.buf[i] = buf
+ } else {
+ continue
+ }
+ }
+
+ // Find next lowest name.
+ if name == nil || bytes.Compare(itr.buf[i], name) == -1 {
+ name = itr.buf[i]
+ }
+ }
+
+ // Return nil if no elements remaining.
+ if name == nil {
+ return nil, nil
+ }
+
+ // Merge all elements together and clear buffers.
+ for i, buf := range itr.buf {
+ if buf == nil || !bytes.Equal(buf, name) {
+ continue
+ }
+ itr.buf[i] = nil
+ }
+ return name, nil
+}
+
+// TagKeyIterator represents an iterator over a list of tag keys.
+type TagKeyIterator interface {
+ Close() error
+ Next() ([]byte, error)
+}
+
+type TagKeyIterators []TagKeyIterator
+
+func (a TagKeyIterators) Close() (err error) {
+ for i := range a {
+ if e := a[i].Close(); e != nil && err == nil {
+ err = e
+ }
+ }
+ return err
+}
+
+// NewTagKeySliceIterator returns a TagKeyIterator that iterates over a slice.
+func NewTagKeySliceIterator(keys [][]byte) *tagKeySliceIterator {
+ return &tagKeySliceIterator{keys: keys}
+}
+
+// tagKeySliceIterator iterates over a slice of tag keys.
+type tagKeySliceIterator struct {
+ keys [][]byte
+}
+
+// Next returns the next tag key in the slice.
+func (itr *tagKeySliceIterator) Next() ([]byte, error) {
+ if len(itr.keys) == 0 {
+ return nil, nil
+ }
+ key := itr.keys[0]
+ itr.keys = itr.keys[1:]
+ return key, nil
+}
+
+func (itr *tagKeySliceIterator) Close() error { return nil }
+
+// MergeTagKeyIterators returns an iterator that merges a set of iterators.
+func MergeTagKeyIterators(itrs ...TagKeyIterator) TagKeyIterator {
+ if len(itrs) == 0 {
+ return nil
+ } else if len(itrs) == 1 {
+ return itrs[0]
+ }
+
+ return &tagKeyMergeIterator{
+ buf: make([][]byte, len(itrs)),
+ itrs: itrs,
+ }
+}
+
+type tagKeyMergeIterator struct {
+ buf [][]byte
+ itrs []TagKeyIterator
+}
+
+func (itr *tagKeyMergeIterator) Close() error {
+ for i := range itr.itrs {
+ itr.itrs[i].Close()
+ }
+ return nil
+}
+
+// Next returns the element with the next lowest key across the iterators.
+//
+// If multiple iterators contain the same key then the first is returned
+// and the remaining ones are skipped.
+func (itr *tagKeyMergeIterator) Next() (_ []byte, err error) {
+ // Find next lowest key amongst the buffers.
+ var key []byte
+ for i, buf := range itr.buf {
+ // Fill buffer.
+ if buf == nil {
+ if buf, err = itr.itrs[i].Next(); err != nil {
+ return nil, err
+ } else if buf != nil {
+ itr.buf[i] = buf
+ } else {
+ continue
+ }
+ }
+
+ // Find next lowest key.
+ if key == nil || bytes.Compare(buf, key) == -1 {
+ key = buf
+ }
+ }
+
+ // Return nil if no elements remaining.
+ if key == nil {
+ return nil, nil
+ }
+
+ // Merge elements and clear buffers.
+ for i, buf := range itr.buf {
+ if buf == nil || !bytes.Equal(buf, key) {
+ continue
+ }
+ itr.buf[i] = nil
+ }
+ return key, nil
+}
+
+// TagValueIterator represents an iterator over a list of tag values.
+type TagValueIterator interface {
+ Close() error
+ Next() ([]byte, error)
+}
+
+type TagValueIterators []TagValueIterator
+
+func (a TagValueIterators) Close() (err error) {
+ for i := range a {
+ if e := a[i].Close(); e != nil && err == nil {
+ err = e
+ }
+ }
+ return err
+}
+
+// NewTagValueSliceIterator returns a TagValueIterator that iterates over a slice.
+func NewTagValueSliceIterator(values [][]byte) *tagValueSliceIterator {
+ return &tagValueSliceIterator{values: values}
+}
+
+// tagValueSliceIterator iterates over a slice of tag values.
+type tagValueSliceIterator struct {
+ values [][]byte
+}
+
+// Next returns the next tag value in the slice.
+func (itr *tagValueSliceIterator) Next() ([]byte, error) {
+ if len(itr.values) == 0 {
+ return nil, nil
+ }
+ value := itr.values[0]
+ itr.values = itr.values[1:]
+ return value, nil
+}
+
+func (itr *tagValueSliceIterator) Close() error { return nil }
+
+// MergeTagValueIterators returns an iterator that merges a set of iterators.
+func MergeTagValueIterators(itrs ...TagValueIterator) TagValueIterator {
+ if len(itrs) == 0 {
+ return nil
+ } else if len(itrs) == 1 {
+ return itrs[0]
+ }
+
+ return &tagValueMergeIterator{
+ buf: make([][]byte, len(itrs)),
+ itrs: itrs,
+ }
+}
+
+type tagValueMergeIterator struct {
+ buf [][]byte
+ itrs []TagValueIterator
+}
+
+func (itr *tagValueMergeIterator) Close() error {
+ for i := range itr.itrs {
+ itr.itrs[i].Close()
+ }
+ return nil
+}
+
+// Next returns the element with the next lowest value across the iterators.
+//
+// If multiple iterators contain the same value then the first is returned
+// and the remaining ones are skipped.
+func (itr *tagValueMergeIterator) Next() (_ []byte, err error) {
+ // Find next lowest value amongst the buffers.
+ var value []byte
+ for i, buf := range itr.buf {
+ // Fill buffer.
+ if buf == nil {
+ if buf, err = itr.itrs[i].Next(); err != nil {
+ return nil, err
+ } else if buf != nil {
+ itr.buf[i] = buf
+ } else {
+ continue
+ }
+ }
+
+ // Find next lowest value.
+ if value == nil || bytes.Compare(buf, value) == -1 {
+ value = buf
+ }
+ }
+
+ // Return nil if no elements remaining.
+ if value == nil {
+ return nil, nil
+ }
+
+ // Merge elements and clear buffers.
+ for i, buf := range itr.buf {
+ if buf == nil || !bytes.Equal(buf, value) {
+ continue
+ }
+ itr.buf[i] = nil
+ }
+ return value, nil
+}
+
+// IndexSet represents a list of indexes.
+type IndexSet struct {
+ Indexes []Index
+ SeriesFile *SeriesFile
+}
+
+// Database returns the database name of the first index.
+func (is IndexSet) Database() string {
+ if len(is.Indexes) == 0 {
+ return ""
+ }
+ return is.Indexes[0].Database()
+}
+
+// FieldSet returns the fieldset of the first index.
+func (is IndexSet) FieldSet() *MeasurementFieldSet {
+ if len(is.Indexes) == 0 {
+ return nil
+ }
+ return is.Indexes[0].FieldSet()
+}
+
+// DedupeInmemIndexes returns an index set which removes duplicate in-memory indexes.
+func (is IndexSet) DedupeInmemIndexes() IndexSet {
+ other := IndexSet{Indexes: make([]Index, 0, len(is.Indexes)), SeriesFile: is.SeriesFile}
+
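+ // Only the first inmem index encountered is kept; all other index types are
+ // passed through unchanged.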
+ var hasInmem bool
+ for _, idx := range is.Indexes {
+ if idx.Type() == "inmem" {
+ if !hasInmem {
+ other.Indexes = append(other.Indexes, idx)
+ hasInmem = true
+ }
+ continue
+ }
+ other.Indexes = append(other.Indexes, idx)
+ }
+ return other
+}
+
+func (is IndexSet) MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) {
+ // Return filtered list if expression exists.
+ if expr != nil {
+ return is.measurementNamesByExpr(auth, expr)
+ }
+
+ itr, err := is.MeasurementIterator()
+ if err != nil {
+ return nil, err
+ } else if itr == nil {
+ return nil, nil
+ }
+ defer itr.Close()
+
+ // Iterate over all measurements if no condition exists.
+ var names [][]byte
+ for {
+ e, err := itr.Next()
+ if err != nil {
+ return nil, err
+ } else if e == nil {
+ break
+ }
+
+ // Determine if there exists at least one authorised series for the
+ // measurement name.
+ if is.measurementAuthorizedSeries(auth, e) {
+ names = append(names, e)
+ }
+ }
+ return names, nil
+}
+
+func (is IndexSet) measurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) {
+ if expr == nil {
+ return nil, nil
+ }
+
+ switch e := expr.(type) {
+ case *influxql.BinaryExpr:
+ switch e.Op {
+ case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX:
+ tag, ok := e.LHS.(*influxql.VarRef)
+ if !ok {
+ return nil, fmt.Errorf("left side of '%s' must be a tag key", e.Op.String())
+ }
+
+ // Retrieve value or regex expression from RHS.
+ var value string
+ var regex *regexp.Regexp
+ if influxql.IsRegexOp(e.Op) {
+ re, ok := e.RHS.(*influxql.RegexLiteral)
+ if !ok {
+ return nil, fmt.Errorf("right side of '%s' must be a regular expression", e.Op.String())
+ }
+ regex = re.Val
+ } else {
+ s, ok := e.RHS.(*influxql.StringLiteral)
+ if !ok {
+ return nil, fmt.Errorf("right side of '%s' must be a tag value string", e.Op.String())
+ }
+ value = s.Val
+ }
+
+ // Match on name, if specified.
+ if tag.Val == "_name" {
+ return is.measurementNamesByNameFilter(auth, e.Op, value, regex)
+ } else if influxql.IsSystemName(tag.Val) {
+ return nil, nil
+ }
+ return is.measurementNamesByTagFilter(auth, e.Op, tag.Val, value, regex)
+
+ case influxql.OR, influxql.AND:
+ lhs, err := is.measurementNamesByExpr(auth, e.LHS)
+ if err != nil {
+ return nil, err
+ }
+
+ rhs, err := is.measurementNamesByExpr(auth, e.RHS)
+ if err != nil {
+ return nil, err
+ }
+
+ if e.Op == influxql.OR {
+ return bytesutil.Union(lhs, rhs), nil
+ }
+ return bytesutil.Intersect(lhs, rhs), nil
+
+ default:
+ return nil, fmt.Errorf("invalid tag comparison operator")
+ }
+
+ case *influxql.ParenExpr:
+ return is.measurementNamesByExpr(auth, e.Expr)
+ default:
+ return nil, fmt.Errorf("%#v", expr)
+ }
+}
+
+// measurementNamesByNameFilter returns matching measurement names in sorted order.
+func (is IndexSet) measurementNamesByNameFilter(auth query.Authorizer, op influxql.Token, val string, regex *regexp.Regexp) ([][]byte, error) {
+ itr, err := is.MeasurementIterator()
+ if err != nil {
+ return nil, err
+ } else if itr == nil {
+ return nil, nil
+ }
+ defer itr.Close()
+
+ var names [][]byte
+ for {
+ e, err := itr.Next()
+ if err != nil {
+ return nil, err
+ } else if e == nil {
+ break
+ }
+
+ var matched bool
+ switch op {
+ case influxql.EQ:
+ matched = string(e) == val
+ case influxql.NEQ:
+ matched = string(e) != val
+ case influxql.EQREGEX:
+ matched = regex.Match(e)
+ case influxql.NEQREGEX:
+ matched = !regex.Match(e)
+ }
+
+ if matched && is.measurementAuthorizedSeries(auth, e) {
+ names = append(names, e)
+ }
+ }
+ bytesutil.Sort(names)
+ return names, nil
+}
+
+func (is IndexSet) measurementNamesByTagFilter(auth query.Authorizer, op influxql.Token, key, val string, regex *regexp.Regexp) ([][]byte, error) {
+ var names [][]byte
- // InfluxQL system iterators
- MeasurementSeriesKeysByExprIterator(name []byte, condition influxql.Expr) (SeriesIterator, error)
- MeasurementSeriesKeysByExpr(name []byte, condition influxql.Expr) ([][]byte, error)
- SeriesPointIterator(opt query.IteratorOptions) (query.Iterator, error)
+ mitr, err := is.MeasurementIterator()
+ if err != nil {
+ return nil, err
+ } else if mitr == nil {
+ return nil, nil
+ }
+ defer mitr.Close()
- // Sets a shared fieldset from the engine.
- SetFieldSet(fs *MeasurementFieldSet)
+ // valEqual determines if the provided []byte is equal to the tag value
+ // to be filtered on.
+ valEqual := regex.Match
+ if op == influxql.EQ || op == influxql.NEQ {
+ vb := []byte(val)
+ valEqual = func(b []byte) bool { return bytes.Equal(vb, b) }
+ }
- // Creates hard links inside path for snapshotting.
- SnapshotTo(path string) error
+ var tagMatch bool
+ var authorized bool
+ for {
+ me, err := mitr.Next()
+ if err != nil {
+ return nil, err
+ } else if me == nil {
+ break
+ }
+ // If the measurement doesn't have the tag key, then it won't be considered.
+ if ok, err := is.HasTagKey(me, []byte(key)); err != nil {
+ return nil, err
+ } else if !ok {
+ continue
+ }
+ tagMatch = false
+ // Authorization must be explicitly granted when an authorizer is present.
+ authorized = auth == nil
- // Size of the index on disk, if applicable.
- DiskSizeBytes() int64
+ vitr, err := is.TagValueIterator(me, []byte(key))
+ if err != nil {
+ return nil, err
+ }
- // To be removed w/ tsi1.
- SetFieldName(measurement []byte, name string)
- AssignShard(k string, shardID uint64)
- UnassignShard(k string, shardID uint64, ts int64) error
- RemoveShard(shardID uint64)
- Type() string
- Rebuild()
+ if vitr != nil {
+ defer vitr.Close()
+ for {
+ ve, err := vitr.Next()
+ if err != nil {
+ return nil, err
+ } else if ve == nil {
+ break
+ }
+ if !valEqual(ve) {
+ continue
+ }
+ tagMatch = true
+ if auth == nil {
+ break
+ }
+
+ // When an authorizer is present, the measurement should be
+ // included only if one of its series is authorized.
+ sitr, err := is.TagValueSeriesIDIterator(me, []byte(key), ve)
+ if err != nil {
+ return nil, err
+ } else if sitr == nil {
+ continue
+ }
+ defer sitr.Close()
+
+ // Locate a series with this matching tag value that's authorized.
+ for {
+ se, err := sitr.Next()
+ if err != nil {
+ return nil, err
+ }
+
+ if se.SeriesID == 0 {
+ break
+ }
+
+ name, tags := is.SeriesFile.Series(se.SeriesID)
+ if auth.AuthorizeSeriesRead(is.Database(), name, tags) {
+ authorized = true
+ break
+ }
+ }
+
+ if err := sitr.Close(); err != nil {
+ return nil, err
+ }
+
+ if tagMatch && authorized {
+ // The measurement can definitely be included or rejected.
+ break
+ }
+ }
+ if err := vitr.Close(); err != nil {
+ return nil, err
+ }
+ }
+
+ // For negation operators, to determine if the measurement is authorized,
+ // an authorized series belonging to the measurement must be located.
+ // Then, the measurement can be added iff !tagMatch && authorized.
+ if (op == influxql.NEQ || op == influxql.NEQREGEX) && !tagMatch {
+ authorized = is.measurementAuthorizedSeries(auth, me)
+ }
+
+ // tags match | operation is EQ | measurement matches
+ // --------------------------------------------------
+ // True | True | True
+ // True | False | False
+ // False | True | False
+ // False | False | True
+ if tagMatch == (op == influxql.EQ || op == influxql.EQREGEX) && authorized {
+ names = append(names, me)
+ continue
+ }
+ }
+
+ bytesutil.Sort(names)
+ return names, nil
}
-// SeriesElem represents a generic series element.
-type SeriesElem interface {
- Name() []byte
- Tags() models.Tags
- Deleted() bool
- // InfluxQL expression associated with series during filtering.
- Expr() influxql.Expr
+// measurementAuthorizedSeries determines if the measurement contains a series
+// that is authorized to be read.
+func (is IndexSet) measurementAuthorizedSeries(auth query.Authorizer, name []byte) bool {
+ if auth == nil {
+ return true
+ }
+ sitr, err := is.MeasurementSeriesIDIterator(name)
+ if err != nil || sitr == nil {
+ return false
+ }
+ defer sitr.Close()
+
+ for {
+ series, err := sitr.Next()
+ if err != nil {
+ return false
+ }
+
+ if series.SeriesID == 0 {
+ return false // End of iterator
+ }
+
+ name, tags := is.SeriesFile.Series(series.SeriesID)
+ if auth.AuthorizeSeriesRead(is.Database(), name, tags) {
+ return true
+ }
+ }
}
-// SeriesIterator represents a iterator over a list of series.
-type SeriesIterator interface {
- Next() SeriesElem
+// HasTagKey returns true if the tag key exists in any index for the provided
+// measurement.
+func (is IndexSet) HasTagKey(name, key []byte) (bool, error) {
+ for _, idx := range is.Indexes {
+ if ok, err := idx.HasTagKey(name, key); err != nil {
+ return false, err
+ } else if ok {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// HasTagValue returns true if the tag value exists in any index for the provided
+// measurement and tag key.
+func (is IndexSet) HasTagValue(name, key, value []byte) (bool, error) {
+ for _, idx := range is.Indexes {
+ if ok, err := idx.HasTagValue(name, key, value); err != nil {
+ return false, err
+ } else if ok {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// MeasurementIterator returns an iterator over all measurements in the index.
+func (is IndexSet) MeasurementIterator() (MeasurementIterator, error) {
+ a := make([]MeasurementIterator, 0, len(is.Indexes))
+ for _, idx := range is.Indexes {
+ itr, err := idx.MeasurementIterator()
+ if err != nil {
+ MeasurementIterators(a).Close()
+ return nil, err
+ } else if itr != nil {
+ a = append(a, itr)
+ }
+ }
+ return MergeMeasurementIterators(a...), nil
+}
+
+// TagKeyIterator returns a key iterator for a measurement.
+func (is IndexSet) TagKeyIterator(name []byte) (TagKeyIterator, error) {
+ a := make([]TagKeyIterator, 0, len(is.Indexes))
+ for _, idx := range is.Indexes {
+ itr, err := idx.TagKeyIterator(name)
+ if err != nil {
+ TagKeyIterators(a).Close()
+ return nil, err
+ } else if itr != nil {
+ a = append(a, itr)
+ }
+ }
+ return MergeTagKeyIterators(a...), nil
+}
+
+// TagValueIterator returns a value iterator for a tag key.
+func (is IndexSet) TagValueIterator(name, key []byte) (TagValueIterator, error) {
+ a := make([]TagValueIterator, 0, len(is.Indexes))
+ for _, idx := range is.Indexes {
+ itr, err := idx.TagValueIterator(name, key)
+ if err != nil {
+ TagValueIterators(a).Close()
+ return nil, err
+ } else if itr != nil {
+ a = append(a, itr)
+ }
+ }
+ return MergeTagValueIterators(a...), nil
+}
+
+// TagKeyHasAuthorizedSeries determines if there exists an authorized series for
+// the provided measurement name and tag key.
+func (is IndexSet) TagKeyHasAuthorizedSeries(auth query.Authorizer, name, tagKey []byte) (bool, error) {
+ itr, err := is.TagKeySeriesIDIterator(name, tagKey)
+ if err != nil {
+ return false, err
+ } else if itr == nil {
+ return false, nil
+ }
+ defer itr.Close()
+
+ for {
+ e, err := itr.Next()
+ if err != nil {
+ return false, err
+ }
+
+ if e.SeriesID == 0 {
+ return false, nil
+ }
+
+ if auth == nil || auth == query.OpenAuthorizer {
+ return true, nil
+ }
+
+ name, tags := is.SeriesFile.Series(e.SeriesID)
+ if auth.AuthorizeSeriesRead(is.Database(), name, tags) {
+ return true, nil
+ }
+ }
+}
+
+// MeasurementSeriesIDIterator returns an iterator over all non-tombstoned series
+// for the provided measurement.
+func (is IndexSet) MeasurementSeriesIDIterator(name []byte) (SeriesIDIterator, error) {
+ a := make([]SeriesIDIterator, 0, len(is.Indexes))
+ for _, idx := range is.Indexes {
+ itr, err := idx.MeasurementSeriesIDIterator(name)
+ if err != nil {
+ SeriesIDIterators(a).Close()
+ return nil, err
+ } else if itr != nil {
+ a = append(a, itr)
+ }
+ }
+ return FilterUndeletedSeriesIDIterator(is.SeriesFile, MergeSeriesIDIterators(a...)), nil
+}
+
+// ForEachMeasurementTagKey iterates over all tag keys in a measurement and applies
+// the provided function.
+func (is IndexSet) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error {
+ itr, err := is.TagKeyIterator(name)
+ if err != nil {
+ return err
+ } else if itr == nil {
+ return nil
+ }
+ defer itr.Close()
+
+ for {
+ key, err := itr.Next()
+ if err != nil {
+ return err
+ } else if key == nil {
+ return nil
+ }
+
+ if err := fn(key); err != nil {
+ return err
+ }
+ }
+}
+
+// MeasurementTagKeysByExpr extracts the tag keys wanted by the expression.
+func (is IndexSet) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) {
+ keys := make(map[string]struct{})
+ for _, idx := range is.Indexes {
+ m, err := idx.MeasurementTagKeysByExpr(name, expr)
+ if err != nil {
+ return nil, err
+ }
+ for k := range m {
+ keys[k] = struct{}{}
+ }
+ }
+ return keys, nil
+}
+
+// TagKeySeriesIDIterator returns a series iterator for all values across a single key.
+func (is IndexSet) TagKeySeriesIDIterator(name, key []byte) (SeriesIDIterator, error) {
+ a := make([]SeriesIDIterator, 0, len(is.Indexes))
+ for _, idx := range is.Indexes {
+ itr, err := idx.TagKeySeriesIDIterator(name, key)
+ if err != nil {
+ SeriesIDIterators(a).Close()
+ return nil, err
+ } else if itr != nil {
+ a = append(a, itr)
+ }
+ }
+ return FilterUndeletedSeriesIDIterator(is.SeriesFile, MergeSeriesIDIterators(a...)), nil
+}
+
+// TagValueSeriesIDIterator returns a series iterator for a single tag value.
+func (is IndexSet) TagValueSeriesIDIterator(name, key, value []byte) (SeriesIDIterator, error) {
+ a := make([]SeriesIDIterator, 0, len(is.Indexes))
+ for _, idx := range is.Indexes {
+ itr, err := idx.TagValueSeriesIDIterator(name, key, value)
+ if err != nil {
+ SeriesIDIterators(a).Close()
+ return nil, err
+ } else if itr != nil {
+ a = append(a, itr)
+ }
+ }
+ return FilterUndeletedSeriesIDIterator(is.SeriesFile, MergeSeriesIDIterators(a...)), nil
+}
+
+// MeasurementSeriesByExprIterator returns a series iterator for a measurement
+// that is filtered by expr. If expr only contains time expressions then this
+// call is equivalent to MeasurementSeriesIDIterator().
+func (is IndexSet) MeasurementSeriesByExprIterator(name []byte, expr influxql.Expr) (SeriesIDIterator, error) {
+ // Return all series for the measurement if there are no tag expressions.
+ if expr == nil {
+ return is.MeasurementSeriesIDIterator(name)
+ }
+ fieldset := is.FieldSet()
+
+ itr, err := is.seriesByExprIterator(name, expr, fieldset.CreateFieldsIfNotExists(name))
+ if err != nil {
+ return nil, err
+ }
+ return FilterUndeletedSeriesIDIterator(is.SeriesFile, itr), nil
+}
+
+// MeasurementSeriesKeysByExpr returns a list of series keys matching expr.
+func (is IndexSet) MeasurementSeriesKeysByExpr(name []byte, expr influxql.Expr) ([][]byte, error) {
+ // Create iterator for all matching series.
+ itr, err := is.MeasurementSeriesByExprIterator(name, expr)
+ if err != nil {
+ return nil, err
+ } else if itr == nil {
+ return nil, nil
+ }
+ defer itr.Close()
+
+ // Iterate over all series and generate keys.
+ var keys [][]byte
+ for {
+ e, err := itr.Next()
+ if err != nil {
+ return nil, err
+ } else if e.SeriesID == 0 {
+ break
+ }
+
+ // Check for unsupported field filters.
+ // Any remaining filters mean there were fields (e.g., `WHERE value = 1.2`).
+ if e.Expr != nil {
+ if v, ok := e.Expr.(*influxql.BooleanLiteral); !ok || !v.Val {
+ return nil, errors.New("fields not supported in WHERE clause during deletion")
+ }
+ }
+
+ seriesKey := is.SeriesFile.SeriesKey(e.SeriesID)
+ assert(seriesKey != nil, fmt.Sprintf("series key for ID: %d not found", e.SeriesID))
+
+ name, tags := ParseSeriesKey(seriesKey)
+ keys = append(keys, models.MakeKey(name, tags))
+ }
+
+ bytesutil.Sort(keys)
+
+ return keys, nil
+}
+
+func (is IndexSet) seriesByExprIterator(name []byte, expr influxql.Expr, mf *MeasurementFields) (SeriesIDIterator, error) {
+ switch expr := expr.(type) {
+ case *influxql.BinaryExpr:
+ switch expr.Op {
+ case influxql.AND, influxql.OR:
+ // Get the series IDs and filter expressions for the LHS.
+ litr, err := is.seriesByExprIterator(name, expr.LHS, mf)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get the series IDs and filter expressions for the RHS.
+ ritr, err := is.seriesByExprIterator(name, expr.RHS, mf)
+ if err != nil {
+ if litr != nil {
+ litr.Close()
+ }
+ return nil, err
+ }
+
+ // Intersect iterators if expression is "AND".
+ if expr.Op == influxql.AND {
+ return IntersectSeriesIDIterators(litr, ritr), nil
+ }
+
+ // Union iterators if expression is "OR".
+ return UnionSeriesIDIterators(litr, ritr), nil
+
+ default:
+ return is.seriesByBinaryExprIterator(name, expr, mf)
+ }
+
+ case *influxql.ParenExpr:
+ return is.seriesByExprIterator(name, expr.Expr, mf)
+
+ default:
+ return nil, nil
+ }
+}
+
+// seriesByBinaryExprIterator returns a series iterator for the binary expression,
+// attaching any residual filter expression to the elements it emits.
+func (is IndexSet) seriesByBinaryExprIterator(name []byte, n *influxql.BinaryExpr, mf *MeasurementFields) (SeriesIDIterator, error) {
+ // If this binary expression has another binary expression, then this
+ // is some expression math and we should just pass it to the underlying query.
+ if _, ok := n.LHS.(*influxql.BinaryExpr); ok {
+ itr, err := is.MeasurementSeriesIDIterator(name)
+ if err != nil {
+ return nil, err
+ }
+ return newSeriesIDExprIterator(itr, n), nil
+ } else if _, ok := n.RHS.(*influxql.BinaryExpr); ok {
+ itr, err := is.MeasurementSeriesIDIterator(name)
+ if err != nil {
+ return nil, err
+ }
+ return newSeriesIDExprIterator(itr, n), nil
+ }
+
+ // Retrieve the variable reference from the correct side of the expression.
+ key, ok := n.LHS.(*influxql.VarRef)
+ value := n.RHS
+ if !ok {
+ key, ok = n.RHS.(*influxql.VarRef)
+ if !ok {
+ return nil, fmt.Errorf("invalid expression: %s", n.String())
+ }
+ value = n.LHS
+ }
+
+ // For fields (anything that is not a tag and not the measurement name), the
+ // index cannot filter by value, so return all series for the measurement and
+ // defer the expression to the query engine.
+ if key.Val != "_name" && ((key.Type == influxql.Unknown && mf.HasField(key.Val)) || key.Type == influxql.AnyField || (key.Type != influxql.Tag && key.Type != influxql.Unknown)) {
+ itr, err := is.MeasurementSeriesIDIterator(name)
+ if err != nil {
+ return nil, err
+ }
+ return newSeriesIDExprIterator(itr, n), nil
+ } else if value, ok := value.(*influxql.VarRef); ok {
+ // Check if the RHS is a variable and if it is a field.
+ if value.Val != "_name" && ((value.Type == influxql.Unknown && mf.HasField(value.Val)) || key.Type == influxql.AnyField || (value.Type != influxql.Tag && value.Type != influxql.Unknown)) {
+ itr, err := is.MeasurementSeriesIDIterator(name)
+ if err != nil {
+ return nil, err
+ }
+ return newSeriesIDExprIterator(itr, n), nil
+ }
+ }
+
+ // Create iterator based on value type.
+ switch value := value.(type) {
+ case *influxql.StringLiteral:
+ return is.seriesByBinaryExprStringIterator(name, []byte(key.Val), []byte(value.Val), n.Op)
+ case *influxql.RegexLiteral:
+ return is.seriesByBinaryExprRegexIterator(name, []byte(key.Val), value.Val, n.Op)
+ case *influxql.VarRef:
+ return is.seriesByBinaryExprVarRefIterator(name, []byte(key.Val), value, n.Op)
+ default:
+ if n.Op == influxql.NEQ || n.Op == influxql.NEQREGEX {
+ return is.MeasurementSeriesIDIterator(name)
+ }
+ return nil, nil
+ }
+}
+
+func (is IndexSet) seriesByBinaryExprStringIterator(name, key, value []byte, op influxql.Token) (SeriesIDIterator, error) {
+ // Special handling for "_name" to match measurement name.
+ if bytes.Equal(key, []byte("_name")) {
+ if (op == influxql.EQ && bytes.Equal(value, name)) || (op == influxql.NEQ && !bytes.Equal(value, name)) {
+ return is.MeasurementSeriesIDIterator(name)
+ }
+ return nil, nil
+ }
+
+ if op == influxql.EQ {
+ // Match a specific value.
+ if len(value) != 0 {
+ return is.TagValueSeriesIDIterator(name, key, value)
+ }
+
+ mitr, err := is.MeasurementSeriesIDIterator(name)
+ if err != nil {
+ return nil, err
+ }
+
+ kitr, err := is.TagKeySeriesIDIterator(name, key)
+ if err != nil {
+ if mitr != nil {
+ mitr.Close()
+ }
+ return nil, err
+ }
+
+ // Return all measurement series that have no values from this tag key.
+ return DifferenceSeriesIDIterators(mitr, kitr), nil
+ }
+
+ // Return all measurement series without this tag value.
+ if len(value) != 0 {
+ mitr, err := is.MeasurementSeriesIDIterator(name)
+ if err != nil {
+ return nil, err
+ }
+
+ vitr, err := is.TagValueSeriesIDIterator(name, key, value)
+ if err != nil {
+ if mitr != nil {
+ mitr.Close()
+ }
+ return nil, err
+ }
+
+ return DifferenceSeriesIDIterators(mitr, vitr), nil
+ }
+
+ // Return all series across all values of this tag key.
+ return is.TagKeySeriesIDIterator(name, key)
+}
+
+func (is IndexSet) seriesByBinaryExprRegexIterator(name, key []byte, value *regexp.Regexp, op influxql.Token) (SeriesIDIterator, error) {
+ // Special handling for "_name" to match measurement name.
+ if bytes.Equal(key, []byte("_name")) {
+ match := value.Match(name)
+ if (op == influxql.EQREGEX && match) || (op == influxql.NEQREGEX && !match) {
+ mitr, err := is.MeasurementSeriesIDIterator(name)
+ if err != nil {
+ return nil, err
+ }
+ return newSeriesIDExprIterator(mitr, &influxql.BooleanLiteral{Val: true}), nil
+ }
+ return nil, nil
+ }
+ return is.MatchTagValueSeriesIDIterator(name, key, value, op == influxql.EQREGEX)
+}
+
+func (is IndexSet) seriesByBinaryExprVarRefIterator(name, key []byte, value *influxql.VarRef, op influxql.Token) (SeriesIDIterator, error) {
+ itr0, err := is.TagKeySeriesIDIterator(name, key)
+ if err != nil {
+ return nil, err
+ }
+
+ itr1, err := is.TagKeySeriesIDIterator(name, []byte(value.Val))
+ if err != nil {
+ if itr0 != nil {
+ itr0.Close()
+ }
+ return nil, err
+ }
+
+ if op == influxql.EQ {
+ return IntersectSeriesIDIterators(itr0, itr1), nil
+ }
+ return DifferenceSeriesIDIterators(itr0, itr1), nil
+}
+
+// MatchTagValueSeriesIDIterator returns a series iterator for tags which match value.
+// If matches is false, returns iterators which do not match value.
+func (is IndexSet) MatchTagValueSeriesIDIterator(name, key []byte, value *regexp.Regexp, matches bool) (SeriesIDIterator, error) {
+ matchEmpty := value.MatchString("")
+
+ if matches {
+ if matchEmpty {
+ return is.matchTagValueEqualEmptySeriesIDIterator(name, key, value)
+ }
+ return is.matchTagValueEqualNotEmptySeriesIDIterator(name, key, value)
+ }
+
+ if matchEmpty {
+ return is.matchTagValueNotEqualEmptySeriesIDIterator(name, key, value)
+ }
+ return is.matchTagValueNotEqualNotEmptySeriesIDIterator(name, key, value)
+}
+
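+// matchTagValueEqualEmptySeriesIDIterator handles a regex that matches the empty
+// string: series without the tag key also match, so the result is the
+// measurement's series minus those whose tag values do not match the regex.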
+func (is IndexSet) matchTagValueEqualEmptySeriesIDIterator(name, key []byte, value *regexp.Regexp) (SeriesIDIterator, error) {
+ vitr, err := is.TagValueIterator(name, key)
+ if err != nil {
+ return nil, err
+ } else if vitr == nil {
+ return is.MeasurementSeriesIDIterator(name)
+ }
+ defer vitr.Close()
+
+ var itrs []SeriesIDIterator
+ if err := func() error {
+ for {
+ e, err := vitr.Next()
+ if err != nil {
+ return err
+ } else if e == nil {
+ break
+ }
+
+ if !value.Match(e) {
+ itr, err := is.TagValueSeriesIDIterator(name, key, e)
+ if err != nil {
+ return err
+ }
+ itrs = append(itrs, itr)
+ }
+ }
+ return nil
+ }(); err != nil {
+ SeriesIDIterators(itrs).Close()
+ return nil, err
+ }
+
+ mitr, err := is.MeasurementSeriesIDIterator(name)
+ if err != nil {
+ SeriesIDIterators(itrs).Close()
+ return nil, err
+ }
+
+ return DifferenceSeriesIDIterators(mitr, MergeSeriesIDIterators(itrs...)), nil
+}
+
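+// matchTagValueEqualNotEmptySeriesIDIterator handles a regex that cannot match
+// the empty string: only series with a tag value matching the regex are returned.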
+func (is IndexSet) matchTagValueEqualNotEmptySeriesIDIterator(name, key []byte, value *regexp.Regexp) (SeriesIDIterator, error) {
+ vitr, err := is.TagValueIterator(name, key)
+ if err != nil {
+ return nil, err
+ } else if vitr == nil {
+ return nil, nil
+ }
+ defer vitr.Close()
+
+ var itrs []SeriesIDIterator
+ for {
+ e, err := vitr.Next()
+ if err != nil {
+ SeriesIDIterators(itrs).Close()
+ return nil, err
+ } else if e == nil {
+ break
+ }
+
+ if value.Match(e) {
+ itr, err := is.TagValueSeriesIDIterator(name, key, e)
+ if err != nil {
+ SeriesIDIterators(itrs).Close()
+ return nil, err
+ }
+ itrs = append(itrs, itr)
+ }
+ }
+ return MergeSeriesIDIterators(itrs...), nil
+}
+
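+// matchTagValueNotEqualEmptySeriesIDIterator handles a negated regex that matches
+// the empty string: a missing tag value would match the regex, so only series
+// whose tag values do not match the regex are returned.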
+func (is IndexSet) matchTagValueNotEqualEmptySeriesIDIterator(name, key []byte, value *regexp.Regexp) (SeriesIDIterator, error) {
+ vitr, err := is.TagValueIterator(name, key)
+ if err != nil {
+ return nil, err
+ } else if vitr == nil {
+ return nil, nil
+ }
+ defer vitr.Close()
+
+ var itrs []SeriesIDIterator
+ for {
+ e, err := vitr.Next()
+ if err != nil {
+ SeriesIDIterators(itrs).Close()
+ return nil, err
+ } else if e == nil {
+ break
+ }
+
+ if !value.Match(e) {
+ itr, err := is.TagValueSeriesIDIterator(name, key, e)
+ if err != nil {
+ SeriesIDIterators(itrs).Close()
+ return nil, err
+ }
+ itrs = append(itrs, itr)
+ }
+ }
+ return MergeSeriesIDIterators(itrs...), nil
+}
+
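+// matchTagValueNotEqualNotEmptySeriesIDIterator handles a negated regex that
+// cannot match the empty string: series without the tag key satisfy the negation,
+// so the result is the measurement's series minus those whose values match.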
+func (is IndexSet) matchTagValueNotEqualNotEmptySeriesIDIterator(name, key []byte, value *regexp.Regexp) (SeriesIDIterator, error) {
+ vitr, err := is.TagValueIterator(name, key)
+ if err != nil {
+ return nil, err
+ } else if vitr == nil {
+ return is.MeasurementSeriesIDIterator(name)
+ }
+ defer vitr.Close()
+
+ var itrs []SeriesIDIterator
+ for {
+ e, err := vitr.Next()
+ if err != nil {
+ SeriesIDIterators(itrs).Close()
+ return nil, err
+ } else if e == nil {
+ break
+ }
+ if value.Match(e) {
+ itr, err := is.TagValueSeriesIDIterator(name, key, e)
+ if err != nil {
+ SeriesIDIterators(itrs).Close()
+ return nil, err
+ }
+ itrs = append(itrs, itr)
+ }
+ }
+
+ mitr, err := is.MeasurementSeriesIDIterator(name)
+ if err != nil {
+ SeriesIDIterators(itrs).Close()
+ return nil, err
+ }
+ return DifferenceSeriesIDIterators(mitr, MergeSeriesIDIterators(itrs...)), nil
+}
+
+// TagValuesByKeyAndExpr retrieves tag values for the provided tag keys.
+//
+// TagValuesByKeyAndExpr returns sets of values for each key, indexable by the
+// position of the tag key in the keys argument.
+//
+// N.B. TagValuesByKeyAndExpr relies on keys being sorted in ascending
+// lexicographic order.
+func (is IndexSet) TagValuesByKeyAndExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr, fieldset *MeasurementFieldSet) ([]map[string]struct{}, error) {
+ database := is.Database()
+
+ itr, err := is.seriesByExprIterator(name, expr, fieldset.Fields(string(name)))
+ if err != nil {
+ return nil, err
+ } else if itr == nil {
+ return nil, nil
+ }
+ itr = FilterUndeletedSeriesIDIterator(is.SeriesFile, itr)
+ defer itr.Close()
+
+ keyIdxs := make(map[string]int, len(keys))
+ for ki, key := range keys {
+ keyIdxs[key] = ki
+
+ // Check that keys are in order.
+ if ki > 0 && key < keys[ki-1] {
+ return nil, fmt.Errorf("keys %v are not in ascending order", keys)
+ }
+ }
+
+ resultSet := make([]map[string]struct{}, len(keys))
+ for i := 0; i < len(resultSet); i++ {
+ resultSet[i] = make(map[string]struct{})
+ }
+
+ // Iterate all series to collect tag values.
+ for {
+ e, err := itr.Next()
+ if err != nil {
+ return nil, err
+ } else if e.SeriesID == 0 {
+ break
+ }
+
+ buf := is.SeriesFile.SeriesKey(e.SeriesID)
+ if buf == nil {
+ continue
+ }
+
+ if auth != nil {
+ name, tags := ParseSeriesKey(buf)
+ if !auth.AuthorizeSeriesRead(database, name, tags) {
+ continue
+ }
+ }
+
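+ // Decode the series key in place: length, measurement name, tag count, then
+ // each tag key/value pair in sorted order.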
+ _, buf = ReadSeriesKeyLen(buf)
+ _, buf = ReadSeriesKeyMeasurement(buf)
+ tagN, buf := ReadSeriesKeyTagN(buf)
+ for i := 0; i < tagN; i++ {
+ var key, value []byte
+ key, value, buf = ReadSeriesKeyTag(buf)
+
+ if idx, ok := keyIdxs[string(key)]; ok {
+ resultSet[idx][string(value)] = struct{}{}
+ } else if string(key) > keys[len(keys)-1] {
+ // The tag key is > the largest key we're interested in.
+ break
+ }
+ }
+ }
+ return resultSet, nil
+}
+
+// MeasurementTagKeyValuesByExpr returns a set of tag values filtered by an expression.
+func (is IndexSet) MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error) {
+ if len(keys) == 0 {
+ return nil, nil
+ }
+
+ results := make([][]string, len(keys))
+ // If the keys are not sorted, then sort them.
+ if !keysSorted {
+ sort.Sort(sort.StringSlice(keys))
+ }
+
+ // No expression means that the values shouldn't be filtered, so fetch them
+ // all.
+ if expr == nil {
+ for ki, key := range keys {
+ vitr, err := is.TagValueIterator(name, []byte(key))
+ if err != nil {
+ return nil, err
+ } else if vitr == nil {
+ break
+ }
+ defer vitr.Close()
+
+ // If no authorizer present then return all values.
+ if auth == nil {
+ for {
+ val, err := vitr.Next()
+ if err != nil {
+ return nil, err
+ } else if val == nil {
+ break
+ }
+ results[ki] = append(results[ki], string(val))
+ }
+ continue
+ }
+
+ // Authorization is present; check the series for each matching tag value
+ // until an authorized series is found.
+ for {
+ val, err := vitr.Next()
+ if err != nil {
+ return nil, err
+ } else if val == nil {
+ break
+ }
+
+ sitr, err := is.TagValueSeriesIDIterator(name, []byte(key), val)
+ if err != nil {
+ return nil, err
+ } else if sitr == nil {
+ break
+ }
+ defer sitr.Close()
+
+ for {
+ se, err := sitr.Next()
+ if err != nil {
+ return nil, err
+ }
+
+ if se.SeriesID == 0 {
+ break
+ }
+
+ name, tags := is.SeriesFile.Series(se.SeriesID)
+ if auth.AuthorizeSeriesRead(is.Database(), name, tags) {
+ results[ki] = append(results[ki], string(val))
+ break
+ }
+ }
+ if err := sitr.Close(); err != nil {
+ return nil, err
+ }
+ }
+ }
+ return results, nil
+ }
+
+ // This is the case where we have filtered series by some WHERE condition.
+ // We only care about the tag values for the keys given the
+ // filtered set of series ids.
+ resultSet, err := is.TagValuesByKeyAndExpr(auth, name, keys, expr, is.FieldSet())
+ if err != nil {
+ return nil, err
+ }
+
+ // Convert result sets into []string
+ for i, s := range resultSet {
+ values := make([]string, 0, len(s))
+ for v := range s {
+ values = append(values, v)
+ }
+ sort.Sort(sort.StringSlice(values))
+ results[i] = values
+ }
+ return results, nil
+}
+
+// TagSets returns an ordered list of tag sets for a measurement by dimension
+// and filtered by an optional conditional expression.
+func (is IndexSet) TagSets(sfile *SeriesFile, name []byte, opt query.IteratorOptions) ([]*query.TagSet, error) {
+ itr, err := is.MeasurementSeriesByExprIterator(name, opt.Condition)
+ if err != nil {
+ return nil, err
+ } else if itr != nil {
+ defer itr.Close()
+ }
+
+ // For every series, get the tag values for the requested tag keys i.e.
+ // dimensions. This is the TagSet for that series. Series with the same
+ // TagSet are then grouped together, because for the purpose of GROUP BY
+ // they are part of the same composite series.
+ tagSets := make(map[string]*query.TagSet, 64)
+
+ if itr != nil {
+ for {
+ e, err := itr.Next()
+ if err != nil {
+ return nil, err
+ } else if e.SeriesID == 0 {
+ break
+ }
+
+ _, tags := ParseSeriesKey(sfile.SeriesKey(e.SeriesID))
+ if opt.Authorizer != nil && !opt.Authorizer.AuthorizeSeriesRead(is.Database(), name, tags) {
+ continue
+ }
+
+ tagsMap := make(map[string]string, len(opt.Dimensions))
+
+ // Build the TagSet for this series.
+ for _, dim := range opt.Dimensions {
+ tagsMap[dim] = tags.GetString(dim)
+ }
+
+ // Convert the TagSet to a string, so it can be added to a map
+ // allowing TagSets to be handled as a set.
+ tagsAsKey := MarshalTags(tagsMap)
+ tagSet, ok := tagSets[string(tagsAsKey)]
+ if !ok {
+ // This TagSet is new, create a new entry for it.
+ tagSet = &query.TagSet{
+ Tags: tagsMap,
+ Key: tagsAsKey,
+ }
+ }
+
+ // Associate the series and filter with the Tagset.
+ tagSet.AddFilter(string(models.MakeKey(name, tags)), e.Expr)
+
+ // Ensure it's back in the map.
+ tagSets[string(tagsAsKey)] = tagSet
+ }
+ }
+
+ // Sort the series in each tag set.
+ for _, t := range tagSets {
+ sort.Sort(t)
+ }
+
+ // The TagSets have been created, as a map of TagSets. Just send
+ // the values back as a slice, sorting for consistency.
+ sortedTagsSets := make([]*query.TagSet, 0, len(tagSets))
+ for _, v := range tagSets {
+ sortedTagsSets = append(sortedTagsSets, v)
+ }
+ sort.Sort(byTagKey(sortedTagsSets))
+
+ return sortedTagsSets, nil
}
// IndexFormat represents the format for an index.
@@ -94,7 +2264,7 @@ const (
)
// NewIndexFunc creates a new index.
-type NewIndexFunc func(id uint64, database, path string, options EngineOptions) Index
+type NewIndexFunc func(id uint64, database, path string, seriesIDSet *SeriesIDSet, sfile *SeriesFile, options EngineOptions) Index
// newIndexFuncs is a lookup of index constructors by name.
var newIndexFuncs = make(map[string]NewIndexFunc)
@@ -119,7 +2289,7 @@ func RegisteredIndexes() []string {
// NewIndex returns an instance of an index based on its format.
// If the path does not exist then the DefaultFormat is used.
-func NewIndex(id uint64, database, path string, options EngineOptions) (Index, error) {
+func NewIndex(id uint64, database, path string, seriesIDSet *SeriesIDSet, sfile *SeriesFile, options EngineOptions) (Index, error) {
format := options.IndexVersion
// Use default format unless existing directory exists.
@@ -137,11 +2307,11 @@ func NewIndex(id uint64, database, path string, options EngineOptions) (Index, e
if fn == nil {
return nil, fmt.Errorf("invalid index format: %q", format)
}
- return fn(id, database, path, options), nil
+ return fn(id, database, path, seriesIDSet, sfile, options), nil
}
-func MustOpenIndex(id uint64, database, path string, options EngineOptions) Index {
- idx, err := NewIndex(id, database, path, options)
+func MustOpenIndex(id uint64, database, path string, seriesIDSet *SeriesIDSet, sfile *SeriesFile, options EngineOptions) Index {
+ idx, err := NewIndex(id, database, path, seriesIDSet, sfile, options)
if err != nil {
panic(err)
} else if err := idx.Open(); err != nil {
@@ -149,3 +2319,19 @@ func MustOpenIndex(id uint64, database, path string, options EngineOptions) Inde
}
return idx
}
+
+// assert will panic with a given formatted message if the given condition is false.
+func assert(condition bool, msg string, v ...interface{}) {
+ if !condition {
+ panic(fmt.Sprintf("assert failed: "+msg, v...))
+ }
+}
+
+type byTagKey []*query.TagSet
+
+func (t byTagKey) Len() int { return len(t) }
+func (t byTagKey) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) < 0 }
+func (t byTagKey) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+
+// TEMP
+func dump(v interface{}) { spew.Dump(v) }
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/inmem.go b/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/inmem.go
index d8eb0873eb..f5b5f4905b 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/inmem.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/inmem.go
@@ -18,7 +18,6 @@ import (
"sort"
"sync"
"time"
- // "sync/atomic"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/pkg/bytesutil"
@@ -35,10 +34,10 @@ import (
const IndexName = "inmem"
func init() {
- tsdb.NewInmemIndex = func(name string) (interface{}, error) { return NewIndex(name), nil }
+ tsdb.NewInmemIndex = func(name string, sfile *tsdb.SeriesFile) (interface{}, error) { return NewIndex(name, sfile), nil }
- tsdb.RegisterIndex(IndexName, func(id uint64, database, path string, opt tsdb.EngineOptions) tsdb.Index {
- return NewShardIndex(id, database, path, opt)
+ tsdb.RegisterIndex(IndexName, func(id uint64, database, path string, seriesIDSet *tsdb.SeriesIDSet, sfile *tsdb.SeriesFile, opt tsdb.EngineOptions) tsdb.Index {
+ return NewShardIndex(id, database, path, seriesIDSet, sfile, opt)
})
}
@@ -49,11 +48,12 @@ type Index struct {
mu sync.RWMutex
database string
+ sfile *tsdb.SeriesFile
+ fieldset *tsdb.MeasurementFieldSet
// In-memory metadata index, built on load and updated when new series come in
- measurements map[string]*Measurement // measurement name to object and index
- series map[string]*Series // map series key to the Series object
- lastID uint64 // last used series ID. They're in memory only for this shard
+ measurements map[string]*measurement // measurement name to object and index
+ series map[string]*series // map series key to the Series object
seriesSketch, seriesTSSketch *hll.Plus
measurementsSketch, measurementsTSSketch *hll.Plus
@@ -63,11 +63,12 @@ type Index struct {
}
// NewIndex returns a new initialized Index.
-func NewIndex(database string) *Index {
+func NewIndex(database string, sfile *tsdb.SeriesFile) *Index {
index := &Index{
database: database,
- measurements: make(map[string]*Measurement),
- series: make(map[string]*Series),
+ sfile: sfile,
+ measurements: make(map[string]*measurement),
+ series: make(map[string]*series),
}
index.seriesSketch = hll.NewDefaultPlus()
@@ -84,8 +85,13 @@ func (i *Index) Close() error { return nil }
func (i *Index) WithLogger(*zap.Logger) {}
+// Database returns the name of the database the index was initialized with.
+func (i *Index) Database() string {
+ return i.database
+}
+
// Series returns a series by key.
-func (i *Index) Series(key []byte) (*Series, error) {
+func (i *Index) Series(key []byte) (*series, error) {
i.mu.RLock()
s := i.series[string(key)]
i.mu.RUnlock()
@@ -110,7 +116,7 @@ func (i *Index) SeriesN() int64 {
}
// Measurement returns the measurement object from the index by the name
-func (i *Index) Measurement(name []byte) (*Measurement, error) {
+func (i *Index) Measurement(name []byte) (*measurement, error) {
i.mu.RLock()
defer i.mu.RUnlock()
return i.measurements[string(name)], nil
@@ -131,11 +137,11 @@ func (i *Index) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, erro
}
// MeasurementsByName returns a list of measurements.
-func (i *Index) MeasurementsByName(names [][]byte) ([]*Measurement, error) {
+func (i *Index) MeasurementsByName(names [][]byte) ([]*measurement, error) {
i.mu.RLock()
defer i.mu.RUnlock()
- a := make([]*Measurement, 0, len(names))
+ a := make([]*measurement, 0, len(names))
for _, name := range names {
if m := i.measurements[string(name)]; m != nil {
a = append(a, m)
@@ -144,9 +150,24 @@ func (i *Index) MeasurementsByName(names [][]byte) ([]*Measurement, error) {
return a, nil
}
+// MeasurementIterator returns an iterator over all measurements in the index.
+// MeasurementIterator does not support authorization.
+func (i *Index) MeasurementIterator() (tsdb.MeasurementIterator, error) {
+ names, err := i.MeasurementNamesByExpr(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ return tsdb.NewMeasurementSliceIterator(names), nil
+}
+
// CreateSeriesIfNotExists adds the series for the given measurement to the
// index and sets its ID or returns the existing series object
-func (i *Index) CreateSeriesIfNotExists(shardID uint64, key, name []byte, tags models.Tags, opt *tsdb.EngineOptions, ignoreLimits bool) error {
+func (i *Index) CreateSeriesIfNotExists(shardID uint64, seriesSet *tsdb.SeriesIDSet, key, name []byte, tags models.Tags, opt *tsdb.EngineOptions, ignoreLimits bool) error {
+ if _, err := i.sfile.CreateSeriesListIfNotExists([][]byte{name}, []models.Tags{tags}, nil); err != nil {
+ return err
+ }
+ seriesID := i.sfile.SeriesID(name, tags, nil)
+
i.mu.RLock()
// if there is a series for this id, it's already been added
ss := i.series[string(key)]
@@ -179,25 +200,25 @@ func (i *Index) CreateSeriesIfNotExists(shardID uint64, key, name []byte, tags m
// set the in memory ID for query processing on this shard
// The series key and tags are cloned to prevent a memory leak
- series := NewSeries([]byte(string(key)), tags.Clone())
- series.ID = i.lastID + 1
- i.lastID++
+ skey := string(key)
+ ss = newSeries(seriesID, m, skey, tags.Clone())
+ i.series[skey] = ss
- series.SetMeasurement(m)
- i.series[string(key)] = series
-
- m.AddSeries(series)
- series.AssignShard(shardID, time.Now().UnixNano())
+ m.AddSeries(ss)
+ ss.AssignShard(shardID, time.Now().UnixNano())
// Add the series to the series sketch.
i.seriesSketch.Add(key)
+ // This series needs to be added to the bitset tracking undeleted series IDs.
+ seriesSet.Add(seriesID)
+
return nil
}
// CreateMeasurementIndexIfNotExists creates or retrieves an in memory index
// object for the measurement
-func (i *Index) CreateMeasurementIndexIfNotExists(name []byte) *Measurement {
+func (i *Index) CreateMeasurementIndexIfNotExists(name []byte) *measurement {
name = escape.Unescape(name)
// See if the measurement exists using a read-lock
@@ -217,7 +238,7 @@ func (i *Index) CreateMeasurementIndexIfNotExists(name []byte) *Measurement {
// and acquire the write lock
m = i.measurements[string(name)]
if m == nil {
- m = NewMeasurement(i.database, string(name))
+ m = newMeasurement(i.database, string(name))
i.measurements[string(name)] = m
// Add the measurement to the measurements sketch.
@@ -239,15 +260,15 @@ func (i *Index) HasTagKey(name, key []byte) (bool, error) {
}
// HasTagValue returns true if tag value exists.
-func (i *Index) HasTagValue(name, key, value []byte) bool {
+func (i *Index) HasTagValue(name, key, value []byte) (bool, error) {
i.mu.RLock()
mm := i.measurements[string(name)]
i.mu.RUnlock()
if mm == nil {
- return false
+ return false, nil
}
- return mm.HasTagKeyValue(key, value)
+ return mm.HasTagKeyValue(key, value), nil
}
// TagValueN returns the cardinality of a tag value.
@@ -291,19 +312,19 @@ func (i *Index) TagKeyHasAuthorizedSeries(auth query.Authorizer, name []byte, ke
// possible to get the set of unique series IDs for a given measurement name
// and tag key.
var authorized bool
- mm.SeriesByTagKeyValue(key).Range(func(_ string, seriesIDs SeriesIDs) bool {
+ mm.SeriesByTagKeyValue(key).Range(func(_ string, sIDs seriesIDs) bool {
if auth == nil || auth == query.OpenAuthorizer {
authorized = true
return false
}
- for _, id := range seriesIDs {
+ for _, id := range sIDs {
s := mm.SeriesByID(id)
if s == nil {
continue
}
- if auth.AuthorizeSeriesRead(i.database, mm.name, s.Tags()) {
+ if auth.AuthorizeSeriesRead(i.database, mm.NameBytes, s.Tags) {
authorized = true
return false
}
@@ -366,13 +387,13 @@ func (i *Index) MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte
if s == nil {
continue
}
- if auth != nil && !auth.AuthorizeSeriesRead(i.database, s.Measurement().name, s.Tags()) {
+ if auth != nil && !auth.AuthorizeSeriesRead(i.database, s.Measurement.NameBytes, s.Tags) {
continue
}
// Iterate the tag keys we're interested in and collect values
// from this series, if they exist.
- for _, t := range s.Tags() {
+ for _, t := range s.Tags {
if idx, ok := keyIdxs[string(t.Key)]; ok {
resultSet[idx].add(string(t.Value))
} else if string(t.Key) > keys[len(keys)-1] {
@@ -430,11 +451,14 @@ func (i *Index) TagsForSeries(key string) (models.Tags, error) {
if ss == nil {
return nil, nil
}
- return ss.Tags(), nil
+ return ss.Tags, nil
}
// MeasurementNamesByExpr takes an expression containing only tags and returns a
// list of matching measurement names.
+//
+// TODO(edd): Remove authorisation from these methods. There shouldn't need to
+// be any auth passed down into the index.
func (i *Index) MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) {
i.mu.RLock()
defer i.mu.RUnlock()
@@ -444,7 +468,7 @@ func (i *Index) MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr
a := make([][]byte, 0, len(i.measurements))
for _, m := range i.measurements {
if m.Authorized(auth) {
- a = append(a, m.name)
+ a = append(a, m.NameBytes)
}
}
bytesutil.Sort(a)
@@ -536,7 +560,7 @@ func (i *Index) measurementNamesByNameFilter(auth query.Authorizer, op influxql.
}
if matched && m.Authorized(auth) {
- names = append(names, m.name)
+ names = append(names, m.NameBytes)
}
}
bytesutil.Sort(names)
@@ -568,7 +592,7 @@ func (i *Index) measurementNamesByTagFilters(auth query.Authorizer, filter *TagF
// Check the tag values belonging to the tag key for equivalence to the
// tag value being filtered on.
- tagVals.Range(func(tv string, seriesIDs SeriesIDs) bool {
+ tagVals.Range(func(tv string, seriesIDs seriesIDs) bool {
if !valEqual(tv) {
return true // No match. Keep checking.
}
@@ -581,7 +605,14 @@ func (i *Index) measurementNamesByTagFilters(auth query.Authorizer, filter *TagF
// Is there a series with this matching tag value that is
// authorized to be read?
for _, sid := range seriesIDs {
- if s := m.SeriesByID(sid); s != nil && auth.AuthorizeSeriesRead(i.database, m.name, s.Tags()) {
+ s := m.SeriesByID(sid)
+
+ // If the series is deleted then it can't be used to authorise against.
+ if s != nil && s.Deleted() {
+ continue
+ }
+
+ if s != nil && auth.AuthorizeSeriesRead(i.database, m.NameBytes, s.Tags) {
// The Range call can return early as a matching
// tag value with an authorized series has been found.
authorized = true
@@ -608,7 +639,7 @@ func (i *Index) measurementNamesByTagFilters(auth query.Authorizer, filter *TagF
// False | True | False
// False | False | True
if tagMatch == (filter.Op == influxql.EQ || filter.Op == influxql.EQREGEX) && authorized {
- names = append(names, []byte(m.Name))
+ names = append(names, m.NameBytes)
}
}
@@ -624,7 +655,7 @@ func (i *Index) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
var matches [][]byte
for _, m := range i.measurements {
if re.MatchString(m.Name) {
- matches = append(matches, []byte(m.Name))
+ matches = append(matches, m.NameBytes)
}
}
return matches, nil
@@ -682,14 +713,13 @@ func (i *Index) DropSeries(key []byte, ts int64) error {
delete(i.series, k)
// Remove the measurement's reference.
- series.Measurement().DropSeries(series)
-
+ series.Measurement.DropSeries(series)
// Mark the series as deleted.
series.Delete(ts)
// If the measurement no longer has any series, remove it as well.
- if !series.Measurement().HasSeries() {
- i.dropMeasurement(series.Measurement().Name)
+ if !series.Measurement.HasSeries() {
+ i.dropMeasurement(series.Measurement.Name)
}
return nil
@@ -721,10 +751,22 @@ func (i *Index) SeriesKeys() []string {
}
i.mu.RUnlock()
return s
+
}
// SetFieldSet sets a shared field set from the engine.
-func (i *Index) SetFieldSet(*tsdb.MeasurementFieldSet) {}
+func (i *Index) SetFieldSet(fieldset *tsdb.MeasurementFieldSet) {
+ i.mu.Lock()
+ defer i.mu.Unlock()
+ i.fieldset = fieldset
+}
+
+// FieldSet returns the assigned fieldset.
+func (i *Index) FieldSet() *tsdb.MeasurementFieldSet {
+ i.mu.RLock()
+ defer i.mu.RUnlock()
+ return i.fieldset
+}
// SetFieldName adds a field name to a measurement.
func (i *Index) SetFieldName(measurement []byte, name string) {
@@ -735,7 +777,7 @@ func (i *Index) SetFieldName(measurement []byte, name string) {
// ForEachMeasurementName iterates over each measurement name.
func (i *Index) ForEachMeasurementName(fn func(name []byte) error) error {
i.mu.RLock()
- mms := make(Measurements, 0, len(i.measurements))
+ mms := make(measurements, 0, len(i.measurements))
for _, m := range i.measurements {
mms = append(mms, m)
}
@@ -743,19 +785,110 @@ func (i *Index) ForEachMeasurementName(fn func(name []byte) error) error {
i.mu.RUnlock()
for _, m := range mms {
- if err := fn([]byte(m.Name)); err != nil {
+ if err := fn(m.NameBytes); err != nil {
return err
}
}
return nil
}
-func (i *Index) MeasurementSeriesKeysByExprIterator(name []byte, condition influxql.Expr) (tsdb.SeriesIterator, error) {
- keys, err := i.MeasurementSeriesKeysByExpr(name, condition)
+func (i *Index) MeasurementSeriesIDIterator(name []byte) (tsdb.SeriesIDIterator, error) {
+ return i.MeasurementSeriesKeysByExprIterator(name, nil)
+}
+
+func (i *Index) TagKeySeriesIDIterator(name, key []byte) (tsdb.SeriesIDIterator, error) {
+ i.mu.RLock()
+ defer i.mu.RUnlock()
+
+ m := i.measurements[string(name)]
+ if m == nil {
+ return nil, nil
+ }
+ return tsdb.NewSeriesIDSliceIterator([]uint64(m.SeriesIDsByTagKey(key))), nil
+}
+
+func (i *Index) TagValueSeriesIDIterator(name, key, value []byte) (tsdb.SeriesIDIterator, error) {
+ i.mu.RLock()
+ defer i.mu.RUnlock()
+
+ m := i.measurements[string(name)]
+ if m == nil {
+ return nil, nil
+ }
+ return tsdb.NewSeriesIDSliceIterator([]uint64(m.SeriesIDsByTagValue(key, value))), nil
+}
+
+func (i *Index) TagKeyIterator(name []byte) (tsdb.TagKeyIterator, error) {
+ i.mu.RLock()
+ defer i.mu.RUnlock()
+
+ m := i.measurements[string(name)]
+ if m == nil {
+ return nil, nil
+ }
+ keys := m.TagKeys()
+ sort.Strings(keys)
+
+ a := make([][]byte, len(keys))
+ for i := range a {
+ a[i] = []byte(keys[i])
+ }
+ return tsdb.NewTagKeySliceIterator(a), nil
+}
+
+// TagValueIterator provides an iterator over all the tag values belonging to
+// series with the provided measurement name and tag key.
+//
+// TagValueIterator does not currently support authorization.
+func (i *Index) TagValueIterator(name, key []byte) (tsdb.TagValueIterator, error) {
+ i.mu.RLock()
+ defer i.mu.RUnlock()
+
+ m := i.measurements[string(name)]
+ if m == nil {
+ return nil, nil
+ }
+ values := m.TagValues(nil, string(key))
+ sort.Strings(values)
+
+ a := make([][]byte, len(values))
+ for i := range a {
+ a[i] = []byte(values[i])
+ }
+ return tsdb.NewTagValueSliceIterator(a), nil
+}
+
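// A minimal standalone sketch of the slice-backed iterator shape that the
// TagKeyIterator and TagValueIterator methods above depend on: values are
// snapshotted into a sorted [][]byte under the read lock and then walked one
// element at a time. The type and constructor below are hypothetical, not the
// tsdb API; only the standard sort package is assumed.
type bytesSliceIterator struct {
	values [][]byte
	i      int
}

func newBytesSliceIterator(vals []string) *bytesSliceIterator {
	sort.Strings(vals)
	a := make([][]byte, len(vals))
	for i, v := range vals {
		a[i] = []byte(v)
	}
	return &bytesSliceIterator{values: a}
}

// Next returns the next value, or nil once the iterator is exhausted.
func (itr *bytesSliceIterator) Next() []byte {
	if itr.i >= len(itr.values) {
		return nil
	}
	v := itr.values[itr.i]
	itr.i++
	return v
}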
+func (i *Index) MeasurementSeriesKeysByExprIterator(name []byte, condition influxql.Expr) (tsdb.SeriesIDIterator, error) {
+ i.mu.RLock()
+ defer i.mu.RUnlock()
+
+ m := i.measurements[string(name)]
+ if m == nil {
+ return nil, nil
+ }
+
+ // Return all series if no condition specified.
+ if condition == nil {
+ return tsdb.NewSeriesIDSliceIterator([]uint64(m.SeriesIDs())), nil
+ }
+
+ // Get series IDs that match the WHERE clause.
+ ids, filters, err := m.WalkWhereForSeriesIds(condition)
if err != nil {
return nil, err
}
- return &seriesIterator{keys: keys}, err
+
+ // Delete boolean literal true filter expressions.
+ // These are returned for `WHERE tagKey = 'tagVal'` type expressions and are okay.
+ filters.DeleteBoolLiteralTrues()
+
+ // Check for unsupported field filters.
+ // Any remaining filters means there were fields (e.g., `WHERE value = 1.2`).
+ if filters.Len() > 0 {
+ return nil, errors.New("fields not supported in WHERE clause during deletion")
+ }
+
+ return tsdb.NewSeriesIDSliceIterator([]uint64(ids)), nil
}
func (i *Index) MeasurementSeriesKeysByExpr(name []byte, condition influxql.Expr) ([][]byte, error) {
@@ -791,31 +924,25 @@ func (i *Index) MeasurementSeriesKeysByExpr(name []byte, condition influxql.Expr
return m.SeriesKeysByID(ids), nil
}
-// SeriesPointIterator returns an influxql iterator over all series.
-func (i *Index) SeriesPointIterator(opt query.IteratorOptions) (query.Iterator, error) {
+// SeriesIDIterator returns an influxql iterator over matching series ids.
+func (i *Index) SeriesIDIterator(opt query.IteratorOptions) (tsdb.SeriesIDIterator, error) {
i.mu.RLock()
defer i.mu.RUnlock()
// Read and sort all measurements.
- mms := make(Measurements, 0, len(i.measurements))
+ mms := make(measurements, 0, len(i.measurements))
for _, mm := range i.measurements {
mms = append(mms, mm)
}
sort.Sort(mms)
- return &seriesPointIterator{
+ return &seriesIDIterator{
database: i.database,
mms: mms,
- point: query.FloatPoint{
- Aux: make([]interface{}, len(opt.Aux)),
- },
- opt: opt,
+ opt: opt,
}, nil
}
-// SnapshotTo is a no-op since this is an in-memory index.
-func (i *Index) SnapshotTo(path string) error { return nil }
-
// DiskSizeBytes always returns zero bytes, since this is an in-memory index.
func (i *Index) DiskSizeBytes() int64 { return 0 }
@@ -908,12 +1035,25 @@ var _ tsdb.Index = &ShardIndex{}
// in-memory index. This is required because per-shard in-memory indexes will
// grow the heap size too large.
type ShardIndex struct {
- *Index
+ id uint64 // shard id
+
+ *Index // Shared reference to global database-wide index.
+
+ // Bitset storing all undeleted series IDs associated with this shard.
+ seriesIDSet *tsdb.SeriesIDSet
- id uint64 // shard id
opt tsdb.EngineOptions
}
+// UnassignShard unassigns the provided series from this shard.
+func (idx *ShardIndex) UnassignShard(key string, id uint64, ts int64) error {
+ // TODO(edd): temporarily munging series id and shard id into same value,
+ // to test prototype without having to change Index API.
+ sid, shardID := id>>32, id&0xFFFFFFFF
+ idx.seriesIDSet.Remove(sid)
+ return idx.Index.UnassignShard(key, shardID, ts)
+}
+
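// A minimal standalone sketch of the id-munging scheme that UnassignShard
// above decodes: the series id rides in the upper 32 bits of a uint64 and the
// shard id in the lower 32 bits. The helper names are hypothetical.
func packSeriesShardID(seriesID, shardID uint64) uint64 {
	return seriesID<<32 | (shardID & 0xFFFFFFFF)
}

func unpackSeriesShardID(packed uint64) (seriesID, shardID uint64) {
	return packed >> 32, packed & 0xFFFFFFFF
}

// For example, packSeriesShardID(7, 3) yields 0x0000000700000003, and
// unpackSeriesShardID returns (7, 3) again.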
// CreateSeriesListIfNotExists creates a list of series in bulk if they don't already exist.
func (idx *ShardIndex) CreateSeriesListIfNotExists(keys, names [][]byte, tagsSlice []models.Tags) error {
keys, names, tagsSlice = idx.assignExistingSeries(idx.id, keys, names, tagsSlice)
@@ -921,9 +1061,10 @@ func (idx *ShardIndex) CreateSeriesListIfNotExists(keys, names [][]byte, tagsSli
return nil
}
- var reason string
- var dropped int
- var droppedKeys map[string]struct{}
+ var (
+ reason string
+ droppedKeys [][]byte
+ )
// Ensure that no tags go over the maximum cardinality.
if maxValuesPerTag := idx.opt.Config.MaxValuesPerTag; maxValuesPerTag > 0 {
@@ -934,7 +1075,7 @@ func (idx *ShardIndex) CreateSeriesListIfNotExists(keys, names [][]byte, tagsSli
tags := tagsSlice[i]
for _, tag := range tags {
// Skip if the tag value already exists.
- if idx.HasTagValue(name, tag.Key, tag.Value) {
+ if ok, _ := idx.HasTagValue(name, tag.Key, tag.Value); ok {
continue
}
@@ -944,19 +1085,19 @@ func (idx *ShardIndex) CreateSeriesListIfNotExists(keys, names [][]byte, tagsSli
continue
}
- dropped++
- reason = fmt.Sprintf("max-values-per-tag limit exceeded (%d/%d): measurement=%q tag=%q value=%q",
- n, maxValuesPerTag, name, string(tag.Key), string(tag.Value))
-
- if droppedKeys == nil {
- droppedKeys = make(map[string]struct{})
+ if reason == "" {
+ reason = fmt.Sprintf("max-values-per-tag limit exceeded (%d/%d): measurement=%q tag=%q value=%q",
+ n, maxValuesPerTag, name, string(tag.Key), string(tag.Value))
}
- droppedKeys[string(keys[i])] = struct{}{}
+
+ droppedKeys = append(droppedKeys, keys[i])
continue outer
}
// Increment success count if all checks complete.
- keys[n], names[n], tagsSlice[n] = keys[i], names[i], tagsSlice[i]
+ if n != i {
+ keys[n], names[n], tagsSlice[n] = keys[i], names[i], tagsSlice[i]
+ }
n++
}
@@ -967,12 +1108,11 @@ func (idx *ShardIndex) CreateSeriesListIfNotExists(keys, names [][]byte, tagsSli
// Write
for i := range keys {
if err := idx.CreateSeriesIfNotExists(keys[i], names[i], tagsSlice[i]); err == errMaxSeriesPerDatabaseExceeded {
- dropped++
- reason = fmt.Sprintf("max-series-per-database limit exceeded: (%d)", idx.opt.Config.MaxSeriesPerDatabase)
- if droppedKeys == nil {
- droppedKeys = make(map[string]struct{})
+ if reason == "" {
+ reason = fmt.Sprintf("max-series-per-database limit exceeded: (%d)", idx.opt.Config.MaxSeriesPerDatabase)
}
- droppedKeys[string(keys[i])] = struct{}{}
+
+ droppedKeys = append(droppedKeys, keys[i])
continue
} else if err != nil {
return err
@@ -980,7 +1120,9 @@ func (idx *ShardIndex) CreateSeriesListIfNotExists(keys, names [][]byte, tagsSli
}
// Report partial writes back to shard.
- if dropped > 0 {
+ if len(droppedKeys) > 0 {
+ dropped := len(droppedKeys) // number dropped before deduping
+ bytesutil.SortDedup(droppedKeys)
return &tsdb.PartialWriteError{
Reason: reason,
Dropped: dropped,
@@ -994,11 +1136,11 @@ func (idx *ShardIndex) CreateSeriesListIfNotExists(keys, names [][]byte, tagsSli
// InitializeSeries is called during start-up.
// This works the same as CreateSeriesIfNotExists except it ignores limit errors.
func (i *ShardIndex) InitializeSeries(key, name []byte, tags models.Tags) error {
- return i.Index.CreateSeriesIfNotExists(i.id, key, name, tags, &i.opt, true)
+ return i.Index.CreateSeriesIfNotExists(i.id, i.seriesIDSet, key, name, tags, &i.opt, true)
}
func (i *ShardIndex) CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error {
- return i.Index.CreateSeriesIfNotExists(i.id, key, name, tags, &i.opt, false)
+ return i.Index.CreateSeriesIfNotExists(i.id, i.seriesIDSet, key, name, tags, &i.opt, false)
}
// TagSets returns a list of tag sets based on series filtering.
@@ -1006,44 +1148,48 @@ func (i *ShardIndex) TagSets(name []byte, opt query.IteratorOptions) ([]*query.T
return i.Index.TagSets(i.id, name, opt)
}
+// SeriesIDSet returns the bitset associated with the series ids.
+func (i *ShardIndex) SeriesIDSet() *tsdb.SeriesIDSet {
+ return i.seriesIDSet
+}
+
// NewShardIndex returns a new index for a shard.
-func NewShardIndex(id uint64, database, path string, opt tsdb.EngineOptions) tsdb.Index {
+func NewShardIndex(id uint64, database, path string, seriesIDSet *tsdb.SeriesIDSet, sfile *tsdb.SeriesFile, opt tsdb.EngineOptions) tsdb.Index {
return &ShardIndex{
- Index: opt.InmemIndex.(*Index),
- id: id,
- opt: opt,
+ Index: opt.InmemIndex.(*Index),
+ id: id,
+ seriesIDSet: seriesIDSet,
+ opt: opt,
}
}
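// A standalone sketch of the composition used by ShardIndex above: every shard
// of a database shares one in-memory index, while each shard keeps its own set
// of owned series ids on the side. The types here are hypothetical stand-ins
// for the shared index and the per-shard series id set.
type sharedIndex struct{ /* database-wide state */ }

type shardView struct {
	*sharedIndex                     // shared, database-wide index
	id        uint64                 // shard id
	ownSeries map[uint64]struct{}    // series ids assigned to this shard
}

func newShardView(id uint64, shared *sharedIndex) *shardView {
	return &shardView{sharedIndex: shared, id: id, ownSeries: make(map[uint64]struct{})}
}

// assign records that this shard references the series.
func (v *shardView) assign(seriesID uint64) { v.ownSeries[seriesID] = struct{}{} }

// unassign drops the series from this shard only; the shared index still
// holds it for any other shard that references it.
func (v *shardView) unassign(seriesID uint64) { delete(v.ownSeries, seriesID) }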
-// seriesPointIterator emits series as influxql points.
-type seriesPointIterator struct {
+// seriesIDIterator emits series ids.
+type seriesIDIterator struct {
database string
- mms Measurements
+ mms measurements
keys struct {
- buf []*Series
+ buf []*series
i int
}
-
- point query.FloatPoint // reusable point
- opt query.IteratorOptions
+ opt query.IteratorOptions
}
// Stats returns stats about the points processed.
-func (itr *seriesPointIterator) Stats() query.IteratorStats { return query.IteratorStats{} }
+func (itr *seriesIDIterator) Stats() query.IteratorStats { return query.IteratorStats{} }
// Close closes the iterator.
-func (itr *seriesPointIterator) Close() error { return nil }
+func (itr *seriesIDIterator) Close() error { return nil }
// Next emits the next point in the iterator.
-func (itr *seriesPointIterator) Next() (*query.FloatPoint, error) {
+func (itr *seriesIDIterator) Next() (tsdb.SeriesIDElem, error) {
for {
// Load next measurement's keys if there are no more remaining.
if itr.keys.i >= len(itr.keys.buf) {
if err := itr.nextKeys(); err != nil {
- return nil, err
+ return tsdb.SeriesIDElem{}, err
}
if len(itr.keys.buf) == 0 {
- return nil, nil
+ return tsdb.SeriesIDElem{}, nil
}
}
@@ -1051,23 +1197,16 @@ func (itr *seriesPointIterator) Next() (*query.FloatPoint, error) {
series := itr.keys.buf[itr.keys.i]
itr.keys.i++
- if !itr.opt.Authorizer.AuthorizeSeriesRead(itr.database, series.measurement.name, series.tags) {
+ if !itr.opt.Authorizer.AuthorizeSeriesRead(itr.database, series.Measurement.NameBytes, series.Tags) {
continue
}
- // Write auxiliary fields.
- for i, f := range itr.opt.Aux {
- switch f.Val {
- case "key":
- itr.point.Aux[i] = series.Key
- }
- }
- return &itr.point, nil
+ return tsdb.SeriesIDElem{SeriesID: series.ID}, nil
}
}
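// A standalone sketch of the filter-in-Next pattern used by
// seriesIDIterator.Next above: the iterator keeps pulling candidates and only
// yields those that pass a caller-supplied check (a plain predicate here, in
// place of the Authorizer). Names are hypothetical.
type filteredIDIterator struct {
	ids  []uint64
	keep func(uint64) bool
}

// next returns the next id that passes the predicate and true, or 0 and false
// once the input is exhausted.
func (itr *filteredIDIterator) next() (uint64, bool) {
	for len(itr.ids) > 0 {
		id := itr.ids[0]
		itr.ids = itr.ids[1:]
		if itr.keep == nil || itr.keep(id) {
			return id, true
		}
	}
	return 0, false
}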
// nextKeys reads all keys for the next measurement.
-func (itr *seriesPointIterator) nextKeys() error {
+func (itr *seriesIDIterator) nextKeys() error {
for {
// Ensure previous keys are cleared out.
itr.keys.i, itr.keys.buf = 0, itr.keys.buf[:0]
@@ -1103,20 +1242,20 @@ var errMaxSeriesPerDatabaseExceeded = errors.New("max series per database exceed
type seriesIterator struct {
keys [][]byte
- elem series
+ elem seriesElement
}
-type series struct {
+type seriesElement struct {
tsdb.SeriesElem
name []byte
tags models.Tags
deleted bool
}
-func (s series) Name() []byte { return s.name }
-func (s series) Tags() models.Tags { return s.tags }
-func (s series) Deleted() bool { return s.deleted }
-func (s series) Expr() influxql.Expr { return nil }
+func (s seriesElement) Name() []byte { return s.name }
+func (s seriesElement) Tags() models.Tags { return s.tags }
+func (s seriesElement) Deleted() bool { return s.deleted }
+func (s seriesElement) Expr() influxql.Expr { return nil }
func (itr *seriesIterator) Next() tsdb.SeriesElem {
if len(itr.keys) == 0 {
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/inmem_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/inmem_test.go
new file mode 100644
index 0000000000..5e5ffa24b5
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/inmem_test.go
@@ -0,0 +1,85 @@
+package inmem_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/influxdata/influxdb/models"
+ "github.com/influxdata/influxdb/tsdb"
+ "github.com/influxdata/influxdb/tsdb/index/inmem"
+)
+
+func createData(lo, hi int) (keys, names [][]byte, tags []models.Tags) {
+ for i := lo; i < hi; i++ {
+ keys = append(keys, []byte(fmt.Sprintf("m0,tag0=t%d", i)))
+ names = append(names, []byte("m0"))
+ var t models.Tags
+ t.Set([]byte("tag0"), []byte(fmt.Sprintf("%d", i)))
+ tags = append(tags, t)
+ }
+ return
+}
+
+func BenchmarkShardIndex_CreateSeriesListIfNotExists_MaxValuesExceeded(b *testing.B) {
+ opt := tsdb.EngineOptions{InmemIndex: inmem.NewIndex("foo", nil)}
+ opt.Config.MaxValuesPerTag = 10
+ si := inmem.NewShardIndex(1, "foo", "bar", tsdb.NewSeriesIDSet(), nil, opt)
+ si.Open()
+ keys, names, tags := createData(0, 10)
+ si.CreateSeriesListIfNotExists(keys, names, tags)
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ keys, names, tags = createData(9, 5010)
+ for i := 0; i < b.N; i++ {
+ si.CreateSeriesListIfNotExists(keys, names, tags)
+ }
+}
+
+func BenchmarkShardIndex_CreateSeriesListIfNotExists_MaxValuesNotExceeded(b *testing.B) {
+ opt := tsdb.EngineOptions{InmemIndex: inmem.NewIndex("foo", nil)}
+ opt.Config.MaxValuesPerTag = 100000
+ si := inmem.NewShardIndex(1, "foo", "bar", tsdb.NewSeriesIDSet(), nil, opt)
+ si.Open()
+ keys, names, tags := createData(0, 10)
+ si.CreateSeriesListIfNotExists(keys, names, tags)
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ keys, names, tags = createData(9, 5010)
+ for i := 0; i < b.N; i++ {
+ si.CreateSeriesListIfNotExists(keys, names, tags)
+ }
+}
+
+func BenchmarkShardIndex_CreateSeriesListIfNotExists_NoMaxValues(b *testing.B) {
+ opt := tsdb.EngineOptions{InmemIndex: inmem.NewIndex("foo", nil)}
+ si := inmem.NewShardIndex(1, "foo", "bar", tsdb.NewSeriesIDSet(), nil, opt)
+ si.Open()
+ keys, names, tags := createData(0, 10)
+ si.CreateSeriesListIfNotExists(keys, names, tags)
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ keys, names, tags = createData(9, 5010)
+ for i := 0; i < b.N; i++ {
+ si.CreateSeriesListIfNotExists(keys, names, tags)
+ }
+}
+
+func BenchmarkShardIndex_CreateSeriesListIfNotExists_MaxSeriesExceeded(b *testing.B) {
+ opt := tsdb.EngineOptions{InmemIndex: inmem.NewIndex("foo", nil)}
+ opt.Config.MaxValuesPerTag = 0
+ opt.Config.MaxSeriesPerDatabase = 10
+ si := inmem.NewShardIndex(1, "foo", "bar", tsdb.NewSeriesIDSet(), nil, opt)
+ si.Open()
+ keys, names, tags := createData(0, 10)
+ si.CreateSeriesListIfNotExists(keys, names, tags)
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ keys, names, tags = createData(9, 5010)
+ for i := 0; i < b.N; i++ {
+ si.CreateSeriesListIfNotExists(keys, names, tags)
+ }
+}
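With this file in place, the benchmarks can be run in the usual way, for example `go test -bench=CreateSeriesListIfNotExists -benchmem` from inside the tsdb/index/inmem package directory; the exact invocation is only illustrative.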
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta.go b/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta.go
index b5bf88aa21..8cff602641 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta.go
@@ -20,47 +20,43 @@ import (
// contains in memory structures for indexing tags. Exported functions are
// goroutine safe while un-exported functions assume the caller will use the
// appropriate locks.
-type Measurement struct {
- database string
- Name string `json:"name,omitempty"`
- name []byte // cached version as []byte
+type measurement struct {
+ Database string
+ Name string `json:"name,omitempty"`
+ NameBytes []byte // cached version as []byte
mu sync.RWMutex
fieldNames map[string]struct{}
// in-memory index fields
- seriesByID map[uint64]*Series // lookup table for series by their id
- seriesByTagKeyValue map[string]*TagKeyValue // map from tag key to value to sorted set of series ids
+ seriesByID map[uint64]*series // lookup table for series by their id
+ seriesByTagKeyValue map[string]*tagKeyValue // map from tag key to value to sorted set of series ids
// lazily created sorted series IDs
- sortedSeriesIDs SeriesIDs // sorted list of series IDs in this measurement
+ sortedSeriesIDs seriesIDs // sorted list of series IDs in this measurement
// Indicates whether the seriesByTagKeyValueMap needs to be rebuilt as it contains deleted series
// that waste memory.
dirty bool
}
-// NewMeasurement allocates and initializes a new Measurement.
-func NewMeasurement(database, name string) *Measurement {
- return &Measurement{
- database: database,
- Name: name,
- name: []byte(name),
- fieldNames: make(map[string]struct{}),
+// newMeasurement allocates and initializes a new Measurement.
+func newMeasurement(database, name string) *measurement {
+ return &measurement{
+ Database: database,
+ Name: name,
+ NameBytes: []byte(name),
- seriesByID: make(map[uint64]*Series),
- seriesByTagKeyValue: make(map[string]*TagKeyValue),
+ fieldNames: make(map[string]struct{}),
+ seriesByID: make(map[uint64]*series),
+ seriesByTagKeyValue: make(map[string]*tagKeyValue),
}
}
// Authorized determines if this Measurement is authorized to be read, according
// to the provided Authorizer. A measurement is authorized to be read if at
-// least one series from the measurement is authorized to be read.
-func (m *Measurement) Authorized(auth query.Authorizer) bool {
- if auth == nil {
- return true
- }
-
+// least one undeleted series from the measurement is authorized to be read.
+func (m *measurement) Authorized(auth query.Authorizer) bool {
// Note(edd): the cost of this check scales linearly with the number of series
// belonging to a measurement, which means it may become expensive when there
// are large numbers of series on a measurement.
@@ -68,14 +64,18 @@ func (m *Measurement) Authorized(auth query.Authorizer) bool {
// In the future we might want to push the set of series down into the
// authorizer, but that will require an API change.
for _, s := range m.SeriesByIDMap() {
- if auth.AuthorizeSeriesRead(m.database, m.name, s.tags) {
+ if s != nil && s.Deleted() {
+ continue
+ }
+
+ if auth == nil || auth.AuthorizeSeriesRead(m.Database, m.NameBytes, s.Tags) {
return true
}
}
return false
}
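// A standalone sketch of the early-exit scan described in the note above: the
// check is linear in the number of series, but it stops at the first undeleted
// series the authorizer accepts. The series type and callback below are
// hypothetical stand-ins.
type sketchSeries struct {
	deleted bool
}

func anyAuthorized(series map[uint64]*sketchSeries, authorize func(*sketchSeries) bool) bool {
	for _, s := range series {
		if s == nil || s.deleted {
			continue
		}
		if authorize == nil || authorize(s) {
			return true // first hit is enough; no need to scan the rest
		}
	}
	return false
}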
-func (m *Measurement) HasField(name string) bool {
+func (m *measurement) HasField(name string) bool {
m.mu.RLock()
_, hasField := m.fieldNames[name]
m.mu.RUnlock()
@@ -83,24 +83,24 @@ func (m *Measurement) HasField(name string) bool {
}
// SeriesByID returns a series by identifier.
-func (m *Measurement) SeriesByID(id uint64) *Series {
+func (m *measurement) SeriesByID(id uint64) *series {
m.mu.RLock()
defer m.mu.RUnlock()
return m.seriesByID[id]
}
// SeriesByIDMap returns the internal seriesByID map.
-func (m *Measurement) SeriesByIDMap() map[uint64]*Series {
+func (m *measurement) SeriesByIDMap() map[uint64]*series {
m.mu.RLock()
defer m.mu.RUnlock()
return m.seriesByID
}
// SeriesByIDSlice returns a list of series by identifiers.
-func (m *Measurement) SeriesByIDSlice(ids []uint64) []*Series {
+func (m *measurement) SeriesByIDSlice(ids []uint64) []*series {
m.mu.RLock()
defer m.mu.RUnlock()
- a := make([]*Series, len(ids))
+ a := make([]*series, len(ids))
for i, id := range ids {
a[i] = m.seriesByID[id]
}
@@ -108,7 +108,7 @@ func (m *Measurement) SeriesByIDSlice(ids []uint64) []*Series {
}
// AppendSeriesKeysByID appends keys for a list of series ids to a buffer.
-func (m *Measurement) AppendSeriesKeysByID(dst []string, ids []uint64) []string {
+func (m *measurement) AppendSeriesKeysByID(dst []string, ids []uint64) []string {
m.mu.RLock()
defer m.mu.RUnlock()
for _, id := range ids {
@@ -120,7 +120,7 @@ func (m *Measurement) AppendSeriesKeysByID(dst []string, ids []uint64) []string
}
// SeriesKeysByID returns a list of keys for a set of ids.
-func (m *Measurement) SeriesKeysByID(ids SeriesIDs) [][]byte {
+func (m *measurement) SeriesKeysByID(ids seriesIDs) [][]byte {
m.mu.RLock()
defer m.mu.RUnlock()
keys := make([][]byte, 0, len(ids))
@@ -140,7 +140,7 @@ func (m *Measurement) SeriesKeysByID(ids SeriesIDs) [][]byte {
}
// SeriesKeys returns the keys of every series in this measurement
-func (m *Measurement) SeriesKeys() [][]byte {
+func (m *measurement) SeriesKeys() [][]byte {
m.mu.RLock()
defer m.mu.RUnlock()
keys := make([][]byte, 0, len(m.seriesByID))
@@ -158,7 +158,7 @@ func (m *Measurement) SeriesKeys() [][]byte {
return keys
}
-func (m *Measurement) SeriesIDs() SeriesIDs {
+func (m *measurement) SeriesIDs() seriesIDs {
m.mu.RLock()
if len(m.sortedSeriesIDs) == len(m.seriesByID) {
s := m.sortedSeriesIDs
@@ -176,7 +176,7 @@ func (m *Measurement) SeriesIDs() SeriesIDs {
m.sortedSeriesIDs = m.sortedSeriesIDs[:0]
if cap(m.sortedSeriesIDs) < len(m.seriesByID) {
- m.sortedSeriesIDs = make(SeriesIDs, 0, len(m.seriesByID))
+ m.sortedSeriesIDs = make(seriesIDs, 0, len(m.seriesByID))
}
for k, v := range m.seriesByID {
@@ -192,28 +192,28 @@ func (m *Measurement) SeriesIDs() SeriesIDs {
}
// HasTagKey returns true if at least one series in this measurement has written a value for the passed in tag key
-func (m *Measurement) HasTagKey(k string) bool {
+func (m *measurement) HasTagKey(k string) bool {
m.mu.RLock()
defer m.mu.RUnlock()
_, hasTag := m.seriesByTagKeyValue[k]
return hasTag
}
-func (m *Measurement) HasTagKeyValue(k, v []byte) bool {
+func (m *measurement) HasTagKeyValue(k, v []byte) bool {
m.mu.RLock()
defer m.mu.RUnlock()
return m.seriesByTagKeyValue[string(k)].Contains(string(v))
}
// HasSeries returns true if there is at least 1 series under this measurement.
-func (m *Measurement) HasSeries() bool {
+func (m *measurement) HasSeries() bool {
m.mu.RLock()
defer m.mu.RUnlock()
return len(m.seriesByID) > 0
}
// Cardinality returns the number of values associated with the given tag key.
-func (m *Measurement) Cardinality(key string) int {
+func (m *measurement) Cardinality(key string) int {
var n int
m.mu.RLock()
n = m.cardinality(key)
@@ -221,12 +221,12 @@ func (m *Measurement) Cardinality(key string) int {
return n
}
-func (m *Measurement) cardinality(key string) int {
+func (m *measurement) cardinality(key string) int {
return m.seriesByTagKeyValue[key].Cardinality()
}
// CardinalityBytes returns the number of values associated with the given tag key.
-func (m *Measurement) CardinalityBytes(key []byte) int {
+func (m *measurement) CardinalityBytes(key []byte) int {
m.mu.RLock()
defer m.mu.RUnlock()
return m.seriesByTagKeyValue[string(key)].Cardinality()
@@ -234,7 +234,7 @@ func (m *Measurement) CardinalityBytes(key []byte) int {
// AddSeries adds a series to the measurement's index.
// It returns true if the series was added successfully or false if the series was already present.
-func (m *Measurement) AddSeries(s *Series) bool {
+func (m *measurement) AddSeries(s *series) bool {
if s == nil {
return false
}
@@ -260,28 +260,20 @@ func (m *Measurement) AddSeries(s *Series) bool {
}
// add this series id to the tag index on the measurement
- s.ForEachTag(func(t models.Tag) {
+ for _, t := range s.Tags {
valueMap := m.seriesByTagKeyValue[string(t.Key)]
if valueMap == nil {
- valueMap = NewTagKeyValue()
+ valueMap = newTagKeyValue()
m.seriesByTagKeyValue[string(t.Key)] = valueMap
}
- ids := valueMap.LoadByte(t.Value)
- ids = append(ids, s.ID)
-
- // most of the time the series ID will be higher than all others because it's a new
- // series. So don't do the sort if we don't have to.
- if len(ids) > 1 && ids[len(ids)-1] < ids[len(ids)-2] {
- sort.Sort(ids)
- }
- valueMap.Store(string(t.Value), ids)
- })
+ valueMap.InsertSeriesIDByte(t.Value, s.ID)
+ }
return true
}
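// A standalone sketch of the append-then-conditionally-sort trick that the
// removed code above relied on: a newly created series usually has the largest
// id seen so far, so the sort can be skipped unless the append broke the
// order. The helper is hypothetical and assumes the standard sort package.
func appendSeriesID(ids []uint64, id uint64) []uint64 {
	ids = append(ids, id)
	// Only sort when the new id landed out of order.
	if len(ids) > 1 && ids[len(ids)-1] < ids[len(ids)-2] {
		sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	}
	return ids
}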
// DropSeries removes a series from the measurement's index.
-func (m *Measurement) DropSeries(series *Series) {
+func (m *measurement) DropSeries(series *series) {
seriesID := series.ID
m.mu.Lock()
defer m.mu.Unlock()
@@ -299,7 +291,7 @@ func (m *Measurement) DropSeries(series *Series) {
m.dirty = true
}
-func (m *Measurement) Rebuild() *Measurement {
+func (m *measurement) Rebuild() *measurement {
m.mu.RLock()
// Nothing needs to be rebuilt.
@@ -309,7 +301,7 @@ func (m *Measurement) Rebuild() *Measurement {
}
// Create a new measurement from the state of the existing measurement
- nm := NewMeasurement(m.database, string(m.name))
+ nm := newMeasurement(m.Database, string(m.NameBytes))
nm.fieldNames = m.fieldNames
m.mu.RUnlock()
@@ -339,7 +331,7 @@ func (m *Measurement) Rebuild() *Measurement {
// filters walks the where clause of a select statement and returns a map with all series ids
// matching the where clause and any filter expression that should be applied to each
-func (m *Measurement) filters(condition influxql.Expr) ([]uint64, map[uint64]influxql.Expr, error) {
+func (m *measurement) filters(condition influxql.Expr) ([]uint64, map[uint64]influxql.Expr, error) {
if condition == nil {
return m.SeriesIDs(), nil, nil
}
@@ -347,7 +339,7 @@ func (m *Measurement) filters(condition influxql.Expr) ([]uint64, map[uint64]inf
}
// ForEachSeriesByExpr iterates over all series filtered by condition.
-func (m *Measurement) ForEachSeriesByExpr(condition influxql.Expr, fn func(tags models.Tags) error) error {
+func (m *measurement) ForEachSeriesByExpr(condition influxql.Expr, fn func(tags models.Tags) error) error {
// Retrieve matching series ids.
ids, _, err := m.filters(condition)
if err != nil {
@@ -357,7 +349,7 @@ func (m *Measurement) ForEachSeriesByExpr(condition influxql.Expr, fn func(tags
// Iterate over each series.
for _, id := range ids {
s := m.SeriesByID(id)
- if err := fn(s.Tags()); err != nil {
+ if err := fn(s.Tags); err != nil {
return err
}
}
@@ -373,7 +365,7 @@ func (m *Measurement) ForEachSeriesByExpr(condition influxql.Expr, fn func(tags
// This will also populate the TagSet objects with the series IDs that match each tagset and any
// influx filter expression that goes with the series
// TODO: this shouldn't be exported. However, until tx.go and the engine get refactored into tsdb, we need it.
-func (m *Measurement) TagSets(shardID uint64, opt query.IteratorOptions) ([]*query.TagSet, error) {
+func (m *measurement) TagSets(shardID uint64, opt query.IteratorOptions) ([]*query.TagSet, error) {
// get the unique set of series ids and the filters that should be applied to each
ids, filters, err := m.filters(opt.Condition)
if err != nil {
@@ -412,13 +404,13 @@ func (m *Measurement) TagSets(shardID uint64, opt query.IteratorOptions) ([]*que
continue
}
- if opt.Authorizer != nil && !opt.Authorizer.AuthorizeSeriesRead(m.database, m.name, s.Tags()) {
+ if opt.Authorizer != nil && !opt.Authorizer.AuthorizeSeriesRead(m.Database, m.NameBytes, s.Tags) {
continue
}
var tagsAsKey []byte
if len(dims) > 0 {
- tagsAsKey = tsdb.MakeTagsKey(dims, s.Tags())
+ tagsAsKey = tsdb.MakeTagsKey(dims, s.Tags)
}
tagSet := tagSets[string(tagsAsKey)]
@@ -461,7 +453,7 @@ func (m *Measurement) TagSets(shardID uint64, opt query.IteratorOptions) ([]*que
}
// intersectSeriesFilters performs an intersection for two sets of ids and filter expressions.
-func intersectSeriesFilters(lids, rids SeriesIDs, lfilters, rfilters FilterExprs) (SeriesIDs, FilterExprs) {
+func intersectSeriesFilters(lids, rids seriesIDs, lfilters, rfilters FilterExprs) (seriesIDs, FilterExprs) {
// We only want to allocate a slice and map of the smaller size.
var ids []uint64
if len(lids) > len(rids) {
@@ -515,7 +507,7 @@ func intersectSeriesFilters(lids, rids SeriesIDs, lfilters, rfilters FilterExprs
}
// unionSeriesFilters performs a union for two sets of ids and filter expressions.
-func unionSeriesFilters(lids, rids SeriesIDs, lfilters, rfilters FilterExprs) (SeriesIDs, FilterExprs) {
+func unionSeriesFilters(lids, rids seriesIDs, lfilters, rfilters FilterExprs) (seriesIDs, FilterExprs) {
ids := make([]uint64, 0, len(lids)+len(rids))
// Setup the filters with the smallest size since we will discard filters
@@ -593,15 +585,39 @@ func unionSeriesFilters(lids, rids SeriesIDs, lfilters, rfilters FilterExprs) (S
return ids, filters
}
+// SeriesIDsByTagKey returns a list of all series for a tag key.
+func (m *measurement) SeriesIDsByTagKey(key []byte) seriesIDs {
+ tagVals := m.seriesByTagKeyValue[string(key)]
+ if tagVals == nil {
+ return nil
+ }
+
+ var ids seriesIDs
+ tagVals.RangeAll(func(_ string, a seriesIDs) {
+ ids = append(ids, a...)
+ })
+ sort.Sort(ids)
+ return ids
+}
+
+// SeriesIDsByTagValue returns a list of all series for a tag value.
+func (m *measurement) SeriesIDsByTagValue(key, value []byte) seriesIDs {
+ tagVals := m.seriesByTagKeyValue[string(key)]
+ if tagVals == nil {
+ return nil
+ }
+ return tagVals.Load(string(value))
+}
+
// IDsForExpr returns the series IDs that are candidates to match the given expression.
-func (m *Measurement) IDsForExpr(n *influxql.BinaryExpr) SeriesIDs {
+func (m *measurement) IDsForExpr(n *influxql.BinaryExpr) seriesIDs {
ids, _, _ := m.idsForExpr(n)
return ids
}
// idsForExpr returns a collection of series ids and a filter expression that should
// be used to filter points from those series.
-func (m *Measurement) idsForExpr(n *influxql.BinaryExpr) (SeriesIDs, influxql.Expr, error) {
+func (m *measurement) idsForExpr(n *influxql.BinaryExpr) (seriesIDs, influxql.Expr, error) {
// If this binary expression has another binary expression, then this
// is some expression math and we should just pass it to the underlying query.
if _, ok := n.LHS.(*influxql.BinaryExpr); ok {
@@ -637,7 +653,7 @@ func (m *Measurement) idsForExpr(n *influxql.BinaryExpr) (SeriesIDs, influxql.Ex
// if we're looking for series with a specific tag value
if str, ok := value.(*influxql.StringLiteral); ok {
- var ids SeriesIDs
+ var ids seriesIDs
// Special handling for "_name" to match measurement name.
if name.Val == "_name" {
@@ -653,22 +669,22 @@ func (m *Measurement) idsForExpr(n *influxql.BinaryExpr) (SeriesIDs, influxql.Ex
ids = tagVals.Load(str.Val)
} else {
// Make a copy of all series ids and mark the ones we need to evict.
- seriesIDs := newEvictSeriesIDs(m.SeriesIDs())
+ sIDs := newEvictSeriesIDs(m.SeriesIDs())
// Go through each slice and mark the values we find as zero so
// they can be removed later.
- tagVals.RangeAll(func(_ string, a SeriesIDs) {
- seriesIDs.mark(a)
+ tagVals.RangeAll(func(_ string, a seriesIDs) {
+ sIDs.mark(a)
})
// Make a new slice with only the remaining ids.
- ids = seriesIDs.evict()
+ ids = sIDs.evict()
}
} else if n.Op == influxql.NEQ {
if str.Val != "" {
ids = m.SeriesIDs().Reject(tagVals.Load(str.Val))
} else {
- tagVals.RangeAll(func(_ string, a SeriesIDs) {
+ tagVals.RangeAll(func(_ string, a seriesIDs) {
ids = append(ids, a...)
})
sort.Sort(ids)
@@ -679,7 +695,7 @@ func (m *Measurement) idsForExpr(n *influxql.BinaryExpr) (SeriesIDs, influxql.Ex
// if we're looking for series with a tag value that matches a regex
if re, ok := value.(*influxql.RegexLiteral); ok {
- var ids SeriesIDs
+ var ids seriesIDs
// Special handling for "_name" to match measurement name.
if name.Val == "_name" {
@@ -699,24 +715,24 @@ func (m *Measurement) idsForExpr(n *influxql.BinaryExpr) (SeriesIDs, influxql.Ex
// If we should not include the empty string, include series that match our condition.
if empty && n.Op == influxql.EQREGEX {
// See comments above for EQ with a StringLiteral.
- seriesIDs := newEvictSeriesIDs(m.SeriesIDs())
- tagVals.RangeAll(func(k string, a SeriesIDs) {
+ sIDs := newEvictSeriesIDs(m.SeriesIDs())
+ tagVals.RangeAll(func(k string, a seriesIDs) {
if !re.Val.MatchString(k) {
- seriesIDs.mark(a)
+ sIDs.mark(a)
}
})
- ids = seriesIDs.evict()
+ ids = sIDs.evict()
} else if empty && n.Op == influxql.NEQREGEX {
- ids = make(SeriesIDs, 0, len(m.SeriesIDs()))
- tagVals.RangeAll(func(k string, a SeriesIDs) {
+ ids = make(seriesIDs, 0, len(m.SeriesIDs()))
+ tagVals.RangeAll(func(k string, a seriesIDs) {
if !re.Val.MatchString(k) {
ids = append(ids, a...)
}
})
sort.Sort(ids)
} else if !empty && n.Op == influxql.EQREGEX {
- ids = make(SeriesIDs, 0, len(m.SeriesIDs()))
- tagVals.RangeAll(func(k string, a SeriesIDs) {
+ ids = make(seriesIDs, 0, len(m.SeriesIDs()))
+ tagVals.RangeAll(func(k string, a seriesIDs) {
if re.Val.MatchString(k) {
ids = append(ids, a...)
}
@@ -724,27 +740,27 @@ func (m *Measurement) idsForExpr(n *influxql.BinaryExpr) (SeriesIDs, influxql.Ex
sort.Sort(ids)
} else if !empty && n.Op == influxql.NEQREGEX {
// See comments above for EQ with a StringLiteral.
- seriesIDs := newEvictSeriesIDs(m.SeriesIDs())
- tagVals.RangeAll(func(k string, a SeriesIDs) {
+ sIDs := newEvictSeriesIDs(m.SeriesIDs())
+ tagVals.RangeAll(func(k string, a seriesIDs) {
if re.Val.MatchString(k) {
- seriesIDs.mark(a)
+ sIDs.mark(a)
}
})
- ids = seriesIDs.evict()
+ ids = sIDs.evict()
}
return ids, nil, nil
}
// compare tag values
if ref, ok := value.(*influxql.VarRef); ok {
- var ids SeriesIDs
+ var ids seriesIDs
if n.Op == influxql.NEQ {
ids = m.SeriesIDs()
}
rhsTagVals := m.seriesByTagKeyValue[ref.Val]
- tagVals.RangeAll(func(k string, a SeriesIDs) {
+ tagVals.RangeAll(func(k string, a seriesIDs) {
tags := a.Intersect(rhsTagVals.Load(k))
if n.Op == influxql.EQ {
ids = ids.Union(tags)
@@ -784,7 +800,7 @@ func (fe FilterExprs) Len() int {
// WalkWhereForSeriesIds recursively walks the WHERE clause and returns an ordered set of series IDs and
// a map from those series IDs to filter expressions that should be used to limit points returned in
// the final query result.
-func (m *Measurement) WalkWhereForSeriesIds(expr influxql.Expr) (SeriesIDs, FilterExprs, error) {
+func (m *measurement) WalkWhereForSeriesIds(expr influxql.Expr) (seriesIDs, FilterExprs, error) {
switch n := expr.(type) {
case *influxql.BinaryExpr:
switch n.Op {
@@ -848,7 +864,7 @@ func (m *Measurement) WalkWhereForSeriesIds(expr influxql.Expr) (SeriesIDs, Filt
// expandExpr returns a list of expressions expanded by all possible tag
// combinations.
-func (m *Measurement) expandExpr(expr influxql.Expr) []tagSetExpr {
+func (m *measurement) expandExpr(expr influxql.Expr) []tagSetExpr {
// Retrieve list of unique values for each tag.
valuesByTagKey := m.uniqueTagValues(expr)
@@ -906,7 +922,7 @@ func expandExprWithValues(expr influxql.Expr, keys []string, tagExprs []tagExpr,
// SeriesIDsAllOrByExpr walks an expressions for matching series IDs
// or, if no expressions is given, returns all series IDs for the measurement.
-func (m *Measurement) SeriesIDsAllOrByExpr(expr influxql.Expr) (SeriesIDs, error) {
+func (m *measurement) SeriesIDsAllOrByExpr(expr influxql.Expr) (seriesIDs, error) {
// If no expression given or the measurement has no series,
// we can just return the ids or nil accordingly.
if expr == nil {
@@ -930,7 +946,7 @@ func (m *Measurement) SeriesIDsAllOrByExpr(expr influxql.Expr) (SeriesIDs, error
}
// tagKeysByExpr extracts the tag keys wanted by the expression.
-func (m *Measurement) TagKeysByExpr(expr influxql.Expr) (map[string]struct{}, error) {
+func (m *measurement) TagKeysByExpr(expr influxql.Expr) (map[string]struct{}, error) {
if expr == nil {
set := make(map[string]struct{})
for _, key := range m.TagKeys() {
@@ -998,7 +1014,7 @@ func (m *Measurement) TagKeysByExpr(expr influxql.Expr) (map[string]struct{}, er
}
// tagKeysByFilter will filter the tag keys for the measurement.
-func (m *Measurement) tagKeysByFilter(op influxql.Token, val string, regex *regexp.Regexp) stringSet {
+func (m *measurement) tagKeysByFilter(op influxql.Token, val string, regex *regexp.Regexp) stringSet {
ss := newStringSet()
for _, key := range m.TagKeys() {
var matched bool
@@ -1057,7 +1073,7 @@ func copyTagExprs(a []tagExpr) []tagExpr {
}
// uniqueTagValues returns a list of unique tag values used in an expression.
-func (m *Measurement) uniqueTagValues(expr influxql.Expr) map[string][]string {
+func (m *measurement) uniqueTagValues(expr influxql.Expr) map[string][]string {
// Track unique value per tag.
tags := make(map[string]map[string]struct{})
@@ -1107,18 +1123,18 @@ func (m *Measurement) uniqueTagValues(expr influxql.Expr) map[string][]string {
}
// Measurements represents a list of *Measurement.
-type Measurements []*Measurement
+type measurements []*measurement
// Len implements sort.Interface.
-func (a Measurements) Len() int { return len(a) }
+func (a measurements) Len() int { return len(a) }
// Less implements sort.Interface.
-func (a Measurements) Less(i, j int) bool { return a[i].Name < a[j].Name }
+func (a measurements) Less(i, j int) bool { return a[i].Name < a[j].Name }
// Swap implements sort.Interface.
-func (a Measurements) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a measurements) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a Measurements) Intersect(other Measurements) Measurements {
+func (a measurements) Intersect(other measurements) measurements {
l := a
r := other
@@ -1132,7 +1148,7 @@ func (a Measurements) Intersect(other Measurements) Measurements {
// That is, don't run comparisons against lower values that we've already passed
var i, j int
- result := make(Measurements, 0, len(l))
+ result := make(measurements, 0, len(l))
for i < len(l) && j < len(r) {
if l[i].Name == r[j].Name {
result = append(result, l[i])
@@ -1148,8 +1164,8 @@ func (a Measurements) Intersect(other Measurements) Measurements {
return result
}
-func (a Measurements) Union(other Measurements) Measurements {
- result := make(Measurements, 0, len(a)+len(other))
+func (a measurements) Union(other measurements) measurements {
+ result := make(measurements, 0, len(a)+len(other))
var i, j int
for i < len(a) && j < len(other) {
if a[i].Name == other[j].Name {
@@ -1175,33 +1191,38 @@ func (a Measurements) Union(other Measurements) Measurements {
return result
}
-// Series belong to a Measurement and represent unique time series in a database.
-type Series struct {
- mu sync.RWMutex
- Key string
- tags models.Tags
- ID uint64
- measurement *Measurement
- shardIDs map[uint64]struct{} // shards that have this series defined
- deleted bool
-
+// series belong to a Measurement and represent unique time series in a database.
+type series struct {
// lastModified tracks the last time the series was created. If the series
// already exists and a request to create is received (a no-op), lastModified
// is increased to track that it is still in use.
lastModified int64
+
+ // immutable
+ ID uint64
+ Measurement *measurement
+ Key string
+ Tags models.Tags
+
+ mu sync.RWMutex
+ shardIDs map[uint64]struct{} // shards that have this series defined
+
+ deleted bool
}
-// NewSeries returns an initialized series struct
-func NewSeries(key []byte, tags models.Tags) *Series {
- return &Series{
- Key: string(key),
- tags: tags,
+// newSeries returns an initialized series struct
+func newSeries(id uint64, m *measurement, key string, tags models.Tags) *series {
+ return &series{
+ ID: id,
+ Measurement: m,
+ Key: key,
+ Tags: tags,
shardIDs: make(map[uint64]struct{}),
lastModified: time.Now().UTC().UnixNano(),
}
}
-func (s *Series) AssignShard(shardID uint64, ts int64) {
+func (s *series) AssignShard(shardID uint64, ts int64) {
atomic.StoreInt64(&s.lastModified, ts)
if s.Assigned(shardID) {
return
@@ -1215,7 +1236,7 @@ func (s *Series) AssignShard(shardID uint64, ts int64) {
s.mu.Unlock()
}
-func (s *Series) UnassignShard(shardID uint64, ts int64) {
+func (s *series) UnassignShard(shardID uint64, ts int64) {
s.mu.Lock()
if s.LastModified() < ts {
delete(s.shardIDs, shardID)
@@ -1223,66 +1244,26 @@ func (s *Series) UnassignShard(shardID uint64, ts int64) {
s.mu.Unlock()
}
-func (s *Series) Assigned(shardID uint64) bool {
+func (s *series) Assigned(shardID uint64) bool {
s.mu.RLock()
_, ok := s.shardIDs[shardID]
s.mu.RUnlock()
return ok
}
-func (s *Series) LastModified() int64 {
+func (s *series) LastModified() int64 {
return atomic.LoadInt64(&s.lastModified)
}
-func (s *Series) ShardN() int {
+func (s *series) ShardN() int {
s.mu.RLock()
n := len(s.shardIDs)
s.mu.RUnlock()
return n
}
-// Measurement returns the measurement on the series.
-func (s *Series) Measurement() *Measurement {
- return s.measurement
-}
-
-// SetMeasurement sets the measurement on the series.
-func (s *Series) SetMeasurement(m *Measurement) {
- s.measurement = m
-}
-
-// ForEachTag executes fn for every tag. Iteration occurs under lock.
-func (s *Series) ForEachTag(fn func(models.Tag)) {
- s.mu.RLock()
- defer s.mu.RUnlock()
- for _, t := range s.tags {
- fn(t)
- }
-}
-
-// Tags returns a copy of the tags under lock.
-func (s *Series) Tags() models.Tags {
- s.mu.RLock()
- defer s.mu.RUnlock()
- return s.tags
-}
-
-// CopyTags clones the tags on the series in-place,
-func (s *Series) CopyTags() {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.tags = s.tags.Clone()
-}
-
-// GetTagString returns a tag value under lock.
-func (s *Series) GetTagString(key string) string {
- s.mu.RLock()
- defer s.mu.RUnlock()
- return s.tags.GetString(key)
-}
-
// Delete marks this series as deleted. A deleted series should not be returned for queries.
-func (s *Series) Delete(ts int64) {
+func (s *series) Delete(ts int64) {
s.mu.Lock()
if s.LastModified() < ts {
s.deleted = true
@@ -1291,7 +1272,7 @@ func (s *Series) Delete(ts int64) {
}
// Deleted indicates if this was previously deleted.
-func (s *Series) Deleted() bool {
+func (s *series) Deleted() bool {
s.mu.RLock()
v := s.deleted
s.mu.RUnlock()
@@ -1302,75 +1283,104 @@ func (s *Series) Deleted() bool {
// ids mapping to a set of tag values.
//
// TODO(edd): This could possibly be replaced by a sync.Map once we use Go 1.9.
-type TagKeyValue struct {
- mu sync.RWMutex
- valueIDs map[string]SeriesIDs
+type tagKeyValue struct {
+ mu sync.RWMutex
+ entries map[string]*tagKeyValueEntry
}
// NewTagKeyValue initialises a new TagKeyValue.
-func NewTagKeyValue() *TagKeyValue {
- return &TagKeyValue{valueIDs: make(map[string]SeriesIDs)}
+func newTagKeyValue() *tagKeyValue {
+ return &tagKeyValue{entries: make(map[string]*tagKeyValueEntry)}
}
// Cardinality returns the number of values in the TagKeyValue.
-func (t *TagKeyValue) Cardinality() int {
+func (t *tagKeyValue) Cardinality() int {
if t == nil {
return 0
}
t.mu.RLock()
defer t.mu.RUnlock()
- return len(t.valueIDs)
+ return len(t.entries)
}
// Contains returns true if the TagKeyValue contains value.
-func (t *TagKeyValue) Contains(value string) bool {
+func (t *tagKeyValue) Contains(value string) bool {
if t == nil {
return false
}
t.mu.RLock()
defer t.mu.RUnlock()
- _, ok := t.valueIDs[value]
+ _, ok := t.entries[value]
return ok
}
+// InsertSeriesID adds a series id to the tag key value.
+func (t *tagKeyValue) InsertSeriesID(value string, id uint64) {
+ t.mu.Lock()
+ entry := t.entries[value]
+ if entry == nil {
+ entry = newTagKeyValueEntry()
+ t.entries[value] = entry
+ }
+ entry.m[id] = struct{}{}
+ t.mu.Unlock()
+}
+
+// InsertSeriesIDByte adds a series id to the tag key value.
+func (t *tagKeyValue) InsertSeriesIDByte(value []byte, id uint64) {
+ t.mu.Lock()
+ entry := t.entries[string(value)]
+ if entry == nil {
+ entry = newTagKeyValueEntry()
+ t.entries[string(value)] = entry
+ }
+ entry.m[id] = struct{}{}
+ t.mu.Unlock()
+}
+
// Load returns the SeriesIDs for the provided tag value.
-func (t *TagKeyValue) Load(value string) SeriesIDs {
+func (t *tagKeyValue) Load(value string) seriesIDs {
if t == nil {
return nil
}
t.mu.RLock()
- defer t.mu.RUnlock()
- return t.valueIDs[value]
+ entry := t.entries[value]
+ ids := entry.ids()
+ t.mu.RUnlock()
+ return ids
}
// LoadByte returns the SeriesIDs for the provided tag value. It makes use of
// Go's compiler optimisation for avoiding a copy when accessing maps with a []byte.
-func (t *TagKeyValue) LoadByte(value []byte) SeriesIDs {
+func (t *tagKeyValue) LoadByte(value []byte) seriesIDs {
if t == nil {
return nil
}
t.mu.RLock()
- defer t.mu.RUnlock()
- return t.valueIDs[string(value)]
+ entry := t.entries[string(value)]
+ ids := entry.ids()
+ t.mu.RUnlock()
+ return ids
}
// Range calls f sequentially on each key and value. A call to Range on a nil
// TagKeyValue is a no-op.
//
// If f returns false then iteration over any remaining keys or values will cease.
-func (t *TagKeyValue) Range(f func(tagValue string, a SeriesIDs) bool) {
+func (t *tagKeyValue) Range(f func(tagValue string, a seriesIDs) bool) {
if t == nil {
return
}
t.mu.RLock()
defer t.mu.RUnlock()
- for tagValue, a := range t.valueIDs {
- if !f(tagValue, a) {
+ for tagValue, entry := range t.entries {
+ ids := entry.ids()
+ if !f(tagValue, ids) {
return
}
}
@@ -1378,35 +1388,57 @@ func (t *TagKeyValue) Range(f func(tagValue string, a SeriesIDs) bool) {
// RangeAll calls f sequentially on each key and value. A call to RangeAll on a
// nil TagKeyValue is a no-op.
-func (t *TagKeyValue) RangeAll(f func(k string, a SeriesIDs)) {
- t.Range(func(k string, a SeriesIDs) bool {
+func (t *tagKeyValue) RangeAll(f func(k string, a seriesIDs)) {
+ t.Range(func(k string, a seriesIDs) bool {
f(k, a)
return true
})
}
-// Store stores ids under the value key.
-func (t *TagKeyValue) Store(value string, ids SeriesIDs) {
- t.mu.Lock()
- defer t.mu.Unlock()
- t.valueIDs[value] = ids
+type tagKeyValueEntry struct {
+ m map[uint64]struct{} // series id set
+ a seriesIDs // lazily sorted list of series.
+}
+
+func newTagKeyValueEntry() *tagKeyValueEntry {
+ return &tagKeyValueEntry{m: make(map[uint64]struct{})}
+}
+
+func (e *tagKeyValueEntry) ids() seriesIDs {
+ if e == nil {
+ return nil
+ }
+
+ if len(e.a) == len(e.m) {
+ return e.a
+ }
+
+ a := make(seriesIDs, 0, len(e.m))
+ for id := range e.m {
+ a = append(a, id)
+ }
+ sort.Sort(a)
+
+ e.a = a
+ return e.a
+
}
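// A standalone sketch of the lazily-sorted set pattern used by
// tagKeyValueEntry above: membership lives in a map, and a sorted slice view
// is rebuilt only when it has fallen behind the map. Names are hypothetical
// and only the standard sort package is assumed.
type lazySortedSet struct {
	m map[uint64]struct{} // membership
	a []uint64            // cached sorted view, possibly stale
}

func newLazySortedSet() *lazySortedSet {
	return &lazySortedSet{m: make(map[uint64]struct{})}
}

func (s *lazySortedSet) add(id uint64) { s.m[id] = struct{}{} }

// sorted returns the ids in ascending order, rebuilding the cached slice only
// when adds have happened since the last call.
func (s *lazySortedSet) sorted() []uint64 {
	if len(s.a) == len(s.m) {
		return s.a
	}
	a := make([]uint64, 0, len(s.m))
	for id := range s.m {
		a = append(a, id)
	}
	sort.Slice(a, func(i, j int) bool { return a[i] < a[j] })
	s.a = a
	return s.a
}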
// SeriesIDs is a convenience type for sorting, checking equality, and doing
// union and intersection of collections of series ids.
-type SeriesIDs []uint64
+type seriesIDs []uint64
// Len implements sort.Interface.
-func (a SeriesIDs) Len() int { return len(a) }
+func (a seriesIDs) Len() int { return len(a) }
// Less implements sort.Interface.
-func (a SeriesIDs) Less(i, j int) bool { return a[i] < a[j] }
+func (a seriesIDs) Less(i, j int) bool { return a[i] < a[j] }
// Swap implements sort.Interface.
-func (a SeriesIDs) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a seriesIDs) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// Equals assumes that both are sorted.
-func (a SeriesIDs) Equals(other SeriesIDs) bool {
+func (a seriesIDs) Equals(other seriesIDs) bool {
if len(a) != len(other) {
return false
}
@@ -1420,7 +1452,7 @@ func (a SeriesIDs) Equals(other SeriesIDs) bool {
// Intersect returns a new collection of series ids in sorted order that is the intersection of the two.
// The two collections must already be sorted.
-func (a SeriesIDs) Intersect(other SeriesIDs) SeriesIDs {
+func (a seriesIDs) Intersect(other seriesIDs) seriesIDs {
l := a
r := other
@@ -1447,12 +1479,12 @@ func (a SeriesIDs) Intersect(other SeriesIDs) SeriesIDs {
}
}
- return SeriesIDs(ids)
+ return seriesIDs(ids)
}
// Union returns a new collection of series ids in sorted order that is the union of the two.
// The two collections must already be sorted.
-func (a SeriesIDs) Union(other SeriesIDs) SeriesIDs {
+func (a seriesIDs) Union(other seriesIDs) seriesIDs {
l := a
r := other
ids := make([]uint64, 0, len(l)+len(r))
@@ -1483,7 +1515,7 @@ func (a SeriesIDs) Union(other SeriesIDs) SeriesIDs {
// Reject returns a new collection of series ids in sorted order with the passed in set removed from the original.
// This is useful for the NOT operator. The two collections must already be sorted.
-func (a SeriesIDs) Reject(other SeriesIDs) SeriesIDs {
+func (a seriesIDs) Reject(other seriesIDs) seriesIDs {
l := a
r := other
var i, j int
@@ -1506,7 +1538,7 @@ func (a SeriesIDs) Reject(other SeriesIDs) SeriesIDs {
ids = append(ids, l[i:]...)
}
- return SeriesIDs(ids)
+ return seriesIDs(ids)
}
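// A standalone two-pointer sketch of the sorted-slice set operations that
// seriesIDs implements (Intersect, Union, Reject). Both inputs must already be
// sorted ascending; the helpers below are hypothetical and use no external
// packages.
func intersectIDs(l, r []uint64) []uint64 {
	n := len(l)
	if len(r) < n {
		n = len(r)
	}
	out := make([]uint64, 0, n)
	for i, j := 0, 0; i < len(l) && j < len(r); {
		switch {
		case l[i] == r[j]:
			out = append(out, l[i])
			i, j = i+1, j+1
		case l[i] < r[j]:
			i++
		default:
			j++
		}
	}
	return out
}

// rejectIDs keeps every id from l that does not appear in r.
func rejectIDs(l, r []uint64) []uint64 {
	out := make([]uint64, 0, len(l))
	var i, j int
	for i < len(l) && j < len(r) {
		switch {
		case l[i] == r[j]:
			i, j = i+1, j+1
		case l[i] < r[j]:
			out = append(out, l[i])
			i++
		default:
			j++
		}
	}
	return append(out, l[i:]...)
}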
// seriesID is a series id that may or may not have been evicted from the
@@ -1539,9 +1571,9 @@ func newEvictSeriesIDs(ids []uint64) evictSeriesIDs {
// mark marks all of the ids in the sorted slice to be evicted from the list of
// series ids. If an id to be evicted does not exist, it just gets ignored.
func (a *evictSeriesIDs) mark(ids []uint64) {
- seriesIDs := a.ids
+ sIDs := a.ids
for _, id := range ids {
- if len(seriesIDs) == 0 {
+ if len(sIDs) == 0 {
break
}
@@ -1549,29 +1581,29 @@ func (a *evictSeriesIDs) mark(ids []uint64) {
// the first element does not match the value we're
// looking for.
i := 0
- if seriesIDs[0].val < id {
- i = sort.Search(len(seriesIDs), func(i int) bool {
- return seriesIDs[i].val >= id
+ if sIDs[0].val < id {
+ i = sort.Search(len(sIDs), func(i int) bool {
+ return sIDs[i].val >= id
})
}
- if i >= len(seriesIDs) {
+ if i >= len(sIDs) {
break
- } else if seriesIDs[i].val == id {
- if !seriesIDs[i].evict {
- seriesIDs[i].evict = true
+ } else if sIDs[i].val == id {
+ if !sIDs[i].evict {
+ sIDs[i].evict = true
a.sz--
}
// Skip over this series since it has been evicted and won't be
// encountered again.
i++
}
- seriesIDs = seriesIDs[i:]
+ sIDs = sIDs[i:]
}
}
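// A standalone sketch of the mark-then-evict pattern used by evictSeriesIDs
// above: candidates sit in one sorted slice, ids to remove are flagged via a
// binary search per id, and the survivors are copied out once at the end.
// Types and names are hypothetical; both slices are assumed sorted ascending,
// entries start unmarked, and the standard sort package is assumed.
type markedID struct {
	val   uint64
	evict bool
}

// markForEviction flags every entry whose value appears in ids and returns the
// number of entries left unmarked.
func markForEviction(entries []markedID, ids []uint64) (remaining int) {
	remaining = len(entries)
	rest := entries
	for _, id := range ids {
		if len(rest) == 0 {
			break
		}
		i := sort.Search(len(rest), func(i int) bool { return rest[i].val >= id })
		if i >= len(rest) {
			break
		}
		if rest[i].val == id {
			if !rest[i].evict {
				rest[i].evict = true
				remaining--
			}
			i++ // this entry is handled; skip past it
		}
		rest = rest[i:]
	}
	return remaining
}

// survivors copies out the values that were never marked.
func survivors(entries []markedID, remaining int) []uint64 {
	out := make([]uint64, 0, remaining)
	for _, e := range entries {
		if !e.evict {
			out = append(out, e.val)
		}
	}
	return out
}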
// evict creates a new slice with only the series that have not been evicted.
-func (a *evictSeriesIDs) evict() (ids SeriesIDs) {
+func (a *evictSeriesIDs) evict() (ids seriesIDs) {
if a.sz == 0 {
return ids
}
@@ -1597,7 +1629,7 @@ type TagFilter struct {
// WalkTagKeys calls fn for each tag key associated with m. The order of the
// keys is undefined.
-func (m *Measurement) WalkTagKeys(fn func(k string)) {
+func (m *measurement) WalkTagKeys(fn func(k string)) {
m.mu.RLock()
defer m.mu.RUnlock()
@@ -1607,7 +1639,7 @@ func (m *Measurement) WalkTagKeys(fn func(k string)) {
}
// TagKeys returns a list of the measurement's tag names, in sorted order.
-func (m *Measurement) TagKeys() []string {
+func (m *measurement) TagKeys() []string {
m.mu.RLock()
keys := make([]string, 0, len(m.seriesByTagKeyValue))
for k := range m.seriesByTagKeyValue {
@@ -1619,12 +1651,12 @@ func (m *Measurement) TagKeys() []string {
}
// TagValues returns all the values for the given tag key, in an arbitrary order.
-func (m *Measurement) TagValues(auth query.Authorizer, key string) []string {
+func (m *measurement) TagValues(auth query.Authorizer, key string) []string {
m.mu.RLock()
defer m.mu.RUnlock()
values := make([]string, 0, m.seriesByTagKeyValue[key].Cardinality())
- m.seriesByTagKeyValue[key].RangeAll(func(k string, a SeriesIDs) {
+ m.seriesByTagKeyValue[key].RangeAll(func(k string, a seriesIDs) {
if auth == nil {
values = append(values, k)
} else {
@@ -1633,7 +1665,7 @@ func (m *Measurement) TagValues(auth query.Authorizer, key string) []string {
if s == nil {
continue
}
- if auth.AuthorizeSeriesRead(m.database, m.name, s.Tags()) {
+ if auth.AuthorizeSeriesRead(m.Database, m.NameBytes, s.Tags) {
values = append(values, k)
return
}
@@ -1644,7 +1676,7 @@ func (m *Measurement) TagValues(auth query.Authorizer, key string) []string {
}
// SetFieldName adds the field name to the measurement.
-func (m *Measurement) SetFieldName(name string) {
+func (m *measurement) SetFieldName(name string) {
m.mu.RLock()
_, ok := m.fieldNames[name]
m.mu.RUnlock()
@@ -1659,7 +1691,7 @@ func (m *Measurement) SetFieldName(name string) {
}
// FieldNames returns a list of the measurement's field names, in an arbitrary order.
-func (m *Measurement) FieldNames() []string {
+func (m *measurement) FieldNames() []string {
m.mu.RLock()
defer m.mu.RUnlock()
@@ -1671,7 +1703,7 @@ func (m *Measurement) FieldNames() []string {
}
// SeriesByTagKeyValue returns the TagKeyValue for the provided tag key.
-func (m *Measurement) SeriesByTagKeyValue(key string) *TagKeyValue {
+func (m *measurement) SeriesByTagKeyValue(key string) *tagKeyValue {
m.mu.RLock()
defer m.mu.RUnlock()
return m.seriesByTagKeyValue[key]
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta_test.go
index 69714ed85d..910d4b16a7 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta_test.go
@@ -1,4 +1,4 @@
-package inmem_test
+package inmem
import (
"fmt"
@@ -8,15 +8,14 @@ import (
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/query"
- "github.com/influxdata/influxdb/tsdb/index/inmem"
"github.com/influxdata/influxql"
)
// Test comparing SeriesIDs for equality.
func TestSeriesIDs_Equals(t *testing.T) {
- ids1 := inmem.SeriesIDs([]uint64{1, 2, 3})
- ids2 := inmem.SeriesIDs([]uint64{1, 2, 3})
- ids3 := inmem.SeriesIDs([]uint64{4, 5, 6})
+ ids1 := seriesIDs([]uint64{1, 2, 3})
+ ids2 := seriesIDs([]uint64{1, 2, 3})
+ ids3 := seriesIDs([]uint64{4, 5, 6})
if !ids1.Equals(ids2) {
t.Fatal("expected ids1 == ids2")
@@ -27,10 +26,10 @@ func TestSeriesIDs_Equals(t *testing.T) {
// Test intersecting sets of SeriesIDs.
func TestSeriesIDs_Intersect(t *testing.T) {
- // Test swaping l & r, all branches of if-else, and exit loop when 'j < len(r)'
- ids1 := inmem.SeriesIDs([]uint64{1, 3, 4, 5, 6})
- ids2 := inmem.SeriesIDs([]uint64{1, 2, 3, 7})
- exp := inmem.SeriesIDs([]uint64{1, 3})
+ // Test swapping l & r, all branches of if-else, and exit loop when 'j < len(r)'
+ ids1 := seriesIDs([]uint64{1, 3, 4, 5, 6})
+ ids2 := seriesIDs([]uint64{1, 2, 3, 7})
+ exp := seriesIDs([]uint64{1, 3})
got := ids1.Intersect(ids2)
if !exp.Equals(got) {
@@ -38,9 +37,9 @@ func TestSeriesIDs_Intersect(t *testing.T) {
}
// Test exit for loop when 'i < len(l)'
- ids1 = inmem.SeriesIDs([]uint64{1})
- ids2 = inmem.SeriesIDs([]uint64{1, 2})
- exp = inmem.SeriesIDs([]uint64{1})
+ ids1 = seriesIDs([]uint64{1})
+ ids2 = seriesIDs([]uint64{1, 2})
+ exp = seriesIDs([]uint64{1})
got = ids1.Intersect(ids2)
if !exp.Equals(got) {
@@ -51,9 +50,9 @@ func TestSeriesIDs_Intersect(t *testing.T) {
// Test union sets of SeriesIDs.
func TestSeriesIDs_Union(t *testing.T) {
// Test all branches of if-else, exit loop because of 'j < len(r)', and append remainder from left.
- ids1 := inmem.SeriesIDs([]uint64{1, 2, 3, 7})
- ids2 := inmem.SeriesIDs([]uint64{1, 3, 4, 5, 6})
- exp := inmem.SeriesIDs([]uint64{1, 2, 3, 4, 5, 6, 7})
+ ids1 := seriesIDs([]uint64{1, 2, 3, 7})
+ ids2 := seriesIDs([]uint64{1, 3, 4, 5, 6})
+ exp := seriesIDs([]uint64{1, 2, 3, 4, 5, 6, 7})
got := ids1.Union(ids2)
if !exp.Equals(got) {
@@ -61,9 +60,9 @@ func TestSeriesIDs_Union(t *testing.T) {
}
// Test exit because of 'i < len(l)' and append remainder from right.
- ids1 = inmem.SeriesIDs([]uint64{1})
- ids2 = inmem.SeriesIDs([]uint64{1, 2})
- exp = inmem.SeriesIDs([]uint64{1, 2})
+ ids1 = seriesIDs([]uint64{1})
+ ids2 = seriesIDs([]uint64{1, 2})
+ exp = seriesIDs([]uint64{1, 2})
got = ids1.Union(ids2)
if !exp.Equals(got) {
@@ -74,9 +73,9 @@ func TestSeriesIDs_Union(t *testing.T) {
// Test removing one set of SeriesIDs from another.
func TestSeriesIDs_Reject(t *testing.T) {
// Test all branches of if-else, exit loop because of 'j < len(r)', and append remainder from left.
- ids1 := inmem.SeriesIDs([]uint64{1, 2, 3, 7})
- ids2 := inmem.SeriesIDs([]uint64{1, 3, 4, 5, 6})
- exp := inmem.SeriesIDs([]uint64{2, 7})
+ ids1 := seriesIDs([]uint64{1, 2, 3, 7})
+ ids2 := seriesIDs([]uint64{1, 3, 4, 5, 6})
+ exp := seriesIDs([]uint64{2, 7})
got := ids1.Reject(ids2)
if !exp.Equals(got) {
@@ -84,9 +83,9 @@ func TestSeriesIDs_Reject(t *testing.T) {
}
// Test exit because of 'i < len(l)'.
- ids1 = inmem.SeriesIDs([]uint64{1})
- ids2 = inmem.SeriesIDs([]uint64{1, 2})
- exp = inmem.SeriesIDs{}
+ ids1 = seriesIDs([]uint64{1})
+ ids2 = seriesIDs([]uint64{1, 2})
+ exp = seriesIDs{}
got = ids1.Reject(ids2)
if !exp.Equals(got) {
@@ -95,14 +94,14 @@ func TestSeriesIDs_Reject(t *testing.T) {
}
func TestMeasurement_AddSeries_Nil(t *testing.T) {
- m := inmem.NewMeasurement("foo", "cpu")
+ m := newMeasurement("foo", "cpu")
if m.AddSeries(nil) {
t.Fatalf("AddSeries mismatch: exp false, got true")
}
}
func TestMeasurement_AppendSeriesKeysByID_Missing(t *testing.T) {
- m := inmem.NewMeasurement("foo", "cpu")
+ m := newMeasurement("foo", "cpu")
var dst []string
dst = m.AppendSeriesKeysByID(dst, []uint64{1})
if exp, got := 0, len(dst); exp != got {
@@ -111,9 +110,8 @@ func TestMeasurement_AppendSeriesKeysByID_Missing(t *testing.T) {
}
func TestMeasurement_AppendSeriesKeysByID_Exists(t *testing.T) {
- m := inmem.NewMeasurement("foo", "cpu")
- s := inmem.NewSeries([]byte("cpu,host=foo"), models.Tags{models.NewTag([]byte("host"), []byte("foo"))})
- s.ID = 1
+ m := newMeasurement("foo", "cpu")
+ s := newSeries(1, m, "cpu,host=foo", models.Tags{models.NewTag([]byte("host"), []byte("foo"))})
m.AddSeries(s)
var dst []string
@@ -128,13 +126,11 @@ func TestMeasurement_AppendSeriesKeysByID_Exists(t *testing.T) {
}
func TestMeasurement_TagsSet_Deadlock(t *testing.T) {
- m := inmem.NewMeasurement("foo", "cpu")
- s1 := inmem.NewSeries([]byte("cpu,host=foo"), models.Tags{models.NewTag([]byte("host"), []byte("foo"))})
- s1.ID = 1
+ m := newMeasurement("foo", "cpu")
+ s1 := newSeries(1, m, "cpu,host=foo", models.Tags{models.NewTag([]byte("host"), []byte("foo"))})
m.AddSeries(s1)
- s2 := inmem.NewSeries([]byte("cpu,host=bar"), models.Tags{models.NewTag([]byte("host"), []byte("bar"))})
- s2.ID = 2
+ s2 := newSeries(2, m, "cpu,host=bar", models.Tags{models.NewTag([]byte("host"), []byte("bar"))})
m.AddSeries(s2)
m.DropSeries(s1)
@@ -147,12 +143,11 @@ func TestMeasurement_TagsSet_Deadlock(t *testing.T) {
}
func BenchmarkMeasurement_SeriesIDForExp_EQRegex(b *testing.B) {
- m := inmem.NewMeasurement("foo", "cpu")
+ m := newMeasurement("foo", "cpu")
for i := 0; i < 100000; i++ {
- s := inmem.NewSeries([]byte("cpu"), models.Tags{models.NewTag(
+ s := newSeries(uint64(i), m, "cpu", models.Tags{models.NewTag(
[]byte("host"),
[]byte(fmt.Sprintf("host%d", i)))})
- s.ID = uint64(i)
m.AddSeries(s)
}
@@ -178,12 +173,11 @@ func BenchmarkMeasurement_SeriesIDForExp_EQRegex(b *testing.B) {
}
func BenchmarkMeasurement_SeriesIDForExp_NERegex(b *testing.B) {
- m := inmem.NewMeasurement("foo", "cpu")
+ m := newMeasurement("foo", "cpu")
for i := 0; i < 100000; i++ {
- s := inmem.NewSeries([]byte("cpu"), models.Tags{models.Tag{
+ s := newSeries(uint64(i), m, "cpu", models.Tags{models.Tag{
Key: []byte("host"),
Value: []byte(fmt.Sprintf("host%d", i))}})
- s.ID = uint64(i)
m.AddSeries(s)
}
@@ -210,11 +204,10 @@ func BenchmarkMeasurement_SeriesIDForExp_NERegex(b *testing.B) {
}
func benchmarkTagSets(b *testing.B, n int, opt query.IteratorOptions) {
- m := inmem.NewMeasurement("foo", "m")
+ m := newMeasurement("foo", "m")
for i := 0; i < n; i++ {
tags := map[string]string{"tag1": "value1", "tag2": "value2"}
- s := inmem.NewSeries([]byte(fmt.Sprintf("m,tag1=value1,tag2=value2")), models.NewTags(tags))
- s.ID = uint64(i)
+ s := newSeries(uint64(i), m, fmt.Sprintf("m,tag1=value1,tag2=value2"), models.NewTags(tags))
s.AssignShard(0, time.Now().UnixNano())
m.AddSeries(s)
}
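
The seriesIDs tests above exercise sorted-set operations (Equals, Intersect, Union, Reject) over ascending uint64 slices. The sketch below shows the two-pointer intersection those tests cover, assuming both inputs are sorted; it is illustrative only, not the influxdb implementation.

package main

import "fmt"

// intersect returns the sorted intersection of two ascending-sorted id slices
// using a two-pointer merge: advance whichever side holds the smaller value.
func intersect(l, r []uint64) []uint64 {
	var out []uint64
	for i, j := 0, 0; i < len(l) && j < len(r); {
		switch {
		case l[i] == r[j]:
			out = append(out, l[i])
			i++
			j++
		case l[i] < r[j]:
			i++
		default:
			j++
		}
	}
	return out
}

func main() {
	fmt.Println(intersect([]uint64{1, 3, 4, 5, 6}, []uint64{1, 2, 3, 7})) // [1 3]
}
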
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/internal/file_set.go b/vendor/github.com/influxdata/influxdb/tsdb/index/internal/file_set.go
index 6d5e0a6a23..8e5f689a60 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/internal/file_set.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/internal/file_set.go
@@ -10,28 +10,26 @@ import (
// File is a mock implementation of a tsi1.File.
type File struct {
- Closef func() error
- Pathf func() string
- IDf func() int
- Levelf func() int
- Measurementf func(name []byte) tsi1.MeasurementElem
- MeasurementIteratorf func() tsi1.MeasurementIterator
- HasSeriesf func(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool)
- Seriesf func(name []byte, tags models.Tags) tsdb.SeriesElem
- SeriesNf func() uint64
- TagKeyf func(name, key []byte) tsi1.TagKeyElem
- TagKeyIteratorf func(name []byte) tsi1.TagKeyIterator
- TagValuef func(name, key, value []byte) tsi1.TagValueElem
- TagValueIteratorf func(name, key []byte) tsi1.TagValueIterator
- SeriesIteratorf func() tsdb.SeriesIterator
- MeasurementSeriesIteratorf func(name []byte) tsdb.SeriesIterator
- TagKeySeriesIteratorf func(name, key []byte) tsdb.SeriesIterator
- TagValueSeriesIteratorf func(name, key, value []byte) tsdb.SeriesIterator
- MergeSeriesSketchesf func(s, t estimator.Sketch) error
- MergeMeasurementsSketchesf func(s, t estimator.Sketch) error
- Retainf func()
- Releasef func()
- Filterf func() *bloom.Filter
+ Closef func() error
+ Pathf func() string
+ IDf func() int
+ Levelf func() int
+ Measurementf func(name []byte) tsi1.MeasurementElem
+ MeasurementIteratorf func() tsi1.MeasurementIterator
+ HasSeriesf func(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool)
+ TagKeyf func(name, key []byte) tsi1.TagKeyElem
+ TagKeyIteratorf func(name []byte) tsi1.TagKeyIterator
+ TagValuef func(name, key, value []byte) tsi1.TagValueElem
+ TagValueIteratorf func(name, key []byte) tsi1.TagValueIterator
+ SeriesIDIteratorf func() tsdb.SeriesIDIterator
+ MeasurementSeriesIDIteratorf func(name []byte) tsdb.SeriesIDIterator
+ TagKeySeriesIDIteratorf func(name, key []byte) tsdb.SeriesIDIterator
+ TagValueSeriesIDIteratorf func(name, key, value []byte) tsdb.SeriesIDIterator
+ MergeSeriesSketchesf func(s, t estimator.Sketch) error
+ MergeMeasurementsSketchesf func(s, t estimator.Sketch) error
+ Retainf func()
+ Releasef func()
+ Filterf func() *bloom.Filter
}
func (f *File) Close() error { return f.Closef() }
@@ -43,25 +41,24 @@ func (f *File) MeasurementIterator() tsi1.MeasurementIterator { return f.Measure
func (f *File) HasSeries(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool) {
return f.HasSeriesf(name, tags, buf)
}
-func (f *File) Series(name []byte, tags models.Tags) tsdb.SeriesElem { return f.Seriesf(name, tags) }
-func (f *File) SeriesN() uint64 { return f.SeriesNf() }
-func (f *File) TagKey(name, key []byte) tsi1.TagKeyElem { return f.TagKeyf(name, key) }
-func (f *File) TagKeyIterator(name []byte) tsi1.TagKeyIterator { return f.TagKeyIteratorf(name) }
+func (f *File) TagKey(name, key []byte) tsi1.TagKeyElem { return f.TagKeyf(name, key) }
+func (f *File) TagKeyIterator(name []byte) tsi1.TagKeyIterator { return f.TagKeyIteratorf(name) }
+
func (f *File) TagValue(name, key, value []byte) tsi1.TagValueElem {
return f.TagValuef(name, key, value)
}
func (f *File) TagValueIterator(name, key []byte) tsi1.TagValueIterator {
return f.TagValueIteratorf(name, key)
}
-func (f *File) SeriesIterator() tsdb.SeriesIterator { return f.SeriesIteratorf() }
-func (f *File) MeasurementSeriesIterator(name []byte) tsdb.SeriesIterator {
- return f.MeasurementSeriesIteratorf(name)
+func (f *File) SeriesIDIterator() tsdb.SeriesIDIterator { return f.SeriesIDIteratorf() }
+func (f *File) MeasurementSeriesIDIterator(name []byte) tsdb.SeriesIDIterator {
+ return f.MeasurementSeriesIDIteratorf(name)
}
-func (f *File) TagKeySeriesIterator(name, key []byte) tsdb.SeriesIterator {
- return f.TagKeySeriesIteratorf(name, key)
+func (f *File) TagKeySeriesIDIterator(name, key []byte) tsdb.SeriesIDIterator {
+ return f.TagKeySeriesIDIteratorf(name, key)
}
-func (f *File) TagValueSeriesIterator(name, key, value []byte) tsdb.SeriesIterator {
- return f.TagValueSeriesIteratorf(name, key, value)
+func (f *File) TagValueSeriesIDIterator(name, key, value []byte) tsdb.SeriesIDIterator {
+ return f.TagValueSeriesIDIteratorf(name, key, value)
}
func (f *File) MergeSeriesSketches(s, t estimator.Sketch) error { return f.MergeSeriesSketchesf(s, t) }
func (f *File) MergeMeasurementsSketches(s, t estimator.Sketch) error {
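
The mock File above uses the function-field pattern: every interface method delegates to a caller-supplied closure, so a test only stubs the methods it actually touches. A minimal sketch of the same pattern follows, against a hypothetical two-method Reader interface rather than tsi1.File.

package main

import "fmt"

// Reader is a small interface standing in for tsi1.File in this sketch.
type Reader interface {
	Path() string
	HasSeries(name []byte) bool
}

// MockReader delegates each method to a function field supplied by the test.
type MockReader struct {
	Pathf      func() string
	HasSeriesf func(name []byte) bool
}

func (m *MockReader) Path() string               { return m.Pathf() }
func (m *MockReader) HasSeries(name []byte) bool { return m.HasSeriesf(name) }

func main() {
	var r Reader = &MockReader{
		Pathf:      func() string { return "/tmp/index-00000001.tsi" },
		HasSeriesf: func(name []byte) bool { return string(name) == "cpu" },
	}
	fmt.Println(r.Path(), r.HasSeries([]byte("cpu")))
}
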
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set.go
index 36b13f6d05..1987c76c88 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set.go
@@ -2,16 +2,12 @@ package tsi1
import (
"bytes"
- "errors"
"fmt"
"regexp"
+ "sync"
- "github.com/influxdata/influxdb/models"
- "github.com/influxdata/influxdb/pkg/bloom"
- "github.com/influxdata/influxdb/pkg/bytesutil"
"github.com/influxdata/influxdb/pkg/estimator"
"github.com/influxdata/influxdb/pkg/estimator/hll"
- "github.com/influxdata/influxdb/query"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxql"
)
@@ -19,24 +15,20 @@ import (
// FileSet represents a collection of files.
type FileSet struct {
levels []CompactionLevel
+ sfile *tsdb.SeriesFile
files []File
- filters []*bloom.Filter // per-level filters
database string
manifestSize int64 // Size of the manifest file in bytes.
}
// NewFileSet returns a new instance of FileSet.
-func NewFileSet(database string, levels []CompactionLevel, files []File) (*FileSet, error) {
- fs := &FileSet{
+func NewFileSet(database string, levels []CompactionLevel, sfile *tsdb.SeriesFile, files []File) (*FileSet, error) {
+ return &FileSet{
levels: levels,
+ sfile: sfile,
files: files,
- filters: make([]*bloom.Filter, len(levels)),
database: database,
- }
- if err := fs.buildFilters(); err != nil {
- return nil, err
- }
- return fs, nil
+ }, nil
}
// Close closes all the files in the file set.
@@ -64,14 +56,17 @@ func (fs *FileSet) Release() {
}
}
+// SeriesFile returns the attached series file.
+func (fs *FileSet) SeriesFile() *tsdb.SeriesFile { return fs.sfile }
+
// PrependLogFile returns a new file set with f added at the beginning.
// Filters do not need to be rebuilt because log files have no bloom filter.
func (fs *FileSet) PrependLogFile(f *LogFile) *FileSet {
return &FileSet{
database: fs.database,
levels: fs.levels,
+ sfile: fs.sfile,
files: append([]File{f}, fs.files...),
- filters: fs.filters,
}
}
@@ -112,27 +107,12 @@ func (fs *FileSet) MustReplace(oldFiles []File, newFile File) *FileSet {
other[i] = newFile
copy(other[i+1:], fs.files[i+len(oldFiles):])
- // Copy existing bloom filters.
- filters := make([]*bloom.Filter, len(fs.filters))
- // copy(filters, fs.filters)
-
- // Clear filters at replaced file levels.
- filters[newFile.Level()] = nil
- for _, f := range oldFiles {
- filters[f.Level()] = nil
- }
-
// Build new fileset and rebuild changed filters.
- newFS := &FileSet{
+ return &FileSet{
levels: fs.levels,
files: other,
- filters: filters,
database: fs.database,
}
- if err := newFS.buildFilters(); err != nil {
- panic("cannot build file set: " + err.Error())
- }
- return newFS
}
// MaxID returns the highest file identifier.
@@ -196,11 +176,12 @@ func (fs *FileSet) LastContiguousIndexFilesByLevel(level int) []*IndexFile {
return a
}
-// SeriesIterator returns an iterator over all series in the index.
-func (fs *FileSet) SeriesIterator() tsdb.SeriesIterator {
- a := make([]tsdb.SeriesIterator, 0, len(fs.files))
+/*
+// SeriesIDIterator returns an iterator over all series in the index.
+func (fs *FileSet) SeriesIDIterator() tsdb.SeriesIDIterator {
+ a := make([]tsdb.SeriesIDIterator, 0, len(fs.files))
for _, f := range fs.files {
- itr := f.SeriesIterator()
+ itr := f.SeriesIDIterator()
if itr == nil {
continue
}
@@ -208,6 +189,7 @@ func (fs *FileSet) SeriesIterator() tsdb.SeriesIterator {
}
return FilterUndeletedSeriesIterator(MergeSeriesIterators(a...))
}
+*/
// Measurement returns a measurement by name.
func (fs *FileSet) Measurement(name []byte) MeasurementElem {
@@ -232,32 +214,31 @@ func (fs *FileSet) MeasurementIterator() MeasurementIterator {
a = append(a, itr)
}
}
- return FilterUndeletedMeasurementIterator(MergeMeasurementIterators(a...))
+ return MergeMeasurementIterators(a...)
}
-// MeasurementSeriesIterator returns an iterator over all non-tombstoned series
-// in the index for the provided measurement.
-func (fs *FileSet) MeasurementSeriesIterator(name []byte) tsdb.SeriesIterator {
- a := make([]tsdb.SeriesIterator, 0, len(fs.files))
+// TagKeyIterator returns an iterator over all tag keys for a measurement.
+func (fs *FileSet) TagKeyIterator(name []byte) TagKeyIterator {
+ a := make([]TagKeyIterator, 0, len(fs.files))
for _, f := range fs.files {
- itr := f.MeasurementSeriesIterator(name)
+ itr := f.TagKeyIterator(name)
if itr != nil {
a = append(a, itr)
}
}
- return FilterUndeletedSeriesIterator(MergeSeriesIterators(a...))
+ return MergeTagKeyIterators(a...)
}
-// TagKeyIterator returns an iterator over all tag keys for a measurement.
-func (fs *FileSet) TagKeyIterator(name []byte) TagKeyIterator {
- a := make([]TagKeyIterator, 0, len(fs.files))
+// MeasurementSeriesIDIterator returns a series iterator for a measurement.
+func (fs *FileSet) MeasurementSeriesIDIterator(name []byte) tsdb.SeriesIDIterator {
+ a := make([]tsdb.SeriesIDIterator, 0, len(fs.files))
for _, f := range fs.files {
- itr := f.TagKeyIterator(name)
+ itr := f.MeasurementSeriesIDIterator(name)
if itr != nil {
a = append(a, itr)
}
}
- return MergeTagKeyIterators(a...)
+ return tsdb.MergeSeriesIDIterators(a...)
}
// MeasurementTagKeysByExpr extracts the tag keys wanted by the expression.
@@ -331,88 +312,43 @@ func (fs *FileSet) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (ma
return nil, fmt.Errorf("%#v", expr)
}
-// tagValuesByKeyAndExpr retrieves tag values for the provided tag keys.
-//
-// tagValuesByKeyAndExpr returns sets of values for each key, indexable by the
-// position of the tag key in the keys argument.
-//
-// N.B tagValuesByKeyAndExpr relies on keys being sorted in ascending
-// lexicographic order.
-func (fs *FileSet) tagValuesByKeyAndExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr, fieldset *tsdb.MeasurementFieldSet) ([]map[string]struct{}, error) {
- itr, err := fs.seriesByExprIterator(name, expr, fieldset.Fields(string(name)))
- if err != nil {
- return nil, err
- } else if itr == nil {
- return nil, nil
- }
-
- keyIdxs := make(map[string]int, len(keys))
- for ki, key := range keys {
- keyIdxs[key] = ki
-
- // Check that keys are in order.
- if ki > 0 && key < keys[ki-1] {
- return nil, fmt.Errorf("keys %v are not in ascending order", keys)
- }
- }
-
- resultSet := make([]map[string]struct{}, len(keys))
- for i := 0; i < len(resultSet); i++ {
- resultSet[i] = make(map[string]struct{})
- }
-
- // Iterate all series to collect tag values.
- for e := itr.Next(); e != nil; e = itr.Next() {
- if auth != nil && !auth.AuthorizeSeriesRead(fs.database, e.Name(), e.Tags()) {
- continue
- }
- for _, t := range e.Tags() {
- if idx, ok := keyIdxs[string(t.Key)]; ok {
- resultSet[idx][string(t.Value)] = struct{}{}
- } else if string(t.Key) > keys[len(keys)-1] {
- // The tag key is > the largest key we're interested in.
- break
- }
- }
- }
- return resultSet, nil
-}
-
// tagKeysByFilter will filter the tag keys for the measurement.
func (fs *FileSet) tagKeysByFilter(name []byte, op influxql.Token, val []byte, regex *regexp.Regexp) map[string]struct{} {
ss := make(map[string]struct{})
itr := fs.TagKeyIterator(name)
- for e := itr.Next(); e != nil; e = itr.Next() {
- var matched bool
- switch op {
- case influxql.EQ:
- matched = bytes.Equal(e.Key(), val)
- case influxql.NEQ:
- matched = !bytes.Equal(e.Key(), val)
- case influxql.EQREGEX:
- matched = regex.Match(e.Key())
- case influxql.NEQREGEX:
- matched = !regex.Match(e.Key())
- }
+ if itr != nil {
+ for e := itr.Next(); e != nil; e = itr.Next() {
+ var matched bool
+ switch op {
+ case influxql.EQ:
+ matched = bytes.Equal(e.Key(), val)
+ case influxql.NEQ:
+ matched = !bytes.Equal(e.Key(), val)
+ case influxql.EQREGEX:
+ matched = regex.Match(e.Key())
+ case influxql.NEQREGEX:
+ matched = !regex.Match(e.Key())
+ }
- if !matched {
- continue
+ if !matched {
+ continue
+ }
+ ss[string(e.Key())] = struct{}{}
}
- ss[string(e.Key())] = struct{}{}
}
return ss
}
-// TagKeySeriesIterator returns a series iterator for all values across a single key.
-func (fs *FileSet) TagKeySeriesIterator(name, key []byte) tsdb.SeriesIterator {
- a := make([]tsdb.SeriesIterator, 0, len(fs.files))
+// TagKeySeriesIDIterator returns a series iterator for all values across a single key.
+func (fs *FileSet) TagKeySeriesIDIterator(name, key []byte) tsdb.SeriesIDIterator {
+ a := make([]tsdb.SeriesIDIterator, 0, len(fs.files))
for _, f := range fs.files {
- itr := f.TagKeySeriesIterator(name, key)
+ itr := f.TagKeySeriesIDIterator(name, key)
if itr != nil {
a = append(a, itr)
}
}
- return FilterUndeletedSeriesIterator(MergeSeriesIterators(a...))
+ return tsdb.MergeSeriesIDIterators(a...)
}
// HasTagKey returns true if the tag key exists.
@@ -447,678 +383,154 @@ func (fs *FileSet) TagValueIterator(name, key []byte) TagValueIterator {
return MergeTagValueIterators(a...)
}
-// TagValueSeriesIterator returns a series iterator for a single tag value.
-func (fs *FileSet) TagValueSeriesIterator(name, key, value []byte) tsdb.SeriesIterator {
- a := make([]tsdb.SeriesIterator, 0, len(fs.files))
+// TagValueSeriesIDIterator returns a series iterator for a single tag value.
+func (fs *FileSet) TagValueSeriesIDIterator(name, key, value []byte) tsdb.SeriesIDIterator {
+ a := make([]tsdb.SeriesIDIterator, 0, len(fs.files))
for _, f := range fs.files {
- itr := f.TagValueSeriesIterator(name, key, value)
+ itr := f.TagValueSeriesIDIterator(name, key, value)
if itr != nil {
a = append(a, itr)
}
}
- return FilterUndeletedSeriesIterator(MergeSeriesIterators(a...))
+ return tsdb.MergeSeriesIDIterators(a...)
}
-// MatchTagValueSeriesIterator returns a series iterator for tags which match value.
-// If matches is false, returns iterators which do not match value.
-func (fs *FileSet) MatchTagValueSeriesIterator(name, key []byte, value *regexp.Regexp, matches bool) tsdb.SeriesIterator {
- matchEmpty := value.MatchString("")
+// MeasurementsSketches returns the merged measurement sketches for the FileSet.
+func (fs *FileSet) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {
+ sketch, tsketch := hll.NewDefaultPlus(), hll.NewDefaultPlus()
- if matches {
- if matchEmpty {
- return FilterUndeletedSeriesIterator(fs.matchTagValueEqualEmptySeriesIterator(name, key, value))
+ // Iterate over all the files and merge the sketches into the result.
+ for _, f := range fs.files {
+ if err := f.MergeMeasurementsSketches(sketch, tsketch); err != nil {
+ return nil, nil, err
}
- return FilterUndeletedSeriesIterator(fs.matchTagValueEqualNotEmptySeriesIterator(name, key, value))
- }
-
- if matchEmpty {
- return FilterUndeletedSeriesIterator(fs.matchTagValueNotEqualEmptySeriesIterator(name, key, value))
}
- return FilterUndeletedSeriesIterator(fs.matchTagValueNotEqualNotEmptySeriesIterator(name, key, value))
+ return sketch, tsketch, nil
}
-func (fs *FileSet) matchTagValueEqualEmptySeriesIterator(name, key []byte, value *regexp.Regexp) tsdb.SeriesIterator {
- vitr := fs.TagValueIterator(name, key)
- if vitr == nil {
- return fs.MeasurementSeriesIterator(name)
- }
-
- var itrs []tsdb.SeriesIterator
- for e := vitr.Next(); e != nil; e = vitr.Next() {
- if !value.Match(e.Value()) {
- itrs = append(itrs, fs.TagValueSeriesIterator(name, key, e.Value()))
- }
- }
+// File represents a log or index file.
+type File interface {
+ Close() error
+ Path() string
- return DifferenceSeriesIterators(
- fs.MeasurementSeriesIterator(name),
- MergeSeriesIterators(itrs...),
- )
-}
+ ID() int
+ Level() int
-func (fs *FileSet) matchTagValueEqualNotEmptySeriesIterator(name, key []byte, value *regexp.Regexp) tsdb.SeriesIterator {
- vitr := fs.TagValueIterator(name, key)
- if vitr == nil {
- return nil
- }
+ Measurement(name []byte) MeasurementElem
+ MeasurementIterator() MeasurementIterator
- var itrs []tsdb.SeriesIterator
- for e := vitr.Next(); e != nil; e = vitr.Next() {
- if value.Match(e.Value()) {
- itrs = append(itrs, fs.TagValueSeriesIterator(name, key, e.Value()))
- }
- }
- return MergeSeriesIterators(itrs...)
-}
+ TagKey(name, key []byte) TagKeyElem
+ TagKeyIterator(name []byte) TagKeyIterator
-func (fs *FileSet) matchTagValueNotEqualEmptySeriesIterator(name, key []byte, value *regexp.Regexp) tsdb.SeriesIterator {
- vitr := fs.TagValueIterator(name, key)
- if vitr == nil {
- return nil
- }
+ TagValue(name, key, value []byte) TagValueElem
+ TagValueIterator(name, key []byte) TagValueIterator
- var itrs []tsdb.SeriesIterator
- for e := vitr.Next(); e != nil; e = vitr.Next() {
- if !value.Match(e.Value()) {
- itrs = append(itrs, fs.TagValueSeriesIterator(name, key, e.Value()))
- }
- }
- return MergeSeriesIterators(itrs...)
-}
+ // Series iteration.
+ MeasurementSeriesIDIterator(name []byte) tsdb.SeriesIDIterator
+ TagKeySeriesIDIterator(name, key []byte) tsdb.SeriesIDIterator
+ TagValueSeriesIDIterator(name, key, value []byte) tsdb.SeriesIDIterator
-func (fs *FileSet) matchTagValueNotEqualNotEmptySeriesIterator(name, key []byte, value *regexp.Regexp) tsdb.SeriesIterator {
- vitr := fs.TagValueIterator(name, key)
- if vitr == nil {
- return fs.MeasurementSeriesIterator(name)
- }
+ // Sketches for cardinality estimation
+ MergeMeasurementsSketches(s, t estimator.Sketch) error
- var itrs []tsdb.SeriesIterator
- for e := vitr.Next(); e != nil; e = vitr.Next() {
- if value.Match(e.Value()) {
- itrs = append(itrs, fs.TagValueSeriesIterator(name, key, e.Value()))
- }
- }
+ // Reference counting.
+ Retain()
+ Release()
- return DifferenceSeriesIterators(
- fs.MeasurementSeriesIterator(name),
- MergeSeriesIterators(itrs...),
- )
+ // Size of file on disk
+ Size() int64
}
-func (fs *FileSet) MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) {
- // Return filtered list if expression exists.
- if expr != nil {
- return fs.measurementNamesByExpr(auth, expr)
- }
-
- itr := fs.MeasurementIterator()
- if itr == nil {
- return nil, nil
- }
+type Files []File
- // Iterate over all measurements if no condition exists.
- var names [][]byte
- for e := itr.Next(); e != nil; e = itr.Next() {
- if fs.measurementAuthorizedSeries(auth, e.Name()) {
- names = append(names, e.Name())
- }
+func (a Files) IDs() []int {
+ ids := make([]int, len(a))
+ for i := range a {
+ ids[i] = a[i].ID()
}
- return names, nil
+ return ids
}
-func (fs *FileSet) measurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) {
- if expr == nil {
- return nil, nil
- }
-
- switch e := expr.(type) {
- case *influxql.BinaryExpr:
- switch e.Op {
- case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX:
- tag, ok := e.LHS.(*influxql.VarRef)
- if !ok {
- return nil, fmt.Errorf("left side of '%s' must be a tag key", e.Op.String())
- }
-
- // Retrieve value or regex expression from RHS.
- var value string
- var regex *regexp.Regexp
- if influxql.IsRegexOp(e.Op) {
- re, ok := e.RHS.(*influxql.RegexLiteral)
- if !ok {
- return nil, fmt.Errorf("right side of '%s' must be a regular expression", e.Op.String())
- }
- regex = re.Val
- } else {
- s, ok := e.RHS.(*influxql.StringLiteral)
- if !ok {
- return nil, fmt.Errorf("right side of '%s' must be a tag value string", e.Op.String())
- }
- value = s.Val
- }
-
- // Match on name, if specified.
- if tag.Val == "_name" {
- return fs.measurementNamesByNameFilter(auth, e.Op, value, regex), nil
- } else if influxql.IsSystemName(tag.Val) {
- return nil, nil
- }
- return fs.measurementNamesByTagFilter(auth, e.Op, tag.Val, value, regex), nil
-
- case influxql.OR, influxql.AND:
- lhs, err := fs.measurementNamesByExpr(auth, e.LHS)
- if err != nil {
- return nil, err
- }
-
- rhs, err := fs.measurementNamesByExpr(auth, e.RHS)
- if err != nil {
- return nil, err
- }
-
- if e.Op == influxql.OR {
- return bytesutil.Union(lhs, rhs), nil
- }
- return bytesutil.Intersect(lhs, rhs), nil
-
- default:
- return nil, fmt.Errorf("invalid tag comparison operator")
- }
-
- case *influxql.ParenExpr:
- return fs.measurementNamesByExpr(auth, e.Expr)
- default:
- return nil, fmt.Errorf("%#v", expr)
- }
+// fileSetSeriesIDIterator attaches a fileset to an iterator that is released on close.
+type fileSetSeriesIDIterator struct {
+ once sync.Once
+ fs *FileSet
+ itr tsdb.SeriesIDIterator
}
-// measurementNamesByNameFilter returns matching measurement names in sorted order.
-func (fs *FileSet) measurementNamesByNameFilter(auth query.Authorizer, op influxql.Token, val string, regex *regexp.Regexp) [][]byte {
- itr := fs.MeasurementIterator()
+func newFileSetSeriesIDIterator(fs *FileSet, itr tsdb.SeriesIDIterator) tsdb.SeriesIDIterator {
if itr == nil {
+ fs.Release()
return nil
}
-
- var names [][]byte
- for e := itr.Next(); e != nil; e = itr.Next() {
- var matched bool
- switch op {
- case influxql.EQ:
- matched = string(e.Name()) == val
- case influxql.NEQ:
- matched = string(e.Name()) != val
- case influxql.EQREGEX:
- matched = regex.Match(e.Name())
- case influxql.NEQREGEX:
- matched = !regex.Match(e.Name())
- }
-
- if matched && fs.measurementAuthorizedSeries(auth, e.Name()) {
- names = append(names, e.Name())
- }
- }
- bytesutil.Sort(names)
- return names
-}
-
-func (fs *FileSet) measurementNamesByTagFilter(auth query.Authorizer, op influxql.Token, key, val string, regex *regexp.Regexp) [][]byte {
- var names [][]byte
-
- mitr := fs.MeasurementIterator()
- if mitr == nil {
- return nil
- }
-
- // valEqual determines if the provided []byte] is equal to the tag value
- // to be filtered on.
- valEqual := regex.Match
- if op == influxql.EQ || op == influxql.NEQ {
- vb := []byte(val)
- valEqual = func(b []byte) bool { return bytes.Equal(vb, b) }
- }
-
- var tagMatch bool
- var authorized bool
- for me := mitr.Next(); me != nil; me = mitr.Next() {
- // If the measurement doesn't have the tag key, then it won't be considered.
- if !fs.HasTagKey(me.Name(), []byte(key)) {
- continue
- }
-
- tagMatch = false
- // Authorization must be explicitly granted when an authorizer is present.
- authorized = auth == nil
-
- vitr := fs.TagValueIterator(me.Name(), []byte(key))
- if vitr != nil {
- for ve := vitr.Next(); ve != nil; ve = vitr.Next() {
- if !valEqual(ve.Value()) {
- continue
- }
-
- tagMatch = true
- if auth == nil {
- break
- }
-
- // When an authorizer is present, the measurement should be
- // included only if one of it's series is authorized.
- sitr := fs.TagValueSeriesIterator(me.Name(), []byte(key), ve.Value())
- if sitr == nil {
- continue
- }
- // Locate a series with this matching tag value that's authorized.
- for se := sitr.Next(); se != nil; se = sitr.Next() {
- if auth.AuthorizeSeriesRead(fs.database, me.Name(), se.Tags()) {
- authorized = true
- break
- }
- }
-
- if tagMatch && authorized {
- // The measurement can definitely be included or rejected.
- break
- }
- }
- }
-
- // For negation operators, to determine if the measurement is authorized,
- // an authorized series belonging to the measurement must be located.
- // Then, the measurement can be added iff !tagMatch && authorized.
- if op == influxql.NEQ || op == influxql.NEQREGEX && !tagMatch {
- authorized = fs.measurementAuthorizedSeries(auth, me.Name())
- }
-
- // tags match | operation is EQ | measurement matches
- // --------------------------------------------------
- // True | True | True
- // True | False | False
- // False | True | False
- // False | False | True
- if tagMatch == (op == influxql.EQ || op == influxql.EQREGEX) && authorized {
- names = append(names, me.Name())
- }
- }
-
- bytesutil.Sort(names)
- return names
+ return &fileSetSeriesIDIterator{fs: fs, itr: itr}
}
-// measurementAuthorizedSeries determines if the measurement contains a series
-// that is authorized to be read.
-func (fs *FileSet) measurementAuthorizedSeries(auth query.Authorizer, name []byte) bool {
- if auth == nil {
- return true
- }
-
- sitr := fs.MeasurementSeriesIterator(name)
- for series := sitr.Next(); series != nil; series = sitr.Next() {
- if auth.AuthorizeSeriesRead(fs.database, name, series.Tags()) {
- return true
- }
- }
- return false
+func (itr *fileSetSeriesIDIterator) Next() (tsdb.SeriesIDElem, error) {
+ return itr.itr.Next()
}
-// HasSeries returns true if the series exists and is not tombstoned.
-func (fs *FileSet) HasSeries(name []byte, tags models.Tags, buf []byte) bool {
- for _, f := range fs.files {
- if exists, tombstoned := f.HasSeries(name, tags, buf); exists {
- return !tombstoned
- }
- }
- return false
+func (itr *fileSetSeriesIDIterator) Close() error {
+ itr.once.Do(func() { itr.fs.Release() })
+ return itr.itr.Close()
}
-// FilterNamesTags filters out any series which already exist. It modifies the
-// provided slices of names and tags.
-func (fs *FileSet) FilterNamesTags(names [][]byte, tagsSlice []models.Tags) ([][]byte, []models.Tags) {
- buf := make([]byte, 4096)
-
- // Filter across all log files.
- // Log files obtain a read lock and should be done in bulk for performance.
- for _, f := range fs.LogFiles() {
- names, tagsSlice = f.FilterNamesTags(names, tagsSlice)
- }
-
- // Filter across remaining index files.
- indexFiles := fs.IndexFiles()
- newNames, newTagsSlice := names[:0], tagsSlice[:0]
- for i := range names {
- name, tags := names[i], tagsSlice[i]
- currentLevel, skipLevel := -1, false
-
- var exists, tombstoned bool
- for j := 0; j < len(indexFiles); j++ {
- f := indexFiles[j]
-
- // Check for existence on the level when it changes.
- if level := f.Level(); currentLevel != level {
- currentLevel, skipLevel = level, false
-
- if filter := fs.filters[level]; filter != nil {
- if !filter.Contains(AppendSeriesKey(buf[:0], name, tags)) {
- skipLevel = true
- }
- }
- }
-
- // Skip file if in level where it doesn't exist.
- if skipLevel {
- continue
- }
-
- // Stop once we find the series in a file.
- if exists, tombstoned = f.HasSeries(name, tags, buf); exists {
- break
- }
- }
-
- // If the series doesn't exist or it has been tombstoned then add it.
- if !exists || tombstoned {
- newNames = append(newNames, name)
- newTagsSlice = append(newTagsSlice, tags)
- }
- }
-
- return newNames, newTagsSlice
+// fileSetMeasurementIterator attaches a fileset to an iterator that is released on close.
+type fileSetMeasurementIterator struct {
+ once sync.Once
+ fs *FileSet
+ itr tsdb.MeasurementIterator
}
-// SeriesSketches returns the merged series sketches for the FileSet.
-func (fs *FileSet) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {
- sketch, tsketch := hll.NewDefaultPlus(), hll.NewDefaultPlus()
-
- // Iterate over all the files and merge the sketches into the result.
- for _, f := range fs.files {
- if err := f.MergeSeriesSketches(sketch, tsketch); err != nil {
- return nil, nil, err
- }
- }
- return sketch, tsketch, nil
+func newFileSetMeasurementIterator(fs *FileSet, itr tsdb.MeasurementIterator) *fileSetMeasurementIterator {
+ return &fileSetMeasurementIterator{fs: fs, itr: itr}
}
-// MeasurementsSketches returns the merged measurement sketches for the FileSet.
-func (fs *FileSet) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {
- sketch, tsketch := hll.NewDefaultPlus(), hll.NewDefaultPlus()
-
- // Iterate over all the files and merge the sketches into the result.
- for _, f := range fs.files {
- if err := f.MergeMeasurementsSketches(sketch, tsketch); err != nil {
- return nil, nil, err
- }
- }
- return sketch, tsketch, nil
+func (itr *fileSetMeasurementIterator) Next() ([]byte, error) {
+ return itr.itr.Next()
}
-// MeasurementSeriesByExprIterator returns a series iterator for a measurement
-// that is filtered by expr. If expr only contains time expressions then this
-// call is equivalent to MeasurementSeriesIterator().
-func (fs *FileSet) MeasurementSeriesByExprIterator(name []byte, expr influxql.Expr, fieldset *tsdb.MeasurementFieldSet) (tsdb.SeriesIterator, error) {
- // Return all series for the measurement if there are no tag expressions.
- if expr == nil {
- return fs.MeasurementSeriesIterator(name), nil
- }
- return fs.seriesByExprIterator(name, expr, fieldset.CreateFieldsIfNotExists(name))
+func (itr *fileSetMeasurementIterator) Close() error {
+ itr.once.Do(func() { itr.fs.Release() })
+ return itr.itr.Close()
}
-// MeasurementSeriesKeysByExpr returns a list of series keys matching expr.
-func (fs *FileSet) MeasurementSeriesKeysByExpr(name []byte, expr influxql.Expr, fieldset *tsdb.MeasurementFieldSet) ([][]byte, error) {
- // Create iterator for all matching series.
- itr, err := fs.MeasurementSeriesByExprIterator(name, expr, fieldset)
- if err != nil {
- return nil, err
- } else if itr == nil {
- return nil, nil
- }
-
- // Iterate over all series and generate keys.
- var keys [][]byte
- for e := itr.Next(); e != nil; e = itr.Next() {
- // Check for unsupported field filters.
- // Any remaining filters means there were fields (e.g., `WHERE value = 1.2`).
- if e.Expr() != nil {
- if v, ok := e.Expr().(*influxql.BooleanLiteral); !ok || !v.Val {
- return nil, errors.New("fields not supported in WHERE clause during deletion")
- }
- }
-
- keys = append(keys, models.MakeKey(e.Name(), e.Tags()))
- }
- return keys, nil
+// fileSetTagKeyIterator attaches a fileset to an iterator that is released on close.
+type fileSetTagKeyIterator struct {
+ once sync.Once
+ fs *FileSet
+ itr tsdb.TagKeyIterator
}
-func (fs *FileSet) seriesByExprIterator(name []byte, expr influxql.Expr, mf *tsdb.MeasurementFields) (tsdb.SeriesIterator, error) {
- switch expr := expr.(type) {
- case *influxql.BinaryExpr:
- switch expr.Op {
- case influxql.AND, influxql.OR:
- // Get the series IDs and filter expressions for the LHS.
- litr, err := fs.seriesByExprIterator(name, expr.LHS, mf)
- if err != nil {
- return nil, err
- }
-
- // Get the series IDs and filter expressions for the RHS.
- ritr, err := fs.seriesByExprIterator(name, expr.RHS, mf)
- if err != nil {
- return nil, err
- }
-
- // Intersect iterators if expression is "AND".
- if expr.Op == influxql.AND {
- return IntersectSeriesIterators(litr, ritr), nil
- }
-
- // Union iterators if expression is "OR".
- return UnionSeriesIterators(litr, ritr), nil
-
- default:
- return fs.seriesByBinaryExprIterator(name, expr, mf)
- }
-
- case *influxql.ParenExpr:
- return fs.seriesByExprIterator(name, expr.Expr, mf)
-
- default:
- return nil, nil
- }
-}
-
-// seriesByBinaryExprIterator returns a series iterator and a filtering expression.
-func (fs *FileSet) seriesByBinaryExprIterator(name []byte, n *influxql.BinaryExpr, mf *tsdb.MeasurementFields) (tsdb.SeriesIterator, error) {
- // If this binary expression has another binary expression, then this
- // is some expression math and we should just pass it to the underlying query.
- if _, ok := n.LHS.(*influxql.BinaryExpr); ok {
- return newSeriesExprIterator(fs.MeasurementSeriesIterator(name), n), nil
- } else if _, ok := n.RHS.(*influxql.BinaryExpr); ok {
- return newSeriesExprIterator(fs.MeasurementSeriesIterator(name), n), nil
- }
-
- // Retrieve the variable reference from the correct side of the expression.
- key, ok := n.LHS.(*influxql.VarRef)
- value := n.RHS
- if !ok {
- key, ok = n.RHS.(*influxql.VarRef)
- if !ok {
- return nil, fmt.Errorf("invalid expression: %s", n.String())
- }
- value = n.LHS
- }
-
- // For fields, return all series from this measurement.
- if key.Val != "_name" && ((key.Type == influxql.Unknown && mf.HasField(key.Val)) || key.Type == influxql.AnyField || (key.Type != influxql.Tag && key.Type != influxql.Unknown)) {
- return newSeriesExprIterator(fs.MeasurementSeriesIterator(name), n), nil
- } else if value, ok := value.(*influxql.VarRef); ok {
- // Check if the RHS is a variable and if it is a field.
- if value.Val != "_name" && ((value.Type == influxql.Unknown && mf.HasField(value.Val)) || key.Type == influxql.AnyField || (value.Type != influxql.Tag && value.Type != influxql.Unknown)) {
- return newSeriesExprIterator(fs.MeasurementSeriesIterator(name), n), nil
- }
- }
-
- // Create iterator based on value type.
- switch value := value.(type) {
- case *influxql.StringLiteral:
- return fs.seriesByBinaryExprStringIterator(name, []byte(key.Val), []byte(value.Val), n.Op)
- case *influxql.RegexLiteral:
- return fs.seriesByBinaryExprRegexIterator(name, []byte(key.Val), value.Val, n.Op)
- case *influxql.VarRef:
- return fs.seriesByBinaryExprVarRefIterator(name, []byte(key.Val), value, n.Op)
- default:
- if n.Op == influxql.NEQ || n.Op == influxql.NEQREGEX {
- return fs.MeasurementSeriesIterator(name), nil
- }
- return nil, nil
- }
+func newFileSetTagKeyIterator(fs *FileSet, itr tsdb.TagKeyIterator) *fileSetTagKeyIterator {
+ return &fileSetTagKeyIterator{fs: fs, itr: itr}
}
-func (fs *FileSet) seriesByBinaryExprStringIterator(name, key, value []byte, op influxql.Token) (tsdb.SeriesIterator, error) {
- // Special handling for "_name" to match measurement name.
- if bytes.Equal(key, []byte("_name")) {
- if (op == influxql.EQ && bytes.Equal(value, name)) || (op == influxql.NEQ && !bytes.Equal(value, name)) {
- return fs.MeasurementSeriesIterator(name), nil
- }
- return nil, nil
- }
-
- if op == influxql.EQ {
- // Match a specific value.
- if len(value) != 0 {
- return fs.TagValueSeriesIterator(name, key, value), nil
- }
-
- // Return all measurement series that have no values from this tag key.
- return DifferenceSeriesIterators(
- fs.MeasurementSeriesIterator(name),
- fs.TagKeySeriesIterator(name, key),
- ), nil
- }
-
- // Return all measurement series without this tag value.
- if len(value) != 0 {
- return DifferenceSeriesIterators(
- fs.MeasurementSeriesIterator(name),
- fs.TagValueSeriesIterator(name, key, value),
- ), nil
- }
-
- // Return all series across all values of this tag key.
- return fs.TagKeySeriesIterator(name, key), nil
+func (itr *fileSetTagKeyIterator) Next() ([]byte, error) {
+ return itr.itr.Next()
}
-func (fs *FileSet) seriesByBinaryExprRegexIterator(name, key []byte, value *regexp.Regexp, op influxql.Token) (tsdb.SeriesIterator, error) {
- // Special handling for "_name" to match measurement name.
- if bytes.Equal(key, []byte("_name")) {
- match := value.Match(name)
- if (op == influxql.EQREGEX && match) || (op == influxql.NEQREGEX && !match) {
- return newSeriesExprIterator(fs.MeasurementSeriesIterator(name), &influxql.BooleanLiteral{Val: true}), nil
- }
- return nil, nil
- }
- return fs.MatchTagValueSeriesIterator(name, key, value, op == influxql.EQREGEX), nil
+func (itr *fileSetTagKeyIterator) Close() error {
+ itr.once.Do(func() { itr.fs.Release() })
+ return itr.itr.Close()
}
-func (fs *FileSet) seriesByBinaryExprVarRefIterator(name, key []byte, value *influxql.VarRef, op influxql.Token) (tsdb.SeriesIterator, error) {
- if op == influxql.EQ {
- return IntersectSeriesIterators(
- fs.TagKeySeriesIterator(name, key),
- fs.TagKeySeriesIterator(name, []byte(value.Val)),
- ), nil
- }
-
- return DifferenceSeriesIterators(
- fs.TagKeySeriesIterator(name, key),
- fs.TagKeySeriesIterator(name, []byte(value.Val)),
- ), nil
+// fileSetTagValueIterator attaches a fileset to an iterator that is released on close.
+type fileSetTagValueIterator struct {
+ once sync.Once
+ fs *FileSet
+ itr tsdb.TagValueIterator
}
-// buildFilters builds a series existence filter for each compaction level.
-func (fs *FileSet) buildFilters() error {
- if len(fs.levels) == 0 {
- return nil
- }
-
- // Move past log files (level=0).
- files := fs.files
- for len(files) > 0 && files[0].Level() == 0 {
- files = files[1:]
- }
-
- // Build filters for each level where the filter is non-existent.
- for level := range fs.levels {
- // Clear filter if no files remain or next file is at a higher level.
- if len(files) == 0 || files[0].Level() > level {
- fs.filters[level] = nil
- continue
- }
-
- // Skip files at this level if filter already exists.
- if fs.filters[level] != nil {
- for len(files) > 0 && files[0].Level() == level {
- files = files[1:]
- }
- continue
- }
-
- // Build new filter from files at this level.
- fs.filters[level] = bloom.NewFilter(fs.levels[level].M, fs.levels[level].K)
- for len(files) > 0 && files[0].Level() == level {
- if err := fs.filters[level].Merge(files[0].Filter()); err != nil {
- return err
- }
- files = files[1:]
- }
- }
-
- return nil
+func newFileSetTagValueIterator(fs *FileSet, itr tsdb.TagValueIterator) *fileSetTagValueIterator {
+ return &fileSetTagValueIterator{fs: fs, itr: itr}
}
-// File represents a log or index file.
-type File interface {
- Close() error
- Path() string
-
- ID() int
- Level() int
-
- Measurement(name []byte) MeasurementElem
- MeasurementIterator() MeasurementIterator
- HasSeries(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool)
- Series(name []byte, tags models.Tags) tsdb.SeriesElem
- SeriesN() uint64
-
- TagKey(name, key []byte) TagKeyElem
- TagKeyIterator(name []byte) TagKeyIterator
-
- TagValue(name, key, value []byte) TagValueElem
- TagValueIterator(name, key []byte) TagValueIterator
-
- // Series iteration.
- SeriesIterator() tsdb.SeriesIterator
- MeasurementSeriesIterator(name []byte) tsdb.SeriesIterator
- TagKeySeriesIterator(name, key []byte) tsdb.SeriesIterator
- TagValueSeriesIterator(name, key, value []byte) tsdb.SeriesIterator
-
- // Sketches for cardinality estimation
- MergeSeriesSketches(s, t estimator.Sketch) error
- MergeMeasurementsSketches(s, t estimator.Sketch) error
-
- // Series existence bloom filter.
- Filter() *bloom.Filter
-
- // Reference counting.
- Retain()
- Release()
-
- // Size of file on disk
- Size() int64
+func (itr *fileSetTagValueIterator) Next() ([]byte, error) {
+ return itr.itr.Next()
}
-type Files []File
-
-func (a Files) IDs() []int {
- ids := make([]int, len(a))
- for i := range a {
- ids[i] = a[i].ID()
- }
- return ids
+func (itr *fileSetTagValueIterator) Close() error {
+ itr.once.Do(func() { itr.fs.Release() })
+ return itr.itr.Close()
}
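
The fileSet*Iterator wrappers above pair an iterator with a retained FileSet and use sync.Once so the fileset is released exactly once, even if Close is called more than once. The sketch below shows that release-on-close pattern with a stand-in refcounted resource and a slice-backed iterator in place of the tsdb types.

package main

import (
	"fmt"
	"sync"
)

// refCounted is a stand-in for FileSet: retained while in use, released when done.
type refCounted struct{ refs int }

func (r *refCounted) Retain()  { r.refs++ }
func (r *refCounted) Release() { r.refs-- }

// iterator is a minimal stand-in for tsdb.SeriesIDIterator.
type iterator interface {
	Next() (uint64, error)
	Close() error
}

// sliceIterator yields ids from a slice; a zero id marks exhaustion.
type sliceIterator struct{ ids []uint64 }

func (it *sliceIterator) Next() (uint64, error) {
	if len(it.ids) == 0 {
		return 0, nil
	}
	id := it.ids[0]
	it.ids = it.ids[1:]
	return id, nil
}
func (it *sliceIterator) Close() error { return nil }

// releasingIterator forwards Next and guarantees the underlying resource is
// released exactly once, on Close, mirroring fileSetSeriesIDIterator above.
type releasingIterator struct {
	once sync.Once
	rc   *refCounted
	itr  iterator
}

func (it *releasingIterator) Next() (uint64, error) { return it.itr.Next() }
func (it *releasingIterator) Close() error {
	it.once.Do(it.rc.Release)
	return it.itr.Close()
}

func main() {
	rc := &refCounted{}
	rc.Retain()
	itr := &releasingIterator{rc: rc, itr: &sliceIterator{ids: []uint64{1, 2, 3}}}
	for {
		id, _ := itr.Next()
		if id == 0 {
			break
		}
		fmt.Println(id)
	}
	itr.Close()
	itr.Close() // idempotent: the resource is released only once
	fmt.Println("refs:", rc.refs)
}
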
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set_test.go
index be91eb94ac..1c2a44f91c 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set_test.go
@@ -2,14 +2,20 @@ package tsi1_test
import (
"fmt"
+ "reflect"
+ "sort"
"testing"
"github.com/influxdata/influxdb/models"
+ "github.com/influxdata/influxdb/tsdb"
)
// Ensure fileset can return an iterator over all series in the index.
-func TestFileSet_SeriesIterator(t *testing.T) {
- idx := MustOpenIndex()
+func TestFileSet_SeriesIDIterator(t *testing.T) {
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ idx := MustOpenIndex(sfile.SeriesFile, 1)
defer idx.Close()
// Create initial set of series.
@@ -23,22 +29,22 @@ func TestFileSet_SeriesIterator(t *testing.T) {
// Verify initial set of series.
idx.Run(t, func(t *testing.T) {
- fs := idx.RetainFileSet()
+ fs, err := idx.PartitionAt(0).RetainFileSet()
+ if err != nil {
+ t.Fatal(err)
+ }
defer fs.Release()
- itr := fs.SeriesIterator()
+ itr := fs.SeriesFile().SeriesIDIterator()
if itr == nil {
t.Fatal("expected iterator")
}
-
- if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region east}]` {
- t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String())
- } else if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region west}]` {
- t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String())
- } else if e := itr.Next(); string(e.Name()) != `mem` || e.Tags().String() != `[{region east}]` {
- t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String())
- } else if e := itr.Next(); e != nil {
- t.Fatalf("expected nil series: %s/%s", e.Name(), e.Tags().String())
+ if result := MustReadAllSeriesIDIteratorString(fs.SeriesFile(), itr); !reflect.DeepEqual(result, []string{
+ "cpu,[{region east}]",
+ "cpu,[{region west}]",
+ "mem,[{region east}]",
+ }) {
+ t.Fatalf("unexpected keys: %s", result)
}
})
@@ -53,33 +59,35 @@ func TestFileSet_SeriesIterator(t *testing.T) {
// Verify additional series.
idx.Run(t, func(t *testing.T) {
- fs := idx.RetainFileSet()
+ fs, err := idx.PartitionAt(0).RetainFileSet()
+ if err != nil {
+ t.Fatal(err)
+ }
defer fs.Release()
- itr := fs.SeriesIterator()
+ itr := fs.SeriesFile().SeriesIDIterator()
if itr == nil {
t.Fatal("expected iterator")
}
- if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region east}]` {
- t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String())
- } else if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region north}]` {
- t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String())
- } else if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region west}]` {
- t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String())
- } else if e := itr.Next(); string(e.Name()) != `disk` || len(e.Tags()) != 0 {
- t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String())
- } else if e := itr.Next(); string(e.Name()) != `mem` || e.Tags().String() != `[{region east}]` {
- t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String())
- } else if e := itr.Next(); e != nil {
- t.Fatalf("expected nil series: %s/%s", e.Name(), e.Tags().String())
+ if result := MustReadAllSeriesIDIteratorString(fs.SeriesFile(), itr); !reflect.DeepEqual(result, []string{
+ "cpu,[{region east}]",
+ "cpu,[{region north}]",
+ "cpu,[{region west}]",
+ "disk,[]",
+ "mem,[{region east}]",
+ }) {
+ t.Fatalf("unexpected keys: %s", result)
}
})
}
// Ensure fileset can return an iterator over all series for one measurement.
-func TestFileSet_MeasurementSeriesIterator(t *testing.T) {
- idx := MustOpenIndex()
+func TestFileSet_MeasurementSeriesIDIterator(t *testing.T) {
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ idx := MustOpenIndex(sfile.SeriesFile, 1)
defer idx.Close()
// Create initial set of series.
@@ -93,20 +101,22 @@ func TestFileSet_MeasurementSeriesIterator(t *testing.T) {
// Verify initial set of series.
idx.Run(t, func(t *testing.T) {
- fs := idx.RetainFileSet()
+ fs, err := idx.PartitionAt(0).RetainFileSet()
+ if err != nil {
+ t.Fatal(err)
+ }
defer fs.Release()
- itr := fs.MeasurementSeriesIterator([]byte("cpu"))
+ itr := fs.MeasurementSeriesIDIterator([]byte("cpu"))
if itr == nil {
t.Fatal("expected iterator")
}
- if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region east}]` {
- t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String())
- } else if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region west}]` {
- t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String())
- } else if e := itr.Next(); e != nil {
- t.Fatalf("expected nil series: %s/%s", e.Name(), e.Tags().String())
+ if result := MustReadAllSeriesIDIteratorString(fs.SeriesFile(), itr); !reflect.DeepEqual(result, []string{
+ "cpu,[{region east}]",
+ "cpu,[{region west}]",
+ }) {
+ t.Fatalf("unexpected keys: %s", result)
}
})
@@ -120,29 +130,33 @@ func TestFileSet_MeasurementSeriesIterator(t *testing.T) {
// Verify additional series.
idx.Run(t, func(t *testing.T) {
- fs := idx.RetainFileSet()
+ fs, err := idx.PartitionAt(0).RetainFileSet()
+ if err != nil {
+ t.Fatal(err)
+ }
defer fs.Release()
- itr := fs.MeasurementSeriesIterator([]byte("cpu"))
+ itr := fs.MeasurementSeriesIDIterator([]byte("cpu"))
if itr == nil {
t.Fatalf("expected iterator")
}
- if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region east}]` {
- t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String())
- } else if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region north}]` {
- t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String())
- } else if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region west}]` {
- t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String())
- } else if e := itr.Next(); e != nil {
- t.Fatalf("expected nil series: %s/%s", e.Name(), e.Tags().String())
+ if result := MustReadAllSeriesIDIteratorString(fs.SeriesFile(), itr); !reflect.DeepEqual(result, []string{
+ "cpu,[{region east}]",
+ "cpu,[{region north}]",
+ "cpu,[{region west}]",
+ }) {
+ t.Fatalf("unexpected keys: %s", result)
}
})
}
// Ensure fileset can return an iterator over all measurements for the index.
func TestFileSet_MeasurementIterator(t *testing.T) {
- idx := MustOpenIndex()
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ idx := MustOpenIndex(sfile.SeriesFile, 1)
defer idx.Close()
// Create initial set of series.
@@ -155,7 +169,10 @@ func TestFileSet_MeasurementIterator(t *testing.T) {
// Verify initial set of series.
idx.Run(t, func(t *testing.T) {
- fs := idx.RetainFileSet()
+ fs, err := idx.PartitionAt(0).RetainFileSet()
+ if err != nil {
+ t.Fatal(err)
+ }
defer fs.Release()
itr := fs.MeasurementIterator()
@@ -182,7 +199,10 @@ func TestFileSet_MeasurementIterator(t *testing.T) {
// Verify additional series.
idx.Run(t, func(t *testing.T) {
- fs := idx.RetainFileSet()
+ fs, err := idx.PartitionAt(0).RetainFileSet()
+ if err != nil {
+ t.Fatal(err)
+ }
defer fs.Release()
itr := fs.MeasurementIterator()
@@ -204,7 +224,10 @@ func TestFileSet_MeasurementIterator(t *testing.T) {
// Ensure fileset can return an iterator over all keys for one measurement.
func TestFileSet_TagKeyIterator(t *testing.T) {
- idx := MustOpenIndex()
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ idx := MustOpenIndex(sfile.SeriesFile, 1)
defer idx.Close()
// Create initial set of series.
@@ -218,7 +241,10 @@ func TestFileSet_TagKeyIterator(t *testing.T) {
// Verify initial set of series.
idx.Run(t, func(t *testing.T) {
- fs := idx.RetainFileSet()
+ fs, err := idx.PartitionAt(0).RetainFileSet()
+ if err != nil {
+ t.Fatal(err)
+ }
defer fs.Release()
itr := fs.TagKeyIterator([]byte("cpu"))
@@ -245,7 +271,10 @@ func TestFileSet_TagKeyIterator(t *testing.T) {
// Verify additional series.
idx.Run(t, func(t *testing.T) {
- fs := idx.RetainFileSet()
+ fs, err := idx.PartitionAt(0).RetainFileSet()
+ if err != nil {
+ t.Fatal(err)
+ }
defer fs.Release()
itr := fs.TagKeyIterator([]byte("cpu"))
@@ -265,60 +294,22 @@ func TestFileSet_TagKeyIterator(t *testing.T) {
})
}
-var (
- byteSliceResult [][]byte
- tagsSliceResult []models.Tags
-)
-
-func BenchmarkFileset_FilterNamesTags(b *testing.B) {
- idx := MustOpenIndex()
- defer idx.Close()
-
- allNames := make([][]byte, 0, 2000*1000)
- allTags := make([]models.Tags, 0, 2000*1000)
-
- for i := 0; i < 2000; i++ {
- for j := 0; j < 1000; j++ {
- name := []byte(fmt.Sprintf("measurement-%d", i))
- tags := models.NewTags(map[string]string{"host": fmt.Sprintf("server-%d", j)})
- allNames = append(allNames, name)
- allTags = append(allTags, tags)
- }
+func MustReadAllSeriesIDIteratorString(sfile *tsdb.SeriesFile, itr tsdb.SeriesIDIterator) []string {
+ // Read all ids.
+ ids, err := tsdb.ReadAllSeriesIDIterator(itr)
+ if err != nil {
+ panic(err)
}
- if err := idx.CreateSeriesListIfNotExists(nil, allNames, allTags); err != nil {
- b.Fatal(err)
- }
- // idx.CheckFastCompaction()
-
- fs := idx.RetainFileSet()
- defer fs.Release()
-
- b.ReportAllocs()
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- b.StopTimer()
- names := [][]byte{
- []byte("foo"),
- []byte("measurement-222"), // filtered
- []byte("measurement-222"), // kept (tags won't match)
- []byte("measurements-1"),
- []byte("measurement-900"), // filtered
- []byte("measurement-44444"),
- []byte("bar"),
- }
+ // Convert to keys and sort.
+ keys := sfile.SeriesKeys(ids)
+ sort.Slice(keys, func(i, j int) bool { return tsdb.CompareSeriesKeys(keys[i], keys[j]) == -1 })
- tags := []models.Tags{
- nil,
- models.NewTags(map[string]string{"host": "server-297"}), // filtered
- models.NewTags(map[string]string{"host": "wrong"}),
- nil,
- models.NewTags(map[string]string{"host": "server-1026"}), // filtered
- models.NewTags(map[string]string{"host": "server-23"}), // kept (measurement won't match)
- models.NewTags(map[string]string{"host": "zoo"}),
- }
- b.StartTimer()
- byteSliceResult, tagsSliceResult = fs.FilterNamesTags(names, tags)
+ // Convert to strings.
+ a := make([]string, len(keys))
+ for i := range a {
+ name, tags := tsdb.ParseSeriesKey(keys[i])
+ a[i] = fmt.Sprintf("%s,%s", name, tags.String())
}
+ return a
}
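
MustReadAllSeriesIDIteratorString drains the iterator, resolves ids to series keys, sorts them, and formats them for a reflect.DeepEqual comparison. A simplified sketch of that shape is shown below, where a plain map stands in for the series-file lookup and the ids are already collected.

package main

import (
	"fmt"
	"sort"
)

// readAllSorted maps ids to series keys through a lookup table and returns
// them sorted, matching the structure of the test helper above.
func readAllSorted(ids []uint64, keysByID map[uint64]string) []string {
	a := make([]string, 0, len(ids))
	for _, id := range ids {
		if key, ok := keysByID[id]; ok {
			a = append(a, key)
		}
	}
	sort.Strings(a)
	return a
}

func main() {
	keysByID := map[uint64]string{
		1: "cpu,[{region east}]",
		2: "cpu,[{region west}]",
		3: "mem,[{region east}]",
	}
	fmt.Println(readAllSorted([]uint64{3, 1, 2}, keysByID))
}
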
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index.go
index 55b7d4a599..f8f6043e29 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index.go
@@ -1,131 +1,154 @@
package tsi1
import (
- "crypto/rand"
- "encoding/json"
"errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"regexp"
- "sort"
+ "runtime"
"strconv"
- "strings"
"sync"
- "time"
+ "sync/atomic"
+ "github.com/cespare/xxhash"
"github.com/influxdata/influxdb/models"
- "github.com/influxdata/influxdb/pkg/bytesutil"
"github.com/influxdata/influxdb/pkg/estimator"
- "github.com/influxdata/influxdb/query"
+ "github.com/influxdata/influxdb/pkg/estimator/hll"
+ "github.com/influxdata/influxdb/pkg/slices"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxql"
"go.uber.org/zap"
)
-const (
- // IndexName is the name of the index.
- IndexName = "tsi1"
-
- // Version is the current version of the TSI index.
- Version = 1
-)
-
-// Default compaction thresholds.
-const (
- DefaultMaxLogFileSize = 5 * 1024 * 1024
-)
+// IndexName is the name of the index.
+const IndexName = "tsi1"
func init() {
- tsdb.RegisterIndex(IndexName, func(id uint64, database, path string, opt tsdb.EngineOptions) tsdb.Index {
- idx := NewIndex()
- idx.ShardID = id
- idx.Database = database
- idx.Path = path
- idx.options = opt
+ // FIXME(edd): Remove this.
+ if os.Getenv("TSI_PARTITIONS") != "" {
+ i, err := strconv.Atoi(os.Getenv("TSI_PARTITIONS"))
+ if err != nil {
+ panic(err)
+ }
+ DefaultPartitionN = uint64(i)
+ }
+
+ tsdb.RegisterIndex(IndexName, func(_ uint64, db, path string, _ *tsdb.SeriesIDSet, sfile *tsdb.SeriesFile, _ tsdb.EngineOptions) tsdb.Index {
+ idx := NewIndex(sfile, WithPath(path))
+ idx.database = db
return idx
})
}
-// File extensions.
-const (
- LogFileExt = ".tsl"
- IndexFileExt = ".tsi"
+// DefaultPartitionN determines how many shards the index will be partitioned into.
+//
+// NOTE: Currently, this must not be changed once a database is created. Further,
+// it must also be a power of 2.
+//
+var DefaultPartitionN uint64 = 8
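
The power-of-2 requirement on DefaultPartitionN suggests that a series key is routed to a partition by masking a hash rather than taking a modulo; xxhash is imported above. The sketch below works under that assumption (the exact routing function is not part of this hunk) and uses the upstream github.com import path rather than the vendored alias.

package main

import (
	"fmt"

	"github.com/cespare/xxhash"
)

// partitionN is assumed to be a power of two so a bit mask can replace a
// modulo when routing series keys to partitions.
const partitionN uint64 = 8

// partitionIdx picks a partition for a series key by hashing and masking.
func partitionIdx(key []byte) uint64 {
	return xxhash.Sum64(key) & (partitionN - 1) // valid only because partitionN is a power of two
}

func main() {
	fmt.Println(partitionIdx([]byte("cpu,host=server-1")))
	fmt.Println(partitionIdx([]byte("mem,host=server-2")))
}
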
- CompactingExt = ".compacting"
-)
+// An IndexOption is a functional option for changing the configuration of
+// an Index.
+type IndexOption func(i *Index)
-// ManifestFileName is the name of the index manifest file.
-const ManifestFileName = "MANIFEST"
+// WithPath sets the root path of the Index
+var WithPath = func(path string) IndexOption {
+ return func(i *Index) {
+ i.path = path
+ }
+}
+
+// DisableCompactions disables compactions on the Index.
+var DisableCompactions = func() IndexOption {
+ return func(i *Index) {
+ i.disableCompactions = true
+ }
+}
-// Ensure index implements the interface.
-var _ tsdb.Index = &Index{}
+// WithLogger sets the logger for the Index.
+var WithLogger = func(l zap.Logger) IndexOption {
+ return func(i *Index) {
+ i.logger = l.With(zap.String("index", "tsi"))
+ }
+}
// Index represents a collection of layered index files and WAL.
type Index struct {
- mu sync.RWMutex
- opened bool
- options tsdb.EngineOptions
-
- activeLogFile *LogFile // current log file
- fileSet *FileSet // current file set
- seq int // file id sequence
-
- // Compaction management
- levels []CompactionLevel // compaction levels
- levelCompacting []bool // level compaction status
+ mu sync.RWMutex
+ partitions []*Partition
+ opened bool
- // Close management.
- once sync.Once
- closing chan struct{}
- wg sync.WaitGroup
+ // The following can be set when initialising an Index.
+ path string // Root directory of the index partitions.
+ disableCompactions bool // Initially disables compactions on the index.
+ logger *zap.Logger // Index's logger.
- // Fieldset shared with engine.
- fieldset *tsdb.MeasurementFieldSet
+ sfile *tsdb.SeriesFile // series lookup file
- // Associated shard info.
- ShardID uint64
+ // Index's version.
+ version int
// Name of database.
- Database string
+ database string
- // Root directory of the index files.
- Path string
-
- // Log file compaction thresholds.
- MaxLogFileSize int64
+ // Number of partitions used by the index.
+ PartitionN uint64
+}
- // Frequency of compaction checks.
- CompactionEnabled bool
- CompactionMonitorInterval time.Duration
+// NewIndex returns a new instance of Index.
+func NewIndex(sfile *tsdb.SeriesFile, options ...IndexOption) *Index {
+ idx := &Index{
+ logger: zap.NewNop(),
+ version: Version,
+ sfile: sfile,
+ PartitionN: DefaultPartitionN,
+ }
- logger *zap.Logger
+ for _, option := range options {
+ option(idx)
+ }
- // Index's version.
- version int
+ return idx
}
-// NewIndex returns a new instance of Index.
-func NewIndex() *Index {
- return &Index{
- closing: make(chan struct{}),
+// Database returns the name of the database the index was initialized with.
+func (i *Index) Database() string {
+ return i.database
+}
- // Default compaction thresholds.
- MaxLogFileSize: DefaultMaxLogFileSize,
- CompactionEnabled: true,
+// WithLogger sets the logger on the index after it's been created.
+//
+// It's not safe to call WithLogger after the index has been opened, or before
+// it has been closed.
+func (i *Index) WithLogger(l *zap.Logger) {
+ i.mu.Lock()
+ defer i.mu.Unlock()
- logger: zap.NewNop(),
- version: Version,
+ for i, p := range i.partitions {
+ p.logger = l.With(zap.String("index", "tsi"), zap.String("partition", fmt.Sprint(i+1)))
}
+ i.logger = l.With(zap.String("index", "tsi"))
}
-// ErrIncompatibleVersion is returned when attempting to read from an
-// incompatible tsi1 manifest file.
-var ErrIncompatibleVersion = errors.New("incompatible tsi1 index MANIFEST")
-
+// Type returns the type of Index this is.
func (i *Index) Type() string { return IndexName }
+// SeriesFile returns the series file attached to the index.
+func (i *Index) SeriesFile() *tsdb.SeriesFile { return i.sfile }
+
+// SeriesIDSet returns the set of series ids associated with series in this
+// index. Any series IDs for series no longer present in the index are filtered out.
+func (i *Index) SeriesIDSet() *tsdb.SeriesIDSet {
+ seriesIDSet := tsdb.NewSeriesIDSet()
+ others := make([]*tsdb.SeriesIDSet, 0, i.PartitionN)
+ for _, p := range i.partitions {
+ others = append(others, p.seriesSet)
+ }
+ seriesIDSet.Merge(others...)
+ return seriesIDSet
+}
+
// Open opens the index.
func (i *Index) Open() error {
i.mu.Lock()
@@ -135,1325 +158,672 @@ func (i *Index) Open() error {
return errors.New("index already open")
}
- // Create directory if it doesn't exist.
- if err := os.MkdirAll(i.Path, 0777); err != nil {
+ // Ensure root exists.
+ if err := os.MkdirAll(i.path, 0777); err != nil {
return err
}
- // Read manifest file.
- m, err := ReadManifestFile(i.ManifestPath())
- if os.IsNotExist(err) {
- m = NewManifest(i.ManifestPath())
- } else if err != nil {
- return err
+ // Initialise index partitions.
+ i.partitions = make([]*Partition, i.PartitionN)
+ for j := 0; j < len(i.partitions); j++ {
+ p := NewPartition(i.sfile, filepath.Join(i.path, fmt.Sprint(j)))
+ p.Database = i.database
+ p.compactionsDisabled = i.disableCompactions
+ p.logger = i.logger.With(zap.String("partition", fmt.Sprint(j+1)))
+ i.partitions[j] = p
}
- // Check to see if the MANIFEST file is compatible with the current Index.
- if err := m.Validate(); err != nil {
- return err
- }
+ // Open all the Partitions in parallel.
+ partitionN := len(i.partitions)
+ n := i.availableThreads()
- // Copy compaction levels to the index.
- i.levels = make([]CompactionLevel, len(m.Levels))
- copy(i.levels, m.Levels)
-
- // Set up flags to track whether a level is compacting.
- i.levelCompacting = make([]bool, len(i.levels))
-
- // Open each file in the manifest.
- var files []File
- for _, filename := range m.Files {
- switch filepath.Ext(filename) {
- case LogFileExt:
- f, err := i.openLogFile(filepath.Join(i.Path, filename))
- if err != nil {
- return err
- }
- files = append(files, f)
+ // Store results.
+ errC := make(chan error, partitionN)
- // Make first log file active, if within threshold.
- sz, _ := f.Stat()
- if i.activeLogFile == nil && sz < i.MaxLogFileSize {
- i.activeLogFile = f
- }
-
- case IndexFileExt:
- f, err := i.openIndexFile(filepath.Join(i.Path, filename))
- if err != nil {
- return err
+ // Run fn on each partition using a fixed number of goroutines.
+ var pidx uint32 // Index of maximum Partition being worked on.
+ for k := 0; k < n; k++ {
+ go func(k int) {
+ for {
+ idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on.
+ if idx >= partitionN {
+ return // No more work.
+ }
+ err := i.partitions[idx].Open()
+ errC <- err
}
- files = append(files, f)
- }
- }
- fs, err := NewFileSet(i.Database, i.levels, files)
- if err != nil {
- return err
- }
- fs.manifestSize = m.size
- i.fileSet = fs
-
- // Set initial sequnce number.
- i.seq = i.fileSet.MaxID()
-
- // Delete any files not in the manifest.
- if err := i.deleteNonManifestFiles(m); err != nil {
- return err
+ }(k)
}
- // Ensure a log file exists.
- if i.activeLogFile == nil {
- if err := i.prependActiveLogFile(); err != nil {
+ // Check for error
+ for i := 0; i < partitionN; i++ {
+ if err := <-errC; err != nil {
return err
}
}
// Mark opened.
i.opened = true
-
- // Send a compaction request on start up.
- i.compact()
-
+ i.logger.Info(fmt.Sprintf("index opened with %d partitions", partitionN))
return nil
}
-// openLogFile opens a log file and appends it to the index.
-func (i *Index) openLogFile(path string) (*LogFile, error) {
- f := NewLogFile(path)
- if err := f.Open(); err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// openIndexFile opens a log file and appends it to the index.
-func (i *Index) openIndexFile(path string) (*IndexFile, error) {
- f := NewIndexFile()
- f.SetPath(path)
- if err := f.Open(); err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// deleteNonManifestFiles removes all files not in the manifest.
-func (i *Index) deleteNonManifestFiles(m *Manifest) error {
- dir, err := os.Open(i.Path)
- if err != nil {
- return err
- }
- defer dir.Close()
-
- fis, err := dir.Readdir(-1)
- if err != nil {
- return err
- }
-
- // Loop over all files and remove any not in the manifest.
- for _, fi := range fis {
- filename := filepath.Base(fi.Name())
- if filename == ManifestFileName || m.HasFile(filename) {
- continue
- }
-
- if err := os.RemoveAll(filename); err != nil {
- return err
- }
+// Compact requests a compaction of partitions.
+func (i *Index) Compact() {
+ i.mu.Lock()
+ defer i.mu.Unlock()
+ for _, p := range i.partitions {
+ p.Compact()
}
-
- return nil
}
-// Wait returns once outstanding compactions have finished.
+// Wait blocks until all outstanding compactions have completed.
func (i *Index) Wait() {
- i.wg.Wait()
+ for _, p := range i.partitions {
+ p.Wait()
+ }
}
// Close closes the index.
func (i *Index) Close() error {
- // Wait for goroutines to finish.
- i.once.Do(func() { close(i.closing) })
- i.wg.Wait()
-
- // Lock index and close remaining
+ // Lock index and close partitions.
i.mu.Lock()
defer i.mu.Unlock()
- // Close log files.
- for _, f := range i.fileSet.files {
- f.Close()
+ for _, p := range i.partitions {
+ if err := p.Close(); err != nil {
+ return err
+ }
}
- i.fileSet.files = nil
return nil
}
-// NextSequence returns the next file identifier.
-func (i *Index) NextSequence() int {
- i.mu.Lock()
- defer i.mu.Unlock()
- return i.nextSequence()
-}
+// Path returns the path the index was opened with.
+func (i *Index) Path() string { return i.path }
-func (i *Index) nextSequence() int {
- i.seq++
- return i.seq
+// PartitionAt returns the partition by index.
+func (i *Index) PartitionAt(index int) *Partition {
+ return i.partitions[index]
}
-// ManifestPath returns the path to the index's manifest file.
-func (i *Index) ManifestPath() string {
- return filepath.Join(i.Path, ManifestFileName)
+// partition returns the appropriate Partition for a provided series key.
+func (i *Index) partition(key []byte) *Partition {
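+ // Hash the series key and mask with PartitionN-1; because PartitionN must be
+ // a power of two, this is equivalent to a modulo by PartitionN.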
+ return i.partitions[int(xxhash.Sum64(key)&(i.PartitionN-1))]
}
-// Manifest returns a manifest for the index.
-func (i *Index) Manifest() *Manifest {
- m := &Manifest{
- Levels: i.levels,
- Files: make([]string, len(i.fileSet.files)),
- Version: i.version,
- path: i.ManifestPath(),
- }
-
- for j, f := range i.fileSet.files {
- m.Files[j] = filepath.Base(f.Path())
- }
-
- return m
+// partitionIdx returns the index of the partition that key belongs in.
+func (i *Index) partitionIdx(key []byte) int {
+ return int(xxhash.Sum64(key) & (i.PartitionN - 1))
}
-// WithLogger sets the logger for the index.
-func (i *Index) WithLogger(logger *zap.Logger) {
- i.mu.Lock()
- i.logger = logger.With(zap.String("index", "tsi"))
- i.mu.Unlock()
+// availableThreads returns the minimum of GOMAXPROCS and the number of
+// partitions in the Index.
+func (i *Index) availableThreads() int {
+ n := runtime.GOMAXPROCS(0)
+ if len(i.partitions) < n {
+ return len(i.partitions)
+ }
+ return n
}
// SetFieldSet sets a shared field set from the engine.
func (i *Index) SetFieldSet(fs *tsdb.MeasurementFieldSet) {
- i.mu.Lock()
- i.fieldset = fs
- i.mu.Unlock()
-}
-
-// RetainFileSet returns the current fileset and adds a reference count.
-func (i *Index) RetainFileSet() *FileSet {
- i.mu.RLock()
- fs := i.retainFileSet()
- i.mu.RUnlock()
- return fs
-}
-
-func (i *Index) retainFileSet() *FileSet {
- fs := i.fileSet
- fs.Retain()
- return fs
-}
-
-// FileN returns the active files in the file set.
-func (i *Index) FileN() int { return len(i.fileSet.files) }
-
-// prependActiveLogFile adds a new log file so that the current log file can be compacted.
-func (i *Index) prependActiveLogFile() error {
- // Open file and insert it into the first position.
- f, err := i.openLogFile(filepath.Join(i.Path, FormatLogFileName(i.nextSequence())))
- if err != nil {
- return err
+ for _, p := range i.partitions {
+ p.SetFieldSet(fs)
}
- i.activeLogFile = f
-
- // Prepend and generate new fileset.
- i.fileSet = i.fileSet.PrependLogFile(f)
+}
- // Write new manifest.
- m := i.Manifest()
- if err = m.Write(); err != nil {
- // TODO: Close index if write fails.
- return err
+// FieldSet returns the assigned fieldset.
+func (i *Index) FieldSet() *tsdb.MeasurementFieldSet {
+ if len(i.partitions) == 0 {
+ return nil
}
- i.fileSet.manifestSize = m.size
-
- return nil
+ return i.partitions[0].FieldSet()
}
-// ForEachMeasurementName iterates over all measurement names in the index.
+// ForEachMeasurementName iterates over all measurement names in the index,
+// applying fn. It returns the first error encountered, if any.
+//
+// ForEachMeasurementName does not call fn on each partition concurrently, so the
+// caller may provide an fn that is not goroutine safe.
func (i *Index) ForEachMeasurementName(fn func(name []byte) error) error {
- fs := i.RetainFileSet()
- defer fs.Release()
-
- itr := fs.MeasurementIterator()
- if itr == nil {
+ itr, err := i.MeasurementIterator()
+ if err != nil {
+ return err
+ } else if itr == nil {
return nil
}
+ defer itr.Close()
- for e := itr.Next(); e != nil; e = itr.Next() {
- if err := fn(e.Name()); err != nil {
+ // Iterate over all measurements.
+ for {
+ e, err := itr.Next()
+ if err != nil {
return err
+ } else if e == nil {
+ break
}
- }
+ if err := fn(e); err != nil {
+ return err
+ }
+ }
return nil
}
// MeasurementExists returns true if a measurement exists.
func (i *Index) MeasurementExists(name []byte) (bool, error) {
- fs := i.RetainFileSet()
- defer fs.Release()
- m := fs.Measurement(name)
- return m != nil && !m.Deleted(), nil
-}
-
-func (i *Index) MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) {
- fs := i.RetainFileSet()
- defer fs.Release()
-
- names, err := fs.MeasurementNamesByExpr(auth, expr)
-
- // Clone byte slices since they will be used after the fileset is released.
- return bytesutil.CloneSlice(names), err
-}
-
-func (i *Index) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
- fs := i.RetainFileSet()
- defer fs.Release()
-
- itr := fs.MeasurementIterator()
- if itr == nil {
- return nil, nil
- }
-
- var a [][]byte
- for e := itr.Next(); e != nil; e = itr.Next() {
- if re.Match(e.Name()) {
- // Clone bytes since they will be used after the fileset is released.
- a = append(a, bytesutil.Clone(e.Name()))
- }
- }
- return a, nil
-}
-
-// DropMeasurement deletes a measurement from the index.
-func (i *Index) DropMeasurement(name []byte) error {
- fs := i.RetainFileSet()
- defer fs.Release()
-
- // Delete all keys and values.
- if kitr := fs.TagKeyIterator(name); kitr != nil {
- for k := kitr.Next(); k != nil; k = kitr.Next() {
- // Delete key if not already deleted.
- if !k.Deleted() {
- if err := func() error {
- i.mu.RLock()
- defer i.mu.RUnlock()
- return i.activeLogFile.DeleteTagKey(name, k.Key())
- }(); err != nil {
- return err
+ n := i.availableThreads()
+
+ // Store errors
+ var found uint32 // Use this to signal we found the measurement.
+ errC := make(chan error, i.PartitionN)
+
+ // Check each partition for the measurement concurrently.
+ var pidx uint32 // Index of maximum Partition being worked on.
+ for k := 0; k < n; k++ {
+ go func() {
+ for {
+ idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to check
+ if idx >= len(i.partitions) {
+ return // No more work.
}
- }
- // Delete each value in key.
- if vitr := k.TagValueIterator(); vitr != nil {
- for v := vitr.Next(); v != nil; v = vitr.Next() {
- if !v.Deleted() {
- if err := func() error {
- i.mu.RLock()
- defer i.mu.RUnlock()
- return i.activeLogFile.DeleteTagValue(name, k.Key(), v.Value())
- }(); err != nil {
- return err
- }
- }
+ // Check if the measurement has been found. If it has, we don't
+ // need to check this partition and can just move on.
+ if atomic.LoadUint32(&found) == 1 {
+ errC <- nil
+ continue
}
- }
- }
- }
- // Delete all series in measurement.
- if sitr := fs.MeasurementSeriesIterator(name); sitr != nil {
- for s := sitr.Next(); s != nil; s = sitr.Next() {
- if !s.Deleted() {
- if err := func() error {
- i.mu.RLock()
- defer i.mu.RUnlock()
- return i.activeLogFile.DeleteSeries(s.Name(), s.Tags())
- }(); err != nil {
- return err
+ b, err := i.partitions[idx].MeasurementExists(name)
+ if b {
+ atomic.StoreUint32(&found, 1)
}
+ errC <- err
}
- }
+ }()
}
- // Mark measurement as deleted.
- if err := func() error {
- i.mu.RLock()
- defer i.mu.RUnlock()
- return i.activeLogFile.DeleteMeasurement(name)
- }(); err != nil {
- return err
- }
-
- // Check if the log file needs to be swapped.
- if err := i.CheckLogFile(); err != nil {
- return err
+ // Check for error
+ for i := 0; i < cap(errC); i++ {
+ if err := <-errC; err != nil {
+ return false, err
+ }
}
- return nil
+ // Check if we found the measurement.
+ return atomic.LoadUint32(&found) == 1, nil
}
-// CreateSeriesListIfNotExists creates a list of series if they doesn't exist in bulk.
-func (i *Index) CreateSeriesListIfNotExists(_, names [][]byte, tagsSlice []models.Tags) error {
- // All slices must be of equal length.
- if len(names) != len(tagsSlice) {
- return errors.New("names/tags length mismatch")
- }
-
- // Maintain reference count on files in file set.
- fs := i.RetainFileSet()
- defer fs.Release()
+// fetchByteValues is a helper for gathering values from each partition in the index,
+// based on some criteria.
+//
+// fn is a function that works on partition idx and calls into some method on
+// the partition that returns some ordered values.
+func (i *Index) fetchByteValues(fn func(idx int) ([][]byte, error)) ([][]byte, error) {
+ n := i.availableThreads()
+
+ // Store results.
+ names := make([][][]byte, i.PartitionN)
+ errC := make(chan error, i.PartitionN)
+
+ var pidx uint32 // Index of maximum Partition being worked on.
+ for k := 0; k < n; k++ {
+ go func() {
+ for {
+ idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on.
+ if idx >= len(i.partitions) {
+ return // No more work.
+ }
- // Filter out existing series. Exit if no new series exist.
- names, tagsSlice = fs.FilterNamesTags(names, tagsSlice)
- if len(names) == 0 {
- return nil
- }
+ pnames, err := fn(idx)
- // Ensure fileset cannot change during insert.
- i.mu.RLock()
- // Insert series into log file.
- if err := i.activeLogFile.AddSeriesList(names, tagsSlice); err != nil {
- i.mu.RUnlock()
- return err
+ // This is safe since there are no readers on names until all
+ // the writers are done.
+ names[idx] = pnames
+ errC <- err
+ }
+ }()
}
- i.mu.RUnlock()
- return i.CheckLogFile()
-}
-
-// InitializeSeries is a no-op. This only applies to the in-memory index.
-func (i *Index) InitializeSeries(key, name []byte, tags models.Tags) error {
- return nil
-}
-
-// CreateSeriesIfNotExists creates a series if it doesn't exist or is deleted.
-func (i *Index) CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error {
- if err := func() error {
- i.mu.RLock()
- defer i.mu.RUnlock()
-
- fs := i.retainFileSet()
- defer fs.Release()
-
- if fs.HasSeries(name, tags, nil) {
- return nil
- }
-
- if err := i.activeLogFile.AddSeries(name, tags); err != nil {
- return err
+ // Check for error
+ for i := 0; i < cap(errC); i++ {
+ if err := <-errC; err != nil {
+ return nil, err
}
- return nil
- }(); err != nil {
- return err
}
- // Swap log file, if necesssary.
- if err := i.CheckLogFile(); err != nil {
- return err
- }
- return nil
+ // It's now safe to read from names.
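+ // Each partition returns its values in sorted order, so the per-partition
+ // results can be merged into a single sorted slice.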
+ return slices.MergeSortedBytes(names[:]...), nil
}
-func (i *Index) DropSeries(key []byte, ts int64) error {
- if err := func() error {
- i.mu.RLock()
- defer i.mu.RUnlock()
-
- name, tags := models.ParseKey(key)
-
- mname := []byte(name)
- if err := i.activeLogFile.DeleteSeries(mname, tags); err != nil {
- return err
- }
-
- // Obtain file set after deletion because that may add a new log file.
- fs := i.retainFileSet()
- defer fs.Release()
-
- mm := fs.Measurement(mname)
- if mm == nil || mm.HasSeries() {
- return nil
- }
-
- // If no more series exist in the measurement then delete the measurement.
- if err := i.activeLogFile.DeleteMeasurement(mname); err != nil {
- return err
+// MeasurementIterator returns an iterator over all measurements.
+func (i *Index) MeasurementIterator() (tsdb.MeasurementIterator, error) {
+ itrs := make([]tsdb.MeasurementIterator, 0, len(i.partitions))
+ for _, p := range i.partitions {
+ itr, err := p.MeasurementIterator()
+ if err != nil {
+ tsdb.MeasurementIterators(itrs).Close()
+ return nil, err
+ } else if itr != nil {
+ itrs = append(itrs, itr)
}
- return nil
- }(); err != nil {
- return err
- }
-
- // Swap log file, if necesssary.
- if err := i.CheckLogFile(); err != nil {
- return err
}
- return nil
-}
-
-// SeriesSketches returns the two sketches for the index by merging all
-// instances sketches from TSI files and the WAL.
-func (i *Index) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {
- fs := i.RetainFileSet()
- defer fs.Release()
- return fs.SeriesSketches()
-}
-
-// MeasurementsSketches returns the two sketches for the index by merging all
-// instances of the type sketch types in all the index files.
-func (i *Index) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {
- fs := i.RetainFileSet()
- defer fs.Release()
- return fs.MeasurementsSketches()
+ return tsdb.MergeMeasurementIterators(itrs...), nil
}
-// SeriesN returns the number of unique non-tombstoned series in the index.
-// Since indexes are not shared across shards, the count returned by SeriesN
-// cannot be combined with other shard's results. If you need to count series
-// across indexes then use SeriesSketches and merge the results from other
-// indexes.
-func (i *Index) SeriesN() int64 {
- fs := i.RetainFileSet()
- defer fs.Release()
-
- var total int64
- for _, f := range fs.files {
- total += int64(f.SeriesN())
- }
- return total
-}
-
-// HasTagKey returns true if tag key exists.
-func (i *Index) HasTagKey(name, key []byte) (bool, error) {
- fs := i.RetainFileSet()
- defer fs.Release()
- return fs.HasTagKey(name, key), nil
-}
-
-// MeasurementTagKeysByExpr extracts the tag keys wanted by the expression.
-func (i *Index) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) {
- fs := i.RetainFileSet()
- defer fs.Release()
- return fs.MeasurementTagKeysByExpr(name, expr)
-}
-
-// TagKeyHasAuthorizedSeries determines if there exist authorized series for the
-// provided measurement name and tag key.
-func (i *Index) TagKeyHasAuthorizedSeries(auth query.Authorizer, name []byte, key string) bool {
- fs := i.RetainFileSet()
- defer fs.Release()
-
- itr := fs.TagValueIterator(name, []byte(key))
- for val := itr.Next(); val != nil; val = itr.Next() {
- if auth == nil || auth == query.OpenAuthorizer {
- return true
- }
-
- // Identify an authorized series.
- si := fs.TagValueSeriesIterator(name, []byte(key), val.Value())
- for se := si.Next(); se != nil; se = si.Next() {
- if auth.AuthorizeSeriesRead(i.Database, se.Name(), se.Tags()) {
- return true
- }
+// MeasurementSeriesIDIterator returns an iterator over all series in a measurement.
+func (i *Index) MeasurementSeriesIDIterator(name []byte) (tsdb.SeriesIDIterator, error) {
+ itrs := make([]tsdb.SeriesIDIterator, 0, len(i.partitions))
+ for _, p := range i.partitions {
+ itr, err := p.MeasurementSeriesIDIterator(name)
+ if err != nil {
+ tsdb.SeriesIDIterators(itrs).Close()
+ return nil, err
+ } else if itr != nil {
+ itrs = append(itrs, itr)
}
}
- return false
+ return tsdb.MergeSeriesIDIterators(itrs...), nil
}
-// MeasurementTagKeyValuesByExpr returns a set of tag values filtered by an expression.
-//
-// See tsm1.Engine.MeasurementTagKeyValuesByExpr for a fuller description of this
-// method.
-func (i *Index) MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error) {
- fs := i.RetainFileSet()
- defer fs.Release()
-
- if len(keys) == 0 {
- return nil, nil
- }
-
- results := make([][]string, len(keys))
- // If we haven't been provided sorted keys, then we need to sort them.
- if !keysSorted {
- sort.Sort(sort.StringSlice(keys))
- }
+// MeasurementNamesByRegex returns measurement names for the provided regex.
+func (i *Index) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
+ return i.fetchByteValues(func(idx int) ([][]byte, error) {
+ return i.partitions[idx].MeasurementNamesByRegex(re)
+ })
+}
- // No expression means that the values shouldn't be filtered, so we can
- // fetch them all.
- if expr == nil {
- for ki, key := range keys {
- itr := fs.TagValueIterator(name, []byte(key))
- if auth != nil {
- for val := itr.Next(); val != nil; val = itr.Next() {
- si := fs.TagValueSeriesIterator(name, []byte(key), val.Value())
- for se := si.Next(); se != nil; se = si.Next() {
- if auth.AuthorizeSeriesRead(i.Database, se.Name(), se.Tags()) {
- results[ki] = append(results[ki], string(val.Value()))
- break
- }
- }
- }
- } else {
- for val := itr.Next(); val != nil; val = itr.Next() {
- results[ki] = append(results[ki], string(val.Value()))
+// DropMeasurement deletes a measurement from the index. It returns the first
+// error encountered, if any.
+func (i *Index) DropMeasurement(name []byte) error {
+ n := i.availableThreads()
+
+ // Store results.
+ errC := make(chan error, i.PartitionN)
+
+ var pidx uint32 // Index of maximum Partition being worked on.
+ for k := 0; k < n; k++ {
+ go func() {
+ for {
+ idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on.
+ if idx >= len(i.partitions) {
+ return // No more work.
}
+ errC <- i.partitions[idx].DropMeasurement(name)
}
- }
- return results, nil
- }
-
- // This is the case where we have filtered series by some WHERE condition.
- // We only care about the tag values for the keys given the
- // filtered set of series ids.
- resultSet, err := fs.tagValuesByKeyAndExpr(auth, name, keys, expr, i.fieldset)
- if err != nil {
- return nil, err
- }
-
- // Convert result sets into []string
- for i, s := range resultSet {
- values := make([]string, 0, len(s))
- for v := range s {
- values = append(values, v)
- }
- sort.Sort(sort.StringSlice(values))
- results[i] = values
- }
- return results, nil
-}
-
-// ForEachMeasurementTagKey iterates over all tag keys in a measurement.
-func (i *Index) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error {
- fs := i.RetainFileSet()
- defer fs.Release()
-
- itr := fs.TagKeyIterator(name)
- if itr == nil {
- return nil
+ }()
}
- for e := itr.Next(); e != nil; e = itr.Next() {
- if err := fn(e.Key()); err != nil {
+ // Check for error
+ for i := 0; i < cap(errC); i++ {
+ if err := <-errC; err != nil {
return err
}
}
-
return nil
}
-// TagKeyCardinality always returns zero.
-// It is not possible to determine cardinality of tags across index files.
-func (i *Index) TagKeyCardinality(name, key []byte) int {
- return 0
-}
-
-func (i *Index) MeasurementSeriesKeysByExprIterator(name []byte, condition influxql.Expr) (tsdb.SeriesIterator, error) {
- fs := i.RetainFileSet()
- defer fs.Release()
-
- itr, err := fs.MeasurementSeriesByExprIterator(name, condition, i.fieldset)
- if err != nil {
- return nil, err
- } else if itr == nil {
- return nil, nil
+// CreateSeriesListIfNotExists creates a list of series in bulk if they don't already exist.
+func (i *Index) CreateSeriesListIfNotExists(_ [][]byte, names [][]byte, tagsSlice []models.Tags) error {
+ // All slices must be of equal length.
+ if len(names) != len(tagsSlice) {
+ return errors.New("names/tags length mismatch in index")
}
- return itr, err
-}
-// MeasurementSeriesKeysByExpr returns a list of series keys matching expr.
-func (i *Index) MeasurementSeriesKeysByExpr(name []byte, expr influxql.Expr) ([][]byte, error) {
- fs := i.RetainFileSet()
- defer fs.Release()
-
- keys, err := fs.MeasurementSeriesKeysByExpr(name, expr, i.fieldset)
+ // We need to move different series into collections for each partition
+ // to process.
+ pNames := make([][][]byte, i.PartitionN)
+ pTags := make([][]models.Tags, i.PartitionN)
- // Clone byte slices since they will be used after the fileset is released.
- return bytesutil.CloneSlice(keys), err
-}
+ // Determine partition for series using each series key.
+ buf := make([]byte, 2048)
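+ // Reuse one buffer for encoding series keys to avoid an allocation per series.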
+ for k, _ := range names {
+ buf = tsdb.AppendSeriesKey(buf[:0], names[k], tagsSlice[k])
-// TagSets returns an ordered list of tag sets for a measurement by dimension
-// and filtered by an optional conditional expression.
-func (i *Index) TagSets(name []byte, opt query.IteratorOptions) ([]*query.TagSet, error) {
- fs := i.RetainFileSet()
- defer fs.Release()
-
- itr, err := fs.MeasurementSeriesByExprIterator(name, opt.Condition, i.fieldset)
- if err != nil {
- return nil, err
- } else if itr == nil {
- return nil, nil
+ pidx := i.partitionIdx(buf)
+ pNames[pidx] = append(pNames[pidx], names[k])
+ pTags[pidx] = append(pTags[pidx], tagsSlice[k])
}
- // For every series, get the tag values for the requested tag keys i.e.
- // dimensions. This is the TagSet for that series. Series with the same
- // TagSet are then grouped together, because for the purpose of GROUP BY
- // they are part of the same composite series.
- tagSets := make(map[string]*query.TagSet, 64)
- var seriesN int
-
- if itr != nil {
- for e := itr.Next(); e != nil; e = itr.Next() {
- // Abort if the query was killed
- select {
- case <-opt.InterruptCh:
- return nil, query.ErrQueryInterrupted
- default:
- }
+ // Process each subset of series on each partition.
+ n := i.availableThreads()
- if opt.MaxSeriesN > 0 && seriesN > opt.MaxSeriesN {
- return nil, fmt.Errorf("max-select-series limit exceeded: (%d/%d)", seriesN, opt.MaxSeriesN)
- }
-
- if opt.Authorizer != nil && !opt.Authorizer.AuthorizeSeriesRead(i.Database, name, e.Tags()) {
- continue
- }
+ // Store errors.
+ errC := make(chan error, i.PartitionN)
- tags := make(map[string]string, len(opt.Dimensions))
-
- // Build the TagSet for this series.
- for _, dim := range opt.Dimensions {
- tags[dim] = e.Tags().GetString(dim)
- }
-
- // Convert the TagSet to a string, so it can be added to a map
- // allowing TagSets to be handled as a set.
- tagsAsKey := tsdb.MarshalTags(tags)
- tagSet, ok := tagSets[string(tagsAsKey)]
- if !ok {
- // This TagSet is new, create a new entry for it.
- tagSet = &query.TagSet{
- Tags: tags,
- Key: tagsAsKey,
+ var pidx uint32 // Index of maximum Partition being worked on.
+ for k := 0; k < n; k++ {
+ go func() {
+ for {
+ idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on.
+ if idx >= len(i.partitions) {
+ return // No more work.
}
+ errC <- i.partitions[idx].createSeriesListIfNotExists(pNames[idx], pTags[idx])
}
- // Associate the series and filter with the Tagset.
- tagSet.AddFilter(string(models.MakeKey(e.Name(), e.Tags())), e.Expr())
-
- // Ensure it's back in the map.
- tagSets[string(tagsAsKey)] = tagSet
- seriesN++
- }
- }
-
- // Sort the series in each tag set.
- for _, t := range tagSets {
- // Abort if the query was killed
- select {
- case <-opt.InterruptCh:
- return nil, query.ErrQueryInterrupted
- default:
- }
-
- sort.Sort(t)
- }
-
- // The TagSets have been created, as a map of TagSets. Just send
- // the values back as a slice, sorting for consistency.
- sortedTagsSets := make([]*query.TagSet, 0, len(tagSets))
- for _, v := range tagSets {
- sortedTagsSets = append(sortedTagsSets, v)
- }
- sort.Sort(byTagKey(sortedTagsSets))
-
- return sortedTagsSets, nil
-}
-
-// DiskSizeBytes returns the size of the index on disk.
-func (i *Index) DiskSizeBytes() int64 {
- fs := i.RetainFileSet()
- defer fs.Release()
- return fs.Size()
-}
-
-// SnapshotTo creates hard links to the file set into path.
-func (i *Index) SnapshotTo(path string) error {
- i.mu.Lock()
- defer i.mu.Unlock()
-
- fs := i.retainFileSet()
- defer fs.Release()
-
- // Flush active log file, if any.
- if err := i.activeLogFile.Flush(); err != nil {
- return err
- }
-
- if err := os.Mkdir(filepath.Join(path, "index"), 0777); err != nil {
- return err
+ }()
}
- // Link manifest.
- if err := os.Link(i.ManifestPath(), filepath.Join(path, "index", filepath.Base(i.ManifestPath()))); err != nil {
- return fmt.Errorf("error creating tsi manifest hard link: %q", err)
- }
-
- // Link files in directory.
- for _, f := range fs.files {
- if err := os.Link(f.Path(), filepath.Join(path, "index", filepath.Base(f.Path()))); err != nil {
- return fmt.Errorf("error creating tsi hard link: %q", err)
+ // Check for error
+ for i := 0; i < cap(errC); i++ {
+ if err := <-errC; err != nil {
+ return err
}
}
-
return nil
}
-func (i *Index) SetFieldName(measurement []byte, name string) {}
-func (i *Index) RemoveShard(shardID uint64) {}
-func (i *Index) AssignShard(k string, shardID uint64) {}
-
-func (i *Index) UnassignShard(k string, shardID uint64, ts int64) error {
- // This can be called directly once inmem is gone.
- return i.DropSeries([]byte(k), ts)
-}
-
-// SeriesPointIterator returns an influxql iterator over all series.
-func (i *Index) SeriesPointIterator(opt query.IteratorOptions) (query.Iterator, error) {
- // NOTE: The iterator handles releasing the file set.
- fs := i.RetainFileSet()
- return newSeriesPointIterator(fs, i.fieldset, opt), nil
-}
-
-// Compact requests a compaction of log files.
-func (i *Index) Compact() {
- i.mu.Lock()
- defer i.mu.Unlock()
- i.compact()
+// CreateSeriesIfNotExists creates a series if it doesn't exist or is deleted.
+func (i *Index) CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error {
+ return i.partition(key).createSeriesListIfNotExists([][]byte{name}, []models.Tags{tags})
}
-// compact compacts continguous groups of files that are not currently compacting.
-func (i *Index) compact() {
- if !i.CompactionEnabled {
- return
- }
-
- fs := i.retainFileSet()
- defer fs.Release()
-
- // Iterate over each level we are going to compact.
- // We skip the first level (0) because it is log files and they are compacted separately.
- // We skip the last level because the files have no higher level to compact into.
- minLevel, maxLevel := 1, len(i.levels)-2
- for level := minLevel; level <= maxLevel; level++ {
- // Skip level if it is currently compacting.
- if i.levelCompacting[level] {
- continue
- }
-
- // Collect contiguous files from the end of the level.
- files := fs.LastContiguousIndexFilesByLevel(level)
- if len(files) < 2 {
- continue
- } else if len(files) > MaxIndexMergeCount {
- files = files[len(files)-MaxIndexMergeCount:]
- }
-
- // Retain files during compaction.
- IndexFiles(files).Retain()
-
- // Mark the level as compacting.
- i.levelCompacting[level] = true
-
- // Execute in closure to save reference to the group within the loop.
- func(files []*IndexFile, level int) {
- // Start compacting in a separate goroutine.
- i.wg.Add(1)
- go func() {
- defer i.wg.Done()
-
- // Compact to a new level.
- i.compactToLevel(files, level+1)
-
- // Ensure compaction lock for the level is released.
- i.mu.Lock()
- i.levelCompacting[level] = false
- i.mu.Unlock()
-
- // Check for new compactions
- i.Compact()
- }()
- }(files, level)
- }
+// InitializeSeries is a no-op. This only applies to the in-memory index.
+func (i *Index) InitializeSeries(key, name []byte, tags models.Tags) error {
+ return nil
}
-// compactToLevel compacts a set of files into a new file. Replaces old files with
-// compacted file on successful completion. This runs in a separate goroutine.
-func (i *Index) compactToLevel(files []*IndexFile, level int) {
- assert(len(files) >= 2, "at least two index files are required for compaction")
- assert(level > 0, "cannot compact level zero")
-
- // Build a logger for this compaction.
- logger := i.logger.With(zap.String("token", generateCompactionToken()))
-
- // Files have already been retained by caller.
- // Ensure files are released only once.
- var once sync.Once
- defer once.Do(func() { IndexFiles(files).Release() })
-
- // Track time to compact.
- start := time.Now()
-
- // Create new index file.
- path := filepath.Join(i.Path, FormatIndexFileName(i.NextSequence(), level))
- f, err := os.Create(path)
- if err != nil {
- logger.Error("cannot create compation files", zap.Error(err))
- return
+// DropSeries drops the provided series from the index.
+func (i *Index) DropSeries(key []byte, ts int64) error {
+ // Remove from partition.
+ if err := i.partition(key).DropSeries(key, ts); err != nil {
+ return err
}
- defer f.Close()
- logger.Info("performing full compaction",
- zap.String("src", joinIntSlice(IndexFiles(files).IDs(), ",")),
- zap.String("dst", path),
- )
+ // Extract measurement name.
+ name, _ := models.ParseKey(key)
+ mname := []byte(name)
- // Compact all index files to new index file.
- lvl := i.levels[level]
- n, err := IndexFiles(files).CompactTo(f, lvl.M, lvl.K)
+ // Check if that was the last series for the measurement in the entire index.
+ itr, err := i.MeasurementSeriesIDIterator(mname)
if err != nil {
- logger.Error("cannot compact index files", zap.Error(err))
- return
- }
-
- // Close file.
- if err := f.Close(); err != nil {
- logger.Error("error closing index file", zap.Error(err))
- return
- }
-
- // Reopen as an index file.
- file := NewIndexFile()
- file.SetPath(path)
- if err := file.Open(); err != nil {
- logger.Error("cannot open new index file", zap.Error(err))
- return
- }
-
- // Obtain lock to swap in index file and write manifest.
- if err := func() error {
- i.mu.Lock()
- defer i.mu.Unlock()
-
- // Replace previous files with new index file.
- i.fileSet = i.fileSet.MustReplace(IndexFiles(files).Files(), file)
-
- // Write new manifest.
- var err error
- m := i.Manifest()
- if err = m.Write(); err != nil {
- // TODO: Close index if write fails.
- return err
- }
- i.fileSet.manifestSize = m.size
- return nil
- }(); err != nil {
- logger.Error("cannot write manifest", zap.Error(err))
- return
- }
-
- elapsed := time.Since(start)
- logger.Info("full compaction complete",
- zap.String("path", path),
- zap.String("elapsed", elapsed.String()),
- zap.Int64("bytes", n),
- zap.Int("kb_per_sec", int(float64(n)/elapsed.Seconds())/1024),
- )
-
- // Release old files.
- once.Do(func() { IndexFiles(files).Release() })
-
- // Close and delete all old index files.
- for _, f := range files {
- logger.Info("removing index file", zap.String("path", f.Path()))
-
- if err := f.Close(); err != nil {
- logger.Error("cannot close index file", zap.Error(err))
- return
- } else if err := os.Remove(f.Path()); err != nil {
- logger.Error("cannot remove index file", zap.Error(err))
- return
- }
- }
-}
-
-func (i *Index) Rebuild() {}
-
-func (i *Index) CheckLogFile() error {
- // Check log file size under read lock.
- if size := func() int64 {
- i.mu.RLock()
- defer i.mu.RUnlock()
- return i.activeLogFile.Size()
- }(); size < i.MaxLogFileSize {
+ return err
+ } else if itr == nil {
return nil
}
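+ // Skip tombstoned series so deleted entries don't keep the measurement alive.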
+ itr = tsdb.FilterUndeletedSeriesIDIterator(i.sfile, itr)
+ defer itr.Close()
- // If file size exceeded then recheck under write lock and swap files.
- i.mu.Lock()
- defer i.mu.Unlock()
- return i.checkLogFile()
-}
-
-func (i *Index) checkLogFile() error {
- if i.activeLogFile.Size() < i.MaxLogFileSize {
+ if e, err := itr.Next(); err != nil {
+ return err
+ } else if e.SeriesID != 0 {
return nil
}
- // Swap current log file.
- logFile := i.activeLogFile
-
- // Open new log file and insert it into the first position.
- if err := i.prependActiveLogFile(); err != nil {
+ // If no more series exist in the measurement then delete the measurement.
+ if err := i.DropMeasurement(mname); err != nil {
return err
}
-
- // Begin compacting in a background goroutine.
- i.wg.Add(1)
- go func() {
- defer i.wg.Done()
- i.compactLogFile(logFile)
- i.Compact() // check for new compactions
- }()
-
return nil
}
-// compactLogFile compacts f into a tsi file. The new file will share the
-// same identifier but will have a ".tsi" extension. Once the log file is
-// compacted then the manifest is updated and the log file is discarded.
-func (i *Index) compactLogFile(logFile *LogFile) {
- start := time.Now()
-
- // Retrieve identifier from current path.
- id := logFile.ID()
- assert(id != 0, "cannot parse log file id: %s", logFile.Path())
-
- // Build a logger for this compaction.
- logger := i.logger.With(
- zap.String("token", generateCompactionToken()),
- zap.Int("id", id),
- )
-
- // Create new index file.
- path := filepath.Join(i.Path, FormatIndexFileName(id, 1))
- f, err := os.Create(path)
- if err != nil {
- logger.Error("cannot create index file", zap.Error(err))
- return
- }
- defer f.Close()
+// MeasurementsSketches returns the two measurement sketches for the index by
+// merging the corresponding sketches from each partition.
+func (i *Index) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {
+ s, ts := hll.NewDefaultPlus(), hll.NewDefaultPlus()
+ for _, p := range i.partitions {
+ // Get partition's measurement sketches and merge.
+ ps, pts, err := p.MeasurementsSketches()
+ if err != nil {
+ return nil, nil, err
+ }
- // Compact log file to new index file.
- lvl := i.levels[1]
- n, err := logFile.CompactTo(f, lvl.M, lvl.K)
- if err != nil {
- logger.Error("cannot compact log file", zap.Error(err), zap.String("path", logFile.Path()))
- return
+ if err := s.Merge(ps); err != nil {
+ return nil, nil, err
+ } else if err := ts.Merge(pts); err != nil {
+ return nil, nil, err
+ }
}
- // Close file.
- if err := f.Close(); err != nil {
- logger.Error("cannot close log file", zap.Error(err))
- return
- }
+ return s, ts, nil
+}
- // Reopen as an index file.
- file := NewIndexFile()
- file.SetPath(path)
- if err := file.Open(); err != nil {
- logger.Error("cannot open compacted index file", zap.Error(err), zap.String("path", file.Path()))
- return
- }
+// SeriesN returns the number of unique non-tombstoned series in the index.
+// Since indexes are not shared across shards, the count returned by SeriesN
+// cannot be combined with other shard's results. If you need to count series
+// across indexes then use SeriesSketches and merge the results from other
+// indexes.
+func (i *Index) SeriesN() int64 {
+ return int64(i.sfile.SeriesCount())
+}
- // Obtain lock to swap in index file and write manifest.
- if err := func() error {
- i.mu.Lock()
- defer i.mu.Unlock()
+// HasTagKey returns true if the tag key exists. It returns the first error
+// encountered, if any.
+func (i *Index) HasTagKey(name, key []byte) (bool, error) {
+ n := i.availableThreads()
+
+ // Store errors
+ var found uint32 // Use this to signal we found the tag key.
+ errC := make(chan error, i.PartitionN)
+
+ // Check each partition for the tag key concurrently.
+ var pidx uint32 // Index of maximum Partition being worked on.
+ for k := 0; k < n; k++ {
+ go func() {
+ for {
+ idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to check
+ if idx >= len(i.partitions) {
+ return // No more work.
+ }
- // Replace previous log file with index file.
- i.fileSet = i.fileSet.MustReplace([]File{logFile}, file)
+ // Check if the tag key has already been found. If it has, we
+ // don't need to check this partition and can just move on.
+ if atomic.LoadUint32(&found) == 1 {
+ errC <- nil
+ continue
+ }
- // Write new manifest.
- var err error
- m := i.Manifest()
- if err = m.Write(); err != nil {
- // TODO: Close index if write fails.
- return err
- }
- i.fileSet.manifestSize = m.size
- return nil
- }(); err != nil {
- logger.Error("cannot update manifest", zap.Error(err))
- return
+ b, err := i.partitions[idx].HasTagKey(name, key)
+ if b {
+ atomic.StoreUint32(&found, 1)
+ }
+ errC <- err
+ }
+ }()
}
- elapsed := time.Since(start)
- logger.Error("log file compacted",
- zap.String("elapsed", elapsed.String()),
- zap.Int64("bytes", n),
- zap.Int("kb_per_sec", int(float64(n)/elapsed.Seconds())/1024),
- )
-
- // Closing the log file will automatically wait until the ref count is zero.
- if err := logFile.Close(); err != nil {
- logger.Error("cannot close log file", zap.Error(err))
- return
- } else if err := os.Remove(logFile.Path()); err != nil {
- logger.Error("cannot remove log file", zap.Error(err))
- return
+ // Check for error
+ for i := 0; i < cap(errC); i++ {
+ if err := <-errC; err != nil {
+ return false, err
+ }
}
- return
-}
-
-// seriesPointIterator adapts SeriesIterator to an influxql.Iterator.
-type seriesPointIterator struct {
- once sync.Once
- fs *FileSet
- fieldset *tsdb.MeasurementFieldSet
- mitr MeasurementIterator
- sitr tsdb.SeriesIterator
- opt query.IteratorOptions
-
- point query.FloatPoint // reusable point
-}
-
-// newSeriesPointIterator returns a new instance of seriesPointIterator.
-func newSeriesPointIterator(fs *FileSet, fieldset *tsdb.MeasurementFieldSet, opt query.IteratorOptions) *seriesPointIterator {
- return &seriesPointIterator{
- fs: fs,
- fieldset: fieldset,
- mitr: fs.MeasurementIterator(),
- point: query.FloatPoint{
- Aux: make([]interface{}, len(opt.Aux)),
- },
- opt: opt,
- }
+ // Check if we found the tag key.
+ return atomic.LoadUint32(&found) == 1, nil
}
-// Stats returns stats about the points processed.
-func (itr *seriesPointIterator) Stats() query.IteratorStats { return query.IteratorStats{} }
+// HasTagValue returns true if the tag value exists.
+func (i *Index) HasTagValue(name, key, value []byte) (bool, error) {
+ n := i.availableThreads()
-// Close closes the iterator.
-func (itr *seriesPointIterator) Close() error {
- itr.once.Do(func() { itr.fs.Release() })
- return nil
-}
+ // Store errors
+ var found uint32 // Use this to signal we found the tag value.
+ errC := make(chan error, i.PartitionN)
-// Next emits the next point in the iterator.
-func (itr *seriesPointIterator) Next() (*query.FloatPoint, error) {
- for {
- // Create new series iterator, if necessary.
- // Exit if there are no measurements remaining.
- if itr.sitr == nil {
- if itr.mitr == nil {
- return nil, nil
- }
+ // Check each partition for the tag value concurrently.
+ var pidx uint32 // Index of maximum Partition being worked on.
+ for k := 0; k < n; k++ {
+ go func() {
+ for {
+ idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to check
+ if idx >= len(i.partitions) {
+ return // No more work.
+ }
- m := itr.mitr.Next()
- if m == nil {
- return nil, nil
- }
+ // Check if the tag value has already been found. If it has, we
+ // don't need to check this partition and can just move on.
+ if atomic.LoadUint32(&found) == 1 {
+ errC <- nil
+ continue
+ }
- sitr, err := itr.fs.MeasurementSeriesByExprIterator(m.Name(), itr.opt.Condition, itr.fieldset)
- if err != nil {
- return nil, err
- } else if sitr == nil {
- continue
+ b, err := i.partitions[idx].HasTagValue(name, key, value)
+ if b {
+ atomic.StoreUint32(&found, 1)
+ }
+ errC <- err
}
- itr.sitr = sitr
- }
-
- // Read next series element.
- e := itr.sitr.Next()
- if e == nil {
- itr.sitr = nil
- continue
- }
+ }()
+ }
- // TODO(edd): It seems to me like this authorisation check should be
- // further down in the index. At this point we're going to be filtering
- // series that have already been materialised in the LogFiles and
- // IndexFiles.
- if itr.opt.Authorizer != nil && !itr.opt.Authorizer.AuthorizeSeriesRead(itr.fs.database, e.Name(), e.Tags()) {
- continue
+ // Check for error
+ for i := 0; i < cap(errC); i++ {
+ if err := <-errC; err != nil {
+ return false, err
}
+ }
- // Convert to a key.
- key := string(models.MakeKey(e.Name(), e.Tags()))
+ // Check if we found the tag value.
+ return atomic.LoadUint32(&found) == 1, nil
+}
- // Write auxiliary fields.
- for i, f := range itr.opt.Aux {
- switch f.Val {
- case "key":
- itr.point.Aux[i] = key
- }
+// TagKeyIterator returns an iterator for all keys across a single measurement.
+func (i *Index) TagKeyIterator(name []byte) (tsdb.TagKeyIterator, error) {
+ a := make([]tsdb.TagKeyIterator, 0, len(i.partitions))
+ for _, p := range i.partitions {
+ itr := p.TagKeyIterator(name)
+ if itr != nil {
+ a = append(a, itr)
}
- return &itr.point, nil
}
+ return tsdb.MergeTagKeyIterators(a...), nil
}
-// unionStringSets returns the union of two sets
-func unionStringSets(a, b map[string]struct{}) map[string]struct{} {
- other := make(map[string]struct{})
- for k := range a {
- other[k] = struct{}{}
- }
- for k := range b {
- other[k] = struct{}{}
+// TagValueIterator returns an iterator for all values across a single key.
+func (i *Index) TagValueIterator(name, key []byte) (tsdb.TagValueIterator, error) {
+ a := make([]tsdb.TagValueIterator, 0, len(i.partitions))
+ for _, p := range i.partitions {
+ itr := p.TagValueIterator(name, key)
+ if itr != nil {
+ a = append(a, itr)
+ }
}
- return other
+ return tsdb.MergeTagValueIterators(a...), nil
}
-// intersectStringSets returns the intersection of two sets.
-func intersectStringSets(a, b map[string]struct{}) map[string]struct{} {
- if len(a) < len(b) {
- a, b = b, a
- }
-
- other := make(map[string]struct{})
- for k := range a {
- if _, ok := b[k]; ok {
- other[k] = struct{}{}
+// TagKeySeriesIDIterator returns a series iterator for all values across a single key.
+func (i *Index) TagKeySeriesIDIterator(name, key []byte) (tsdb.SeriesIDIterator, error) {
+ a := make([]tsdb.SeriesIDIterator, 0, len(i.partitions))
+ for _, p := range i.partitions {
+ itr := p.TagKeySeriesIDIterator(name, key)
+ if itr != nil {
+ a = append(a, itr)
}
}
- return other
+ return tsdb.MergeSeriesIDIterators(a...), nil
}
-var fileIDRegex = regexp.MustCompile(`^L(\d+)-(\d+)\..+$`)
-
-// ParseFilename extracts the numeric id from a log or index file path.
-// Returns 0 if it cannot be parsed.
-func ParseFilename(name string) (level, id int) {
- a := fileIDRegex.FindStringSubmatch(filepath.Base(name))
- if a == nil {
- return 0, 0
+// TagValueSeriesIDIterator returns a series iterator for a single tag value.
+func (i *Index) TagValueSeriesIDIterator(name, key, value []byte) (tsdb.SeriesIDIterator, error) {
+ a := make([]tsdb.SeriesIDIterator, 0, len(i.partitions))
+ for _, p := range i.partitions {
+ itr := p.TagValueSeriesIDIterator(name, key, value)
+ if itr != nil {
+ a = append(a, itr)
+ }
}
-
- level, _ = strconv.Atoi(a[1])
- id, _ = strconv.Atoi(a[2])
- return id, level
+ return tsdb.MergeSeriesIDIterators(a...), nil
}
-// Manifest represents the list of log & index files that make up the index.
-// The files are listed in time order, not necessarily ID order.
-type Manifest struct {
- Levels []CompactionLevel `json:"levels,omitempty"`
- Files []string `json:"files,omitempty"`
- Version int `json:"version,omitempty"` // Version should be updated whenever the TSI format has changed.
-
- size int64 // Holds the on-disk size of the manifest.
- path string // location on disk of the manifest.
-}
+// MeasurementTagKeysByExpr extracts the tag keys wanted by the expression.
+func (i *Index) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) {
+ n := i.availableThreads()
+
+ // Store results.
+ keys := make([]map[string]struct{}, i.PartitionN)
+ errC := make(chan error, i.PartitionN)
+
+ var pidx uint32 // Index of maximum Partition being worked on.
+ for k := 0; k < n; k++ {
+ go func() {
+ for {
+ idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on.
+ if idx >= len(i.partitions) {
+ return // No more work.
+ }
-// NewManifest returns a new instance of Manifest with default compaction levels.
-func NewManifest(path string) *Manifest {
- m := &Manifest{
- Levels: make([]CompactionLevel, len(DefaultCompactionLevels)),
- Version: Version,
- path: path,
+ // This is safe since there are no readers on keys until all
+ // the writers are done.
+ tagKeys, err := i.partitions[idx].MeasurementTagKeysByExpr(name, expr)
+ keys[idx] = tagKeys
+ errC <- err
+ }
+ }()
}
- copy(m.Levels, DefaultCompactionLevels[:])
- return m
-}
-// HasFile returns true if name is listed in the log files or index files.
-func (m *Manifest) HasFile(name string) bool {
- for _, filename := range m.Files {
- if filename == name {
- return true
+ // Check for error
+ for i := 0; i < cap(errC); i++ {
+ if err := <-errC; err != nil {
+ return nil, err
}
}
- return false
-}
-
-// Validate checks if the Manifest's version is compatible with this version
-// of the tsi1 index.
-func (m *Manifest) Validate() error {
- // If we don't have an explicit version in the manifest file then we know
- // it's not compatible with the latest tsi1 Index.
- if m.Version != Version {
- return ErrIncompatibleVersion
- }
- return nil
-}
-// ReadManifestFile reads a manifest from a file path.
-// Write writes the manifest file to the provided path.
-func (m *Manifest) Write() error {
- buf, err := json.MarshalIndent(m, "", " ")
- if err != nil {
- return err
+ // Merge into single map.
+ result := keys[0]
+ for k := 1; k < len(i.partitions); k++ {
+ for k := range keys[k] {
+ result[k] = struct{}{}
+ }
}
- buf = append(buf, '\n')
- m.size = int64(len(buf))
- return ioutil.WriteFile(m.path, buf, 0666)
+ return result, nil
}
-// ReadManifestFile reads a manifest from a file path and returns the manifest
-// along with its size and any error.
-func ReadManifestFile(path string) (*Manifest, error) {
- buf, err := ioutil.ReadFile(path)
+// DiskSizeBytes returns the size of the index on disk.
+func (i *Index) DiskSizeBytes() int64 {
+ fs, err := i.RetainFileSet()
if err != nil {
- return nil, err
+ i.logger.Warn("Index is closing down")
+ return 0
}
+ defer fs.Release()
- // Decode manifest.
- var m Manifest
- if err := json.Unmarshal(buf, &m); err != nil {
- return nil, err
+ var manifestSize int64
+ // Get MANIFEST sizes from each partition.
+ for _, p := range i.partitions {
+ manifestSize += p.manifestSize
}
- // Set the size of the manifest.
- m.size = int64(len(buf))
- m.path = path
-
- return &m, nil
+ return fs.Size() + manifestSize
}
-func joinIntSlice(a []int, sep string) string {
- other := make([]string, len(a))
- for i := range a {
- other[i] = strconv.Itoa(a[i])
- }
- return strings.Join(other, sep)
+// TagKeyCardinality always returns zero.
+// It is not possible to determine cardinality of tags across index files, and
+// thus it cannot be done across partitions.
+func (i *Index) TagKeyCardinality(name, key []byte) int {
+ return 0
}
-// CompactionLevel represents a grouping of index files based on bloom filter
-// settings. By having the same bloom filter settings, the filters
-// can be merged and evaluated at a higher level.
-type CompactionLevel struct {
- // Bloom filter bit size & hash count
- M uint64 `json:"m,omitempty"`
- K uint64 `json:"k,omitempty"`
-}
+// RetainFileSet returns the set of all files across all partitions.
+// This is only needed when all files need to be retained for an operation.
+func (i *Index) RetainFileSet() (*FileSet, error) {
+ i.mu.RLock()
+ defer i.mu.RUnlock()
-// DefaultCompactionLevels is the default settings used by the index.
-var DefaultCompactionLevels = []CompactionLevel{
- {M: 0, K: 0}, // L0: Log files, no filter.
- {M: 1 << 25, K: 6}, // L1: Initial compaction
- {M: 1 << 25, K: 6}, // L2
- {M: 1 << 26, K: 6}, // L3
- {M: 1 << 27, K: 6}, // L4
- {M: 1 << 28, K: 6}, // L5
- {M: 1 << 29, K: 6}, // L6
- {M: 1 << 30, K: 6}, // L7
+ fs, _ := NewFileSet(i.database, nil, i.sfile, nil)
+ for _, p := range i.partitions {
+ pfs, err := p.RetainFileSet()
+ if err != nil {
+ fs.Close()
+ return nil, err
+ }
+ fs.files = append(fs.files, pfs.files...)
+ }
+ return fs, nil
}
-// MaxIndexMergeCount is the maximum number of files that can be merged together at once.
-const MaxIndexMergeCount = 2
-
-// MaxIndexFileSize is the maximum expected size of an index file.
-const MaxIndexFileSize = 4 * (1 << 30)
+func (i *Index) SetFieldName(measurement []byte, name string) {}
+func (i *Index) RemoveShard(shardID uint64) {}
+func (i *Index) AssignShard(k string, shardID uint64) {}
-// generateCompactionToken returns a short token to track an individual compaction.
-// It is only used for logging so it doesn't need strong uniqueness guarantees.
-func generateCompactionToken() string {
- token := make([]byte, 3)
- rand.Read(token)
- return fmt.Sprintf("%x", token)
+// UnassignShard removes the provided series key from the index. The naming of
+// this method stems from legacy index logic that used to track which shards
+// owned which series.
+func (i *Index) UnassignShard(k string, id uint64, ts int64) error {
+ // This can be called directly once inmem is gone.
+ return i.DropSeries([]byte(k), ts)
}
+
+func (i *Index) Rebuild() {}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file.go
index b43397b4ad..9be24fb29d 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file.go
@@ -9,7 +9,6 @@ import (
"sync"
"github.com/influxdata/influxdb/models"
- "github.com/influxdata/influxdb/pkg/bloom"
"github.com/influxdata/influxdb/pkg/estimator"
"github.com/influxdata/influxdb/pkg/mmap"
"github.com/influxdata/influxdb/tsdb"
@@ -25,14 +24,10 @@ const FileSignature = "TSI1"
const (
// IndexFile trailer fields
IndexFileVersionSize = 2
- SeriesBlockOffsetSize = 8
- SeriesBlockSizeSize = 8
MeasurementBlockOffsetSize = 8
MeasurementBlockSizeSize = 8
IndexFileTrailerSize = IndexFileVersionSize +
- SeriesBlockOffsetSize +
- SeriesBlockSizeSize +
MeasurementBlockOffsetSize +
MeasurementBlockSizeSize
)
@@ -49,7 +44,7 @@ type IndexFile struct {
data []byte
// Components
- sblk SeriesBlock
+ sfile *tsdb.SeriesFile
tblks map[string]*TagBlock // tag blocks by measurement name
mblk MeasurementBlock
@@ -57,9 +52,6 @@ type IndexFile struct {
level int
id int
- // Counters
- seriesN int64 // Number of unique series in this indexFile.
-
// Compaction tracking.
mu sync.RWMutex
compacting bool
@@ -69,8 +61,8 @@ type IndexFile struct {
}
// NewIndexFile returns a new instance of IndexFile.
-func NewIndexFile() *IndexFile {
- return &IndexFile{}
+func NewIndexFile(sfile *tsdb.SeriesFile) *IndexFile {
+ return &IndexFile{sfile: sfile}
}
// Open memory maps the data file at the file's path.
@@ -78,7 +70,7 @@ func (f *IndexFile) Open() error {
// Extract identifier from path name.
f.id, f.level = ParseFilename(f.Path())
- data, err := mmap.Map(f.Path())
+ data, err := mmap.Map(f.Path(), 0)
if err != nil {
return err
}
@@ -91,10 +83,9 @@ func (f *IndexFile) Close() error {
// Wait until all references are released.
f.wg.Wait()
- f.sblk = SeriesBlock{}
+ f.sfile = nil
f.tblks = nil
f.mblk = MeasurementBlock{}
- f.seriesN = 0
return mmap.Unmap(f.data)
}
@@ -110,9 +101,6 @@ func (f *IndexFile) SetPath(path string) { f.path = path }
// Level returns the compaction level for the file.
func (f *IndexFile) Level() int { return f.level }
-// Filter returns the series existence filter for the file.
-func (f *IndexFile) Filter() *bloom.Filter { return f.sblk.filter }
-
// Retain adds a reference count to the file.
func (f *IndexFile) Retain() { f.wg.Add(1) }
@@ -181,15 +169,6 @@ func (f *IndexFile) UnmarshalBinary(data []byte) error {
f.tblks[string(e.name)] = &tblk
}
- // Slice series list data.
- buf = data[t.SeriesBlock.Offset:]
- buf = buf[:t.SeriesBlock.Size]
-
- // Unmarshal series list.
- if err := f.sblk.UnmarshalBinary(buf); err != nil {
- return err
- }
-
// Save reference to entire data block.
f.data = data
@@ -232,9 +211,9 @@ func (f *IndexFile) TagValueIterator(name, key []byte) TagValueIterator {
return ke.TagValueIterator()
}
-// TagKeySeriesIterator returns a series iterator for a tag key and a flag
+// TagKeySeriesIDIterator returns a series iterator for a tag key and a flag
// indicating if a tombstone exists on the measurement or key.
-func (f *IndexFile) TagKeySeriesIterator(name, key []byte) tsdb.SeriesIterator {
+func (f *IndexFile) TagKeySeriesIDIterator(name, key []byte) tsdb.SeriesIDIterator {
tblk := f.tblks[string(name)]
if tblk == nil {
return nil
@@ -248,37 +227,31 @@ func (f *IndexFile) TagKeySeriesIterator(name, key []byte) tsdb.SeriesIterator {
// Merge all value series iterators together.
vitr := ke.TagValueIterator()
- var itrs []tsdb.SeriesIterator
+ var itrs []tsdb.SeriesIDIterator
for ve := vitr.Next(); ve != nil; ve = vitr.Next() {
sitr := &rawSeriesIDIterator{data: ve.(*TagBlockValueElem).series.data}
- itrs = append(itrs, newSeriesDecodeIterator(&f.sblk, sitr))
+ itrs = append(itrs, sitr)
}
- return MergeSeriesIterators(itrs...)
+ return tsdb.MergeSeriesIDIterators(itrs...)
}
-// TagValueSeriesIterator returns a series iterator for a tag value and a flag
+// TagValueSeriesIDIterator returns a series iterator for a tag value and a flag
// indicating if a tombstone exists on the measurement, key, or value.
-func (f *IndexFile) TagValueSeriesIterator(name, key, value []byte) tsdb.SeriesIterator {
+func (f *IndexFile) TagValueSeriesIDIterator(name, key, value []byte) tsdb.SeriesIDIterator {
tblk := f.tblks[string(name)]
if tblk == nil {
return nil
}
// Find value element.
- ve := tblk.TagValueElem(key, value)
- if ve == nil {
+ n, data := tblk.TagValueSeriesData(key, value)
+ if n == 0 {
return nil
}
// Create an iterator over value's series.
- return newSeriesDecodeIterator(
- &f.sblk,
- &rawSeriesIDIterator{
- n: ve.(*TagBlockValueElem).series.n,
- data: ve.(*TagBlockValueElem).series.data,
- },
- )
+ return &rawSeriesIDIterator{n: n, data: data}
}
// TagKey returns a tag key.
@@ -301,13 +274,7 @@ func (f *IndexFile) TagValue(name, key, value []byte) TagValueElem {
// HasSeries returns flags indicating if the series exists and if it is tombstoned.
func (f *IndexFile) HasSeries(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool) {
- return f.sblk.HasSeries(name, tags, buf)
-}
-
-// Series returns the series and a flag indicating if the series has been
-// tombstoned by the measurement.
-func (f *IndexFile) Series(name []byte, tags models.Tags) tsdb.SeriesElem {
- return f.sblk.Series(name, tags)
+ return f.sfile.HasSeries(name, tags, buf), false // TODO(benbjohnson): series tombstone
}
// TagValueElem returns an element for a measurement/tag/value.
@@ -333,12 +300,9 @@ func (f *IndexFile) TagKeyIterator(name []byte) TagKeyIterator {
return blk.TagKeyIterator()
}
-// MeasurementSeriesIterator returns an iterator over a measurement's series.
-func (f *IndexFile) MeasurementSeriesIterator(name []byte) tsdb.SeriesIterator {
- return &seriesDecodeIterator{
- itr: f.mblk.seriesIDIterator(name),
- sblk: &f.sblk,
- }
+// MeasurementSeriesIDIterator returns an iterator over a measurement's series.
+func (f *IndexFile) MeasurementSeriesIDIterator(name []byte) tsdb.SeriesIDIterator {
+ return f.mblk.SeriesIDIterator(name)
}
// MergeMeasurementsSketches merges the index file's series sketches into the provided
@@ -350,25 +314,6 @@ func (f *IndexFile) MergeMeasurementsSketches(s, t estimator.Sketch) error {
return t.Merge(f.mblk.tSketch)
}
-// SeriesN returns the total number of non-tombstoned series for the index file.
-func (f *IndexFile) SeriesN() uint64 {
- return uint64(f.sblk.seriesN - f.sblk.tombstoneN)
-}
-
-// SeriesIterator returns an iterator over all series.
-func (f *IndexFile) SeriesIterator() tsdb.SeriesIterator {
- return f.sblk.SeriesIterator()
-}
-
-// MergeSeriesSketches merges the index file's series sketches into the provided
-// sketches.
-func (f *IndexFile) MergeSeriesSketches(s, t estimator.Sketch) error {
- if err := s.Merge(f.sblk.sketch); err != nil {
- return err
- }
- return t.Merge(f.sblk.tsketch)
-}
-
// ReadIndexFileTrailer returns the index file trailer from data.
func ReadIndexFileTrailer(data []byte) (IndexFileTrailer, error) {
var t IndexFileTrailer
@@ -382,12 +327,6 @@ func ReadIndexFileTrailer(data []byte) (IndexFileTrailer, error) {
// Slice trailer data.
buf := data[len(data)-IndexFileTrailerSize:]
- // Read series list info.
- t.SeriesBlock.Offset = int64(binary.BigEndian.Uint64(buf[0:SeriesBlockOffsetSize]))
- buf = buf[SeriesBlockOffsetSize:]
- t.SeriesBlock.Size = int64(binary.BigEndian.Uint64(buf[0:SeriesBlockSizeSize]))
- buf = buf[SeriesBlockSizeSize:]
-
// Read measurement block info.
t.MeasurementBlock.Offset = int64(binary.BigEndian.Uint64(buf[0:MeasurementBlockOffsetSize]))
buf = buf[MeasurementBlockOffsetSize:]
@@ -399,11 +338,7 @@ func ReadIndexFileTrailer(data []byte) (IndexFileTrailer, error) {
// IndexFileTrailer represents meta data written to the end of the index file.
type IndexFileTrailer struct {
- Version int
- SeriesBlock struct {
- Offset int64
- Size int64
- }
+ Version int
MeasurementBlock struct {
Offset int64
Size int64
@@ -412,13 +347,6 @@ type IndexFileTrailer struct {
// WriteTo writes the trailer to w.
func (t *IndexFileTrailer) WriteTo(w io.Writer) (n int64, err error) {
- // Write series list info.
- if err := writeUint64To(w, uint64(t.SeriesBlock.Offset), &n); err != nil {
- return n, err
- } else if err := writeUint64To(w, uint64(t.SeriesBlock.Size), &n); err != nil {
- return n, err
- }
-
// Write measurement block info.
if err := writeUint64To(w, uint64(t.MeasurementBlock.Offset), &n); err != nil {
return n, err
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file_test.go
index edabd49761..69a359034c 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file_test.go
@@ -5,12 +5,16 @@ import (
"testing"
"github.com/influxdata/influxdb/models"
+ "github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxdb/tsdb/index/tsi1"
)
// Ensure a simple index file can be built and opened.
func TestCreateIndexFile(t *testing.T) {
- f, err := CreateIndexFile([]Series{
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ f, err := CreateIndexFile(sfile.SeriesFile, []Series{
{Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})},
{Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})},
{Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "east"})},
@@ -28,8 +32,11 @@ func TestCreateIndexFile(t *testing.T) {
// Ensure index file generation can be successfully built.
func TestGenerateIndexFile(t *testing.T) {
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
// Build generated index file.
- f, err := GenerateIndexFile(10, 3, 4)
+ f, err := GenerateIndexFile(sfile.SeriesFile, 10, 3, 4)
if err != nil {
t.Fatal(err)
}
@@ -44,13 +51,19 @@ func TestGenerateIndexFile(t *testing.T) {
func BenchmarkIndexFile_TagValueSeries(b *testing.B) {
b.Run("M=1,K=2,V=3", func(b *testing.B) {
- benchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(1, 2, 3))
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+ benchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(sfile.SeriesFile, 1, 2, 3))
})
b.Run("M=10,K=5,V=5", func(b *testing.B) {
- benchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(10, 5, 5))
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+ benchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(sfile.SeriesFile, 10, 5, 5))
})
b.Run("M=10,K=7,V=5", func(b *testing.B) {
- benchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(10, 7, 7))
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+ benchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(sfile.SeriesFile, 10, 7, 7))
})
}
@@ -68,8 +81,8 @@ func benchmarkIndexFile_TagValueSeries(b *testing.B, idx *tsi1.IndexFile) {
}
// CreateIndexFile creates an index file with a given set of series.
-func CreateIndexFile(series []Series) (*tsi1.IndexFile, error) {
- lf, err := CreateLogFile(series)
+func CreateIndexFile(sfile *tsdb.SeriesFile, series []Series) (*tsi1.IndexFile, error) {
+ lf, err := CreateLogFile(sfile, series)
if err != nil {
return nil, err
}
@@ -90,9 +103,9 @@ func CreateIndexFile(series []Series) (*tsi1.IndexFile, error) {
// GenerateIndexFile generates an index file from a set of series based on the count arguments.
// Total series returned will equal measurementN * tagN * valueN.
-func GenerateIndexFile(measurementN, tagN, valueN int) (*tsi1.IndexFile, error) {
+func GenerateIndexFile(sfile *tsdb.SeriesFile, measurementN, tagN, valueN int) (*tsi1.IndexFile, error) {
// Generate a new log file first.
- lf, err := GenerateLogFile(measurementN, tagN, valueN)
+ lf, err := GenerateLogFile(sfile, measurementN, tagN, valueN)
if err != nil {
return nil, err
}
@@ -111,8 +124,8 @@ func GenerateIndexFile(measurementN, tagN, valueN int) (*tsi1.IndexFile, error)
return &f, nil
}
-func MustGenerateIndexFile(measurementN, tagN, valueN int) *tsi1.IndexFile {
- f, err := GenerateIndexFile(measurementN, tagN, valueN)
+func MustGenerateIndexFile(sfile *tsdb.SeriesFile, measurementN, tagN, valueN int) *tsi1.IndexFile {
+ f, err := GenerateIndexFile(sfile, measurementN, tagN, valueN)
if err != nil {
panic(err)
}
@@ -128,7 +141,7 @@ var indexFileCache struct {
}
// MustFindOrGenerateIndexFile returns a cached index file or generates one if it doesn't exist.
-func MustFindOrGenerateIndexFile(measurementN, tagN, valueN int) *tsi1.IndexFile {
+func MustFindOrGenerateIndexFile(sfile *tsdb.SeriesFile, measurementN, tagN, valueN int) *tsi1.IndexFile {
// Use cache if fields match and the index file has been generated.
if indexFileCache.MeasurementN == measurementN &&
indexFileCache.TagN == tagN &&
@@ -141,7 +154,7 @@ func MustFindOrGenerateIndexFile(measurementN, tagN, valueN int) *tsi1.IndexFile
indexFileCache.MeasurementN = measurementN
indexFileCache.TagN = tagN
indexFileCache.ValueN = valueN
- indexFileCache.IndexFile = MustGenerateIndexFile(measurementN, tagN, valueN)
+ indexFileCache.IndexFile = MustGenerateIndexFile(sfile, measurementN, tagN, valueN)
return indexFileCache.IndexFile
}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files.go
index c3b9b618f7..811f9e469c 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files.go
@@ -2,15 +2,12 @@ package tsi1
import (
"bufio"
- "fmt"
"io"
"os"
"sort"
"time"
"github.com/influxdata/influxdb/pkg/bytesutil"
- "github.com/influxdata/influxdb/pkg/estimator/hll"
- "github.com/influxdata/influxdb/pkg/mmap"
"github.com/influxdata/influxdb/tsdb"
)
@@ -90,46 +87,34 @@ func (p *IndexFiles) TagKeyIterator(name []byte) (TagKeyIterator, error) {
return MergeTagKeyIterators(a...), nil
}
-// SeriesIterator returns an iterator that merges series across all files.
-func (p IndexFiles) SeriesIterator() tsdb.SeriesIterator {
- a := make([]tsdb.SeriesIterator, 0, len(p))
+// MeasurementSeriesIDIterator returns an iterator that merges series across all files.
+func (p IndexFiles) MeasurementSeriesIDIterator(name []byte) tsdb.SeriesIDIterator {
+ a := make([]tsdb.SeriesIDIterator, 0, len(p))
for _, f := range p {
- itr := f.SeriesIterator()
+ itr := f.MeasurementSeriesIDIterator(name)
if itr == nil {
continue
}
a = append(a, itr)
}
- return MergeSeriesIterators(a...)
+ return tsdb.MergeSeriesIDIterators(a...)
}
-// MeasurementSeriesIterator returns an iterator that merges series across all files.
-func (p IndexFiles) MeasurementSeriesIterator(name []byte) tsdb.SeriesIterator {
- a := make([]tsdb.SeriesIterator, 0, len(p))
- for _, f := range p {
- itr := f.MeasurementSeriesIterator(name)
- if itr == nil {
- continue
- }
- a = append(a, itr)
- }
- return MergeSeriesIterators(a...)
-}
+// TagValueSeriesIDIterator returns an iterator that merges series across all files.
+func (p IndexFiles) TagValueSeriesIDIterator(name, key, value []byte) tsdb.SeriesIDIterator {
+ a := make([]tsdb.SeriesIDIterator, 0, len(p))
-// TagValueSeriesIterator returns an iterator that merges series across all files.
-func (p IndexFiles) TagValueSeriesIterator(name, key, value []byte) tsdb.SeriesIterator {
- a := make([]tsdb.SeriesIterator, 0, len(p))
for i := range p {
- itr := p[i].TagValueSeriesIterator(name, key, value)
+ itr := p[i].TagValueSeriesIDIterator(name, key, value)
if itr != nil {
a = append(a, itr)
}
}
- return MergeSeriesIterators(a...)
+ return tsdb.MergeSeriesIDIterators(a...)
}
// CompactTo merges all index files and writes them to w.
-func (p IndexFiles) CompactTo(w io.Writer, m, k uint64) (n int64, err error) {
+func (p IndexFiles) CompactTo(w io.Writer, sfile *tsdb.SeriesFile, m, k uint64) (n int64, err error) {
var t IndexFileTrailer
// Wrap writer in buffered I/O.
@@ -144,28 +129,11 @@ func (p IndexFiles) CompactTo(w io.Writer, m, k uint64) (n int64, err error) {
return n, err
}
- // Write combined series list.
- t.SeriesBlock.Offset = n
- if err := p.writeSeriesBlockTo(bw, m, k, &info, &n); err != nil {
- return n, err
- }
- t.SeriesBlock.Size = n - t.SeriesBlock.Offset
-
// Flush buffer before re-mapping.
if err := bw.Flush(); err != nil {
return n, err
}
- // Open series block as memory-mapped data.
- sblk, data, err := mapIndexFileSeriesBlock(w)
- if data != nil {
- defer mmap.Unmap(data)
- }
- if err != nil {
- return n, err
- }
- info.sblk = sblk
-
// Write tagset blocks in measurement order.
if err := p.writeTagsetsTo(bw, &info, &n); err != nil {
return n, err
@@ -193,35 +161,6 @@ func (p IndexFiles) CompactTo(w io.Writer, m, k uint64) (n int64, err error) {
return n, nil
}
-func (p IndexFiles) writeSeriesBlockTo(w io.Writer, m, k uint64, info *indexCompactInfo, n *int64) error {
- // Estimate series cardinality.
- sketch := hll.NewDefaultPlus()
- for _, f := range p {
- if err := f.MergeSeriesSketches(sketch, sketch); err != nil {
- return err
- }
- }
-
- itr := p.SeriesIterator()
- enc := NewSeriesBlockEncoder(w, uint32(sketch.Count()), m, k)
-
- // Write all series.
- for e := itr.Next(); e != nil; e = itr.Next() {
- if err := enc.Encode(e.Name(), e.Tags(), e.Deleted()); err != nil {
- return err
- }
- }
-
- // Close and flush block.
- err := enc.Close()
- *n += int64(enc.N())
- if err != nil {
- return err
- }
-
- return nil
-}
-
func (p IndexFiles) writeTagsetsTo(w io.Writer, info *indexCompactInfo, n *int64) error {
mitr := p.MeasurementIterator()
if mitr == nil {
@@ -238,7 +177,7 @@ func (p IndexFiles) writeTagsetsTo(w io.Writer, info *indexCompactInfo, n *int64
// writeTagsetTo writes a single tagset to w and saves the tagset offset.
func (p IndexFiles) writeTagsetTo(w io.Writer, name []byte, info *indexCompactInfo, n *int64) error {
- var seriesKey []byte
+ var seriesIDs []uint64
kitr, err := p.TagKeyIterator(name)
if err != nil {
@@ -255,21 +194,28 @@ func (p IndexFiles) writeTagsetTo(w io.Writer, name []byte, info *indexCompactIn
// Iterate over tag values.
vitr := ke.TagValueIterator()
for ve := vitr.Next(); ve != nil; ve = vitr.Next() {
+ seriesIDs = seriesIDs[:0]
+
// Merge all series together.
- sitr := p.TagValueSeriesIterator(name, ke.Key(), ve.Value())
- var seriesIDs []uint32
- for se := sitr.Next(); se != nil; se = sitr.Next() {
- seriesID, _ := info.sblk.Offset(se.Name(), se.Tags(), seriesKey[:0])
- if seriesID == 0 {
- return fmt.Errorf("expected series id: %s/%s", se.Name(), se.Tags().String())
+ if err := func() error {
+ sitr := p.TagValueSeriesIDIterator(name, ke.Key(), ve.Value())
+ if sitr != nil {
+ defer sitr.Close()
+ for {
+ se, err := sitr.Next()
+ if err != nil {
+ return err
+ } else if se.SeriesID == 0 {
+ break
+ }
+ seriesIDs = append(seriesIDs, se.SeriesID)
+ }
}
- seriesIDs = append(seriesIDs, seriesID)
- }
- sort.Sort(uint32Slice(seriesIDs))
- // Encode value.
- if err := enc.EncodeValue(ve.Value(), ve.Deleted(), seriesIDs); err != nil {
- return err
+ // Encode value.
+ return enc.EncodeValue(ve.Value(), ve.Deleted(), seriesIDs)
+ }(); err != nil {
+ return err
}
}
}
@@ -294,7 +240,6 @@ func (p IndexFiles) writeTagsetTo(w io.Writer, name []byte, info *indexCompactIn
}
func (p IndexFiles) writeMeasurementBlockTo(w io.Writer, info *indexCompactInfo, n *int64) error {
- var seriesKey []byte
mw := NewMeasurementBlockWriter()
// Add measurement data & compute sketches.
@@ -304,20 +249,30 @@ func (p IndexFiles) writeMeasurementBlockTo(w io.Writer, info *indexCompactInfo,
name := m.Name()
// Look-up series ids.
- itr := p.MeasurementSeriesIterator(name)
- var seriesIDs []uint32
- for e := itr.Next(); e != nil; e = itr.Next() {
- seriesID, _ := info.sblk.Offset(e.Name(), e.Tags(), seriesKey[:0])
- if seriesID == 0 {
- panic(fmt.Sprintf("expected series id: %s %s", e.Name(), e.Tags().String()))
+ if err := func() error {
+ itr := p.MeasurementSeriesIDIterator(name)
+ defer itr.Close()
+
+ var seriesIDs []uint64
+ for {
+ e, err := itr.Next()
+ if err != nil {
+ return err
+ } else if e.SeriesID == 0 {
+ break
+ }
+ seriesIDs = append(seriesIDs, e.SeriesID)
}
- seriesIDs = append(seriesIDs, seriesID)
- }
- sort.Sort(uint32Slice(seriesIDs))
+ sort.Sort(uint64Slice(seriesIDs))
+
+ // Add measurement to writer.
+ pos := info.tagSets[string(name)]
+ mw.Add(name, m.Deleted(), pos.offset, pos.size, seriesIDs)
- // Add measurement to writer.
- pos := info.tagSets[string(name)]
- mw.Add(name, m.Deleted(), pos.offset, pos.size, seriesIDs)
+ return nil
+ }(); err != nil {
+ return err
+ }
}
}
@@ -359,9 +314,7 @@ type IndexFilesInfo struct {
// indexCompactInfo is a context object used for tracking position information
// during the compaction of index files.
type indexCompactInfo struct {
- // Memory-mapped series block.
- // Available after the series block has been written.
- sblk *SeriesBlock
+ sfile *tsdb.SeriesFile
// Tracks offset/size for each measurement's tagset.
tagSets map[string]indexTagSetPos
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files_test.go
index 6baf4b9127..bd6494be5e 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files_test.go
@@ -10,8 +10,11 @@ import (
// Ensure multiple index files can be compacted together.
func TestIndexFiles_WriteTo(t *testing.T) {
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
// Write first file.
- f0, err := CreateIndexFile([]Series{
+ f0, err := CreateIndexFile(sfile.SeriesFile, []Series{
{Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})},
{Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})},
{Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "east"})},
@@ -21,7 +24,7 @@ func TestIndexFiles_WriteTo(t *testing.T) {
}
// Write second file.
- f1, err := CreateIndexFile([]Series{
+ f1, err := CreateIndexFile(sfile.SeriesFile, []Series{
{Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})},
{Name: []byte("disk"), Tags: models.NewTags(map[string]string{"region": "east"})},
})
@@ -32,7 +35,7 @@ func TestIndexFiles_WriteTo(t *testing.T) {
// Compact the two together and write out to a buffer.
var buf bytes.Buffer
a := tsi1.IndexFiles{f0, f1}
- if n, err := a.CompactTo(&buf, M, K); err != nil {
+ if n, err := a.CompactTo(&buf, sfile.SeriesFile, M, K); err != nil {
t.Fatal(err)
} else if n == 0 {
t.Fatal("expected data written")
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_test.go
index 022ab20bf2..318588bf59 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_test.go
@@ -10,6 +10,7 @@ import (
"testing"
"github.com/influxdata/influxdb/models"
+ "github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxdb/tsdb/index/tsi1"
)
@@ -18,7 +19,10 @@ const M, K = 4096, 6
// Ensure index can iterate over all measurement names.
func TestIndex_ForEachMeasurementName(t *testing.T) {
- idx := MustOpenIndex()
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ idx := MustOpenIndex(sfile.SeriesFile, 1)
defer idx.Close()
// Add series to index.
@@ -71,7 +75,10 @@ func TestIndex_ForEachMeasurementName(t *testing.T) {
// Ensure index can return whether a measurement exists.
func TestIndex_MeasurementExists(t *testing.T) {
- idx := MustOpenIndex()
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ idx := MustOpenIndex(sfile.SeriesFile, 1)
defer idx.Close()
// Add series to index.
@@ -122,7 +129,10 @@ func TestIndex_MeasurementExists(t *testing.T) {
// Ensure index can return a list of matching measurements.
func TestIndex_MeasurementNamesByRegex(t *testing.T) {
- idx := MustOpenIndex()
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ idx := MustOpenIndex(sfile.SeriesFile, 1)
defer idx.Close()
// Add series to index.
@@ -147,7 +157,10 @@ func TestIndex_MeasurementNamesByRegex(t *testing.T) {
// Ensure index can delete a measurement and all related keys, values, & series.
func TestIndex_DropMeasurement(t *testing.T) {
- idx := MustOpenIndex()
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ idx := MustOpenIndex(sfile.SeriesFile, 1)
defer idx.Close()
// Add series to index.
@@ -175,7 +188,10 @@ func TestIndex_DropMeasurement(t *testing.T) {
}
// Obtain file set to perform lower level checks.
- fs := idx.RetainFileSet()
+ fs, err := idx.PartitionAt(0).RetainFileSet()
+ if err != nil {
+ t.Fatal(err)
+ }
defer fs.Release()
// Verify tags & values are gone.
@@ -190,16 +206,22 @@ func TestIndex_DropMeasurement(t *testing.T) {
}
func TestIndex_Open(t *testing.T) {
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
// Opening a fresh index should set the MANIFEST version to current version.
- idx := NewIndex()
+ idx := NewIndex(sfile.SeriesFile, tsi1.DefaultPartitionN)
t.Run("open new index", func(t *testing.T) {
if err := idx.Open(); err != nil {
t.Fatal(err)
}
// Check version set appropriately.
- if got, exp := idx.Manifest().Version, 1; got != exp {
- t.Fatalf("got index version %d, expected %d", got, exp)
+ for i := 0; uint64(i) < tsi1.DefaultPartitionN; i++ {
+ partition := idx.PartitionAt(i)
+ if got, exp := partition.Manifest().Version, 1; got != exp {
+ t.Fatalf("got index version %d, expected %d", got, exp)
+ }
}
})
@@ -217,13 +239,19 @@ func TestIndex_Open(t *testing.T) {
incompatibleVersions := []int{-1, 0, 2}
for _, v := range incompatibleVersions {
t.Run(fmt.Sprintf("incompatible index version: %d", v), func(t *testing.T) {
- idx = NewIndex()
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+ idx = NewIndex(sfile.SeriesFile, tsi1.DefaultPartitionN)
// Manually create a MANIFEST file for an incompatible index version.
- mpath := filepath.Join(idx.Path, tsi1.ManifestFileName)
+ // under one of the partitions.
+ partitionPath := filepath.Join(idx.Path(), "2")
+ os.MkdirAll(partitionPath, 0777)
+
+ mpath := filepath.Join(partitionPath, tsi1.ManifestFileName)
m := tsi1.NewManifest(mpath)
m.Levels = nil
m.Version = v // Set example MANIFEST version.
- if err := m.Write(); err != nil {
+ if _, err := m.Write(); err != nil {
t.Fatal(err)
}
@@ -247,15 +275,24 @@ func TestIndex_Open(t *testing.T) {
func TestIndex_Manifest(t *testing.T) {
t.Run("current MANIFEST", func(t *testing.T) {
- idx := MustOpenIndex()
- if got, exp := idx.Manifest().Version, tsi1.Version; got != exp {
- t.Fatalf("got MANIFEST version %d, expected %d", got, exp)
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+ idx := MustOpenIndex(sfile.SeriesFile, tsi1.DefaultPartitionN)
+
+ // Check version set appropriately.
+ for i := 0; uint64(i) < tsi1.DefaultPartitionN; i++ {
+ partition := idx.PartitionAt(i)
+ if got, exp := partition.Manifest().Version, tsi1.Version; got != exp {
+ t.Fatalf("got MANIFEST version %d, expected %d", got, exp)
+ }
}
})
}
func TestIndex_DiskSizeBytes(t *testing.T) {
- idx := MustOpenIndex()
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+ idx := MustOpenIndex(sfile.SeriesFile, tsi1.DefaultPartitionN)
defer idx.Close()
// Add series to index.
@@ -269,7 +306,12 @@ func TestIndex_DiskSizeBytes(t *testing.T) {
}
// Verify on disk size is the same in each stage.
- expSize := int64(520) // 419 bytes for MANIFEST and 101 bytes for index file
+ // Each series stores flag(1) + series(uvarint(2)) + len(name)(1) + len(key)(1) + len(value)(1) + checksum(4).
+ expSize := int64(4 * 10)
+
+ // Each MANIFEST file is 419 bytes and there are tsi1.DefaultPartitionN of them
+ expSize += int64(tsi1.DefaultPartitionN * 419)
+
idx.Run(t, func(t *testing.T) {
if got, exp := idx.DiskSizeBytes(), expSize; got != exp {
t.Fatalf("got %d bytes, expected %d", got, exp)
@@ -283,15 +325,15 @@ type Index struct {
}
// NewIndex returns a new instance of Index at a temporary path.
-func NewIndex() *Index {
- idx := &Index{Index: tsi1.NewIndex()}
- idx.Path = MustTempDir()
+func NewIndex(sfile *tsdb.SeriesFile, partitionN uint64) *Index {
+ idx := &Index{Index: tsi1.NewIndex(sfile, tsi1.WithPath(MustTempDir()))}
+ idx.PartitionN = partitionN
return idx
}
// MustOpenIndex returns a new, open index. Panic on error.
-func MustOpenIndex() *Index {
- idx := NewIndex()
+func MustOpenIndex(sfile *tsdb.SeriesFile, partitionN uint64) *Index {
+ idx := NewIndex(sfile, partitionN)
if err := idx.Open(); err != nil {
panic(err)
}
@@ -300,7 +342,7 @@ func MustOpenIndex() *Index {
// Close closes and removes the index directory.
func (idx *Index) Close() error {
- defer os.RemoveAll(idx.Path)
+ defer os.RemoveAll(idx.Path())
return idx.Index.Close()
}
@@ -310,9 +352,11 @@ func (idx *Index) Reopen() error {
return err
}
- path := idx.Path
- idx.Index = tsi1.NewIndex()
- idx.Path = path
+ sfile := idx.SeriesFile()
+ path, partitionN := idx.Path(), idx.PartitionN
+
+ idx.Index = tsi1.NewIndex(sfile, tsi1.WithPath(path))
+ idx.PartitionN = partitionN
if err := idx.Open(); err != nil {
return err
}
@@ -355,7 +399,7 @@ func (idx *Index) Run(t *testing.T, fn func(t *testing.T)) {
// CreateSeriesSliceIfNotExists creates multiple series at a time.
func (idx *Index) CreateSeriesSliceIfNotExists(a []Series) error {
for i, s := range a {
- if err := idx.CreateSeriesIfNotExists(nil, s.Name, s.Tags); err != nil {
+ if err := idx.CreateSeriesListIfNotExists(nil, [][]byte{s.Name}, []models.Tags{s.Tags}); err != nil {
return fmt.Errorf("i=%d, name=%s, tags=%v, err=%s", i, s.Name, s.Tags, err)
}
}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file.go
index d8e32d7ba9..3a7ce3dd62 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file.go
@@ -19,7 +19,6 @@ import (
"github.com/influxdata/influxdb/pkg/estimator/hll"
"github.com/influxdata/influxdb/pkg/mmap"
"github.com/influxdata/influxdb/tsdb"
- "github.com/influxdata/influxql"
)
// Log errors.
@@ -45,8 +44,9 @@ type LogFile struct {
w *bufio.Writer // buffered writer
buf []byte // marshaling buffer
- size int64 // tracks current file size
- modTime time.Time // tracks last time write occurred
+ sfile *tsdb.SeriesFile // series lookup
+ size int64 // tracks current file size
+ modTime time.Time // tracks last time write occurred
mSketch, mTSketch estimator.Sketch // Measurement sketches
sSketch, sTSketch estimator.Sketch // Series sketche
@@ -59,8 +59,9 @@ type LogFile struct {
}
// NewLogFile returns a new instance of LogFile.
-func NewLogFile(path string) *LogFile {
+func NewLogFile(sfile *tsdb.SeriesFile, path string) *LogFile {
return &LogFile{
+ sfile: sfile,
path: path,
mms: make(logMeasurements),
mSketch: hll.NewDefaultPlus(),
@@ -101,7 +102,7 @@ func (f *LogFile) open() error {
f.modTime = fi.ModTime()
// Open a read-only memory map of the existing data.
- data, err := mmap.Map(f.Path())
+ data, err := mmap.Map(f.Path(), 0)
if err != nil {
return err
}
@@ -245,8 +246,8 @@ func (f *LogFile) DeleteMeasurement(name []byte) error {
return nil
}
-// TagKeySeriesIterator returns a series iterator for a tag key.
-func (f *LogFile) TagKeySeriesIterator(name, key []byte) tsdb.SeriesIterator {
+// TagKeySeriesIDIterator returns a series iterator for a tag key.
+func (f *LogFile) TagKeySeriesIDIterator(name, key []byte) tsdb.SeriesIDIterator {
f.mu.RLock()
defer f.mu.RUnlock()
@@ -261,15 +262,15 @@ func (f *LogFile) TagKeySeriesIterator(name, key []byte) tsdb.SeriesIterator {
}
// Combine iterators across all tag keys.
- itrs := make([]tsdb.SeriesIterator, 0, len(tk.tagValues))
+ itrs := make([]tsdb.SeriesIDIterator, 0, len(tk.tagValues))
for _, tv := range tk.tagValues {
if len(tv.series) == 0 {
continue
}
- itrs = append(itrs, newLogSeriesIterator(tv.series))
+ itrs = append(itrs, newLogSeriesIDIterator(tv.series))
}
- return MergeSeriesIterators(itrs...)
+ return tsdb.MergeSeriesIDIterators(itrs...)
}
// TagKeyIterator returns a value iterator for a measurement.
@@ -352,7 +353,7 @@ func (f *LogFile) DeleteTagKey(name, key []byte) error {
f.mu.Lock()
defer f.mu.Unlock()
- e := LogEntry{Flag: LogEntryTagKeyTombstoneFlag, Name: name, Tags: models.Tags{{Key: key}}}
+ e := LogEntry{Flag: LogEntryTagKeyTombstoneFlag, Name: name, Key: key}
if err := f.appendEntry(&e); err != nil {
return err
}
@@ -360,8 +361,8 @@ func (f *LogFile) DeleteTagKey(name, key []byte) error {
return nil
}
-// TagValueSeriesIterator returns a series iterator for a tag value.
-func (f *LogFile) TagValueSeriesIterator(name, key, value []byte) tsdb.SeriesIterator {
+// TagValueSeriesIDIterator returns a series iterator for a tag value.
+func (f *LogFile) TagValueSeriesIDIterator(name, key, value []byte) tsdb.SeriesIDIterator {
f.mu.RLock()
defer f.mu.RUnlock()
@@ -382,7 +383,7 @@ func (f *LogFile) TagValueSeriesIterator(name, key, value []byte) tsdb.SeriesIte
return nil
}
- return newLogSeriesIterator(tv.series)
+ return newLogSeriesIDIterator(tv.series)
}
// MeasurementN returns the total number of measurements.
@@ -419,7 +420,7 @@ func (f *LogFile) DeleteTagValue(name, key, value []byte) error {
f.mu.Lock()
defer f.mu.Unlock()
- e := LogEntry{Flag: LogEntryTagValueTombstoneFlag, Name: name, Tags: models.Tags{{Key: key, Value: value}}}
+ e := LogEntry{Flag: LogEntryTagValueTombstoneFlag, Name: name, Key: key, Value: value}
if err := f.appendEntry(&e); err != nil {
return err
}
@@ -428,90 +429,53 @@ func (f *LogFile) DeleteTagValue(name, key, value []byte) error {
}
// AddSeriesList adds a list of series to the log file in bulk.
-func (f *LogFile) AddSeriesList(names [][]byte, tagsSlice []models.Tags) error {
- // Determine total size of names, keys, values.
- var n int
- for i := range names {
- n += len(names[i])
+func (f *LogFile) AddSeriesList(seriesSet *tsdb.SeriesIDSet, names [][]byte, tagsSlice []models.Tags) error {
+ buf := make([]byte, 2048)
- tags := tagsSlice[i]
- for j := range tags {
- n += len(tags[j].Key) + len(tags[j].Value)
- }
+ seriesIDs, err := f.sfile.CreateSeriesListIfNotExists(names, tagsSlice, buf[:0])
+ if err != nil {
+ return err
}
- // Allocate names, keys, & values in one block.
- buf := make([]byte, n)
-
- // Clone all entries.
- entries := make([]LogEntry, len(names))
+ var writeRequired bool
+ entries := make([]LogEntry, 0, len(names))
+ seriesSet.RLock()
for i := range names {
- copy(buf, names[i])
- clonedName := buf[:len(names[i])]
- buf = buf[len(names[i]):]
-
- // Clone tag set.
- var clonedTags models.Tags
- if len(tagsSlice[i]) > 0 {
- clonedTags = make(models.Tags, len(tagsSlice[i]))
- for j, tags := range tagsSlice[i] {
- copy(buf, tags.Key)
- key := buf[:len(tags.Key)]
- buf = buf[len(tags.Key):]
-
- copy(buf, tags.Value)
- value := buf[:len(tags.Value)]
- buf = buf[len(tags.Value):]
-
- clonedTags[j] = models.Tag{Key: key, Value: value}
- }
+ if seriesSet.ContainsNoLock(seriesIDs[i]) {
+ // We don't need to allocate anything for this series.
+ continue
}
+ writeRequired = true
+ entries = append(entries, LogEntry{SeriesID: seriesIDs[i]})
+ }
+ seriesSet.RUnlock()
- entries[i] = LogEntry{Name: clonedName, Tags: clonedTags}
+ // Exit if all series already exist.
+ if !writeRequired {
+ return nil
}
f.mu.Lock()
defer f.mu.Unlock()
+ seriesSet.Lock()
+ defer seriesSet.Unlock()
+
for i := range entries {
- if err := f.appendEntry(&entries[i]); err != nil {
+ entry := &entries[i]
+ if seriesSet.ContainsNoLock(entry.SeriesID) {
+ // We don't need to allocate anything for this series.
+ continue
+ }
+ if err := f.appendEntry(entry); err != nil {
return err
}
- f.execEntry(&entries[i])
+ f.execEntry(entry)
+ seriesSet.AddNoLock(entry.SeriesID)
}
return nil
}
-// AddSeries adds a series to the log file.
-func (f *LogFile) AddSeries(name []byte, tags models.Tags) error {
- f.mu.Lock()
- defer f.mu.Unlock()
-
- // The name and tags are clone to prevent a memory leak
- newName := make([]byte, len(name))
- copy(newName, name)
-
- e := LogEntry{Name: newName, Tags: tags.Clone()}
- if err := f.appendEntry(&e); err != nil {
- return err
- }
- f.execEntry(&e)
- return nil
-}
-
-// DeleteSeries adds a tombstone for a series to the log file.
-func (f *LogFile) DeleteSeries(name []byte, tags models.Tags) error {
- f.mu.Lock()
- defer f.mu.Unlock()
-
- e := LogEntry{Flag: LogEntrySeriesTombstoneFlag, Name: name, Tags: tags}
- if err := f.appendEntry(&e); err != nil {
- return err
- }
- f.execEntry(&e)
- return nil
-}
-
// SeriesN returns the total number of series in the file.
func (f *LogFile) SeriesN() (n uint64) {
f.mu.RLock()
@@ -523,13 +487,14 @@ func (f *LogFile) SeriesN() (n uint64) {
return n
}
+/*
// HasSeries returns flags indicating if the series exists and if it is tombstoned.
func (f *LogFile) HasSeries(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool) {
e := f.SeriesWithBuffer(name, tags, buf)
- if e == nil {
+ if e.SeriesID == 0 {
return false, false
}
- return true, e.Deleted()
+ return true, e.Deleted
}
// FilterNamesTags filters out any series which already exist. It modifies the
@@ -560,30 +525,7 @@ func (f *LogFile) FilterNamesTags(names [][]byte, tagsSlice []models.Tags) ([][]
}
return newNames, newTagsSlice
}
-
-// Series returns a series by name/tags.
-func (f *LogFile) Series(name []byte, tags models.Tags) tsdb.SeriesElem {
- return f.SeriesWithBuffer(name, tags, nil)
-}
-
-// SeriesWithBuffer returns a series by name/tags.
-func (f *LogFile) SeriesWithBuffer(name []byte, tags models.Tags, buf []byte) tsdb.SeriesElem {
- key := AppendSeriesKey(buf[:0], name, tags)
-
- f.mu.RLock()
- defer f.mu.RUnlock()
-
- mm, ok := f.mms[string(name)]
- if !ok {
- return nil
- }
-
- s := mm.series[string(key)]
- if s == nil {
- return nil
- }
- return s
-}
+*/
// appendEntry adds a log entry to the end of the file.
func (f *LogFile) appendEntry(e *LogEntry) error {
@@ -600,7 +542,7 @@ func (f *LogFile) appendEntry(e *LogEntry) error {
// Log should be reopened if seeking cannot be completed.
if n > 0 {
f.w.Reset(f.file)
- if _, err := f.file.Seek(int64(-n), os.SEEK_CUR); err != nil {
+ if _, err := f.file.Seek(int64(-n), io.SeekCurrent); err != nil {
f.Close()
}
}
@@ -633,126 +575,115 @@ func (f *LogFile) execDeleteMeasurementEntry(e *LogEntry) {
mm := f.createMeasurementIfNotExists(e.Name)
mm.deleted = true
mm.tagSet = make(map[string]logTagKey)
- mm.series = make(map[string]*logSerie)
+ mm.series = make(map[uint64]struct{})
// Update measurement tombstone sketch.
f.mTSketch.Add(e.Name)
}
func (f *LogFile) execDeleteTagKeyEntry(e *LogEntry) {
- key := e.Tags[0].Key
-
mm := f.createMeasurementIfNotExists(e.Name)
- ts := mm.createTagSetIfNotExists(key)
+ ts := mm.createTagSetIfNotExists(e.Key)
ts.deleted = true
- mm.tagSet[string(key)] = ts
+ mm.tagSet[string(e.Key)] = ts
}
func (f *LogFile) execDeleteTagValueEntry(e *LogEntry) {
- key, value := e.Tags[0].Key, e.Tags[0].Value
-
mm := f.createMeasurementIfNotExists(e.Name)
- ts := mm.createTagSetIfNotExists(key)
- tv := ts.createTagValueIfNotExists(value)
+ ts := mm.createTagSetIfNotExists(e.Key)
+ tv := ts.createTagValueIfNotExists(e.Value)
tv.deleted = true
- ts.tagValues[string(value)] = tv
- mm.tagSet[string(key)] = ts
+ ts.tagValues[string(e.Value)] = tv
+ mm.tagSet[string(e.Key)] = ts
}
func (f *LogFile) execSeriesEntry(e *LogEntry) {
- // Check if series is deleted.
- deleted := (e.Flag & LogEntrySeriesTombstoneFlag) != 0
+ seriesKey := f.sfile.SeriesKey(e.SeriesID)
+ assert(seriesKey != nil, fmt.Sprintf("series key for ID: %d not found", e.SeriesID))
- // Fetch measurement.
- mm := f.createMeasurementIfNotExists(e.Name)
+ // Read key size.
+ _, remainder := tsdb.ReadSeriesKeyLen(seriesKey)
- // Undelete measurement if it's been tombstoned previously.
- if !deleted && mm.deleted {
- mm.deleted = false
- }
+ // Read measurement name.
+ name, remainder := tsdb.ReadSeriesKeyMeasurement(remainder)
+ mm := f.createMeasurementIfNotExists(name)
+ mm.deleted = false
+ mm.series[e.SeriesID] = struct{}{}
- // Generate key & series, if not exists.
- key := AppendSeriesKey(nil, e.Name, e.Tags)
- serie := mm.createSeriesIfNotExists(key, e.Name, e.Tags, deleted)
+ // Read tag count.
+ tagN, remainder := tsdb.ReadSeriesKeyTagN(remainder)
// Save tags.
- for _, t := range e.Tags {
- ts := mm.createTagSetIfNotExists(t.Key)
- tv := ts.createTagValueIfNotExists(t.Value)
+ var k, v []byte
+ for i := 0; i < tagN; i++ {
+ k, v, remainder = tsdb.ReadSeriesKeyTag(remainder)
+ ts := mm.createTagSetIfNotExists(k)
+ tv := ts.createTagValueIfNotExists(v)
// Add a reference to the series on the tag value.
- tv.series[string(key)] = serie
+ tv.series[e.SeriesID] = struct{}{}
- ts.tagValues[string(t.Value)] = tv
- mm.tagSet[string(t.Key)] = ts
- }
-
- // Update the sketches.
- if deleted {
- // TODO(edd) decrement series count...
- f.sTSketch.Add(key) // Deleting series so update tombstone sketch.
- return
+ ts.tagValues[string(v)] = tv
+ mm.tagSet[string(k)] = ts
}
// TODO(edd) increment series count....
- f.sSketch.Add(key) // Add series to sketch.
- f.mSketch.Add(e.Name) // Add measurement to sketch as this may be the fist series for the measurement.
+ f.sSketch.Add(seriesKey) // Add series to sketch.
+ f.mSketch.Add(name) // Add measurement to sketch as this may be the first series for the measurement.
}
-// SeriesIterator returns an iterator over all series in the log file.
-func (f *LogFile) SeriesIterator() tsdb.SeriesIterator {
+// SeriesIDIterator returns an iterator over all series in the log file.
+func (f *LogFile) SeriesIDIterator() tsdb.SeriesIDIterator {
f.mu.RLock()
defer f.mu.RUnlock()
// Determine total series count across all measurements.
var n int
mSeriesIdx := make([]int, len(f.mms))
- mSeries := make([][]logSerie, 0, len(f.mms))
+ mSeries := make([][]tsdb.SeriesIDElem, 0, len(f.mms))
for _, mm := range f.mms {
n += len(mm.series)
- a := make([]logSerie, 0, len(mm.series))
- for _, s := range mm.series {
- a = append(a, *s)
+ a := make([]tsdb.SeriesIDElem, 0, len(mm.series))
+ for seriesID := range mm.series {
+ a = append(a, tsdb.SeriesIDElem{SeriesID: seriesID})
}
- sort.Sort(logSeries(a))
+ sort.Sort(tsdb.SeriesIDElems(a))
mSeries = append(mSeries, a)
}
// Combine series across all measurements by merging the already sorted
// series lists.
- sBuffer := make([]*logSerie, len(f.mms))
- series := make(logSeries, 0, n)
- var (
- minSerie *logSerie
- minSerieIdx int
- )
+ sBuffer := make([]tsdb.SeriesIDElem, len(f.mms))
+ series := make([]tsdb.SeriesIDElem, 0, n)
+ var minElem tsdb.SeriesIDElem
+ var minElemIdx int
for s := 0; s < cap(series); s++ {
for i := 0; i < len(sBuffer); i++ {
// Are there still serie to pull from this measurement?
- if mSeriesIdx[i] < len(mSeries[i]) && sBuffer[i] == nil {
+ if mSeriesIdx[i] < len(mSeries[i]) && sBuffer[i].SeriesID == 0 {
// Fill the buffer slot for this measurement.
- sBuffer[i] = &mSeries[i][mSeriesIdx[i]]
+ sBuffer[i] = mSeries[i][mSeriesIdx[i]]
mSeriesIdx[i]++
}
// Does this measurement have the smallest current serie out of
// all those in the buffer?
- if minSerie == nil || (sBuffer[i] != nil && sBuffer[i].Compare(minSerie.name, minSerie.tags) < 0) {
- minSerie, minSerieIdx = sBuffer[i], i
+ if minElem.SeriesID == 0 || (sBuffer[i].SeriesID != 0 && sBuffer[i].SeriesID < minElem.SeriesID) {
+ minElem, minElemIdx = sBuffer[i], i
}
}
- series, minSerie, sBuffer[minSerieIdx] = append(series, *minSerie), nil, nil
+ series, minElem.SeriesID, sBuffer[minElemIdx].SeriesID = append(series, minElem), 0, 0
}
if len(series) == 0 {
return nil
}
- return &logSeriesIterator{series: series}
+ return &logSeriesIDIterator{series: series}
}
// createMeasurementIfNotExists returns a measurement by name.
@@ -762,7 +693,7 @@ func (f *LogFile) createMeasurementIfNotExists(name []byte) *logMeasurement {
mm = &logMeasurement{
name: name,
tagSet: make(map[string]logTagKey),
- series: make(map[string]*logSerie),
+ series: make(map[uint64]struct{}),
}
f.mms[string(name)] = mm
}
@@ -782,8 +713,8 @@ func (f *LogFile) MeasurementIterator() MeasurementIterator {
return &itr
}
-// MeasurementSeriesIterator returns an iterator over all series for a measurement.
-func (f *LogFile) MeasurementSeriesIterator(name []byte) tsdb.SeriesIterator {
+// MeasurementSeriesIDIterator returns an iterator over all series for a measurement.
+func (f *LogFile) MeasurementSeriesIDIterator(name []byte) tsdb.SeriesIDIterator {
f.mu.RLock()
defer f.mu.RUnlock()
@@ -791,7 +722,7 @@ func (f *LogFile) MeasurementSeriesIterator(name []byte) tsdb.SeriesIterator {
if mm == nil || len(mm.series) == 0 {
return nil
}
- return newLogSeriesIterator(mm.series)
+ return newLogSeriesIDIterator(mm.series)
}
// CompactTo compacts the log file and writes it to w.
@@ -814,24 +745,11 @@ func (f *LogFile) CompactTo(w io.Writer, m, k uint64) (n int64, err error) {
// Retreve measurement names in order.
names := f.measurementNames()
- // Write series list.
- t.SeriesBlock.Offset = n
- if err := f.writeSeriesBlockTo(bw, names, m, k, info, &n); err != nil {
- return n, err
- }
- t.SeriesBlock.Size = n - t.SeriesBlock.Offset
-
// Flush buffer & mmap series block.
if err := bw.Flush(); err != nil {
return n, err
}
- // Update series offsets.
- // NOTE: Pass the raw writer so we can mmap.
- if err := f.updateSeriesOffsets(w, names, info); err != nil {
- return n, err
- }
-
// Write tagset blocks in measurement order.
if err := f.writeTagsetsTo(bw, names, info, &n); err != nil {
return n, err
@@ -859,84 +777,6 @@ func (f *LogFile) CompactTo(w io.Writer, m, k uint64) (n int64, err error) {
return n, nil
}
-func (f *LogFile) writeSeriesBlockTo(w io.Writer, names []string, m, k uint64, info *logFileCompactInfo, n *int64) error {
- // Determine series count.
- var seriesN uint32
- for _, mm := range f.mms {
- seriesN += uint32(len(mm.series))
- }
-
- // Write all series.
- enc := NewSeriesBlockEncoder(w, seriesN, m, k)
-
- // Add series from measurements.
- for _, name := range names {
- mm := f.mms[name]
-
- // Sort series.
- keys := make([][]byte, 0, len(mm.series))
- for k := range mm.series {
- keys = append(keys, []byte(k))
- }
- sort.Sort(seriesKeys(keys))
-
- for _, key := range keys {
- serie := mm.series[string(key)]
- if err := enc.Encode(serie.name, serie.tags, serie.deleted); err != nil {
- return err
- }
- }
- }
-
- // Close and flush series block.
- err := enc.Close()
- *n += int64(enc.N())
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (f *LogFile) updateSeriesOffsets(w io.Writer, names []string, info *logFileCompactInfo) error {
- // Open series block.
- sblk, data, err := mapIndexFileSeriesBlock(w)
- if data != nil {
- defer mmap.Unmap(data)
- }
- if err != nil {
- return err
- }
-
- // Add series to each measurement and key/value.
- var seriesKey []byte
- for _, name := range names {
- mm := f.mms[name]
- mmInfo := info.createMeasurementInfoIfNotExists(name)
- mmInfo.seriesIDs = make([]uint32, 0, len(mm.series))
-
- for _, serie := range mm.series {
- // Lookup series offset.
- offset, _ := sblk.Offset(serie.name, serie.tags, seriesKey[:0])
- if offset == 0 {
- panic("series not found: " + string(serie.name) + " " + serie.tags.String())
- }
-
- // Add series id to measurement, tag key, and tag value.
- mmInfo.seriesIDs = append(mmInfo.seriesIDs, offset)
-
- // Add series id to each tag value.
- for _, tag := range serie.tags {
- tagSetInfo := mmInfo.createTagSetInfoIfNotExists(tag.Key)
- tagValueInfo := tagSetInfo.createTagValueInfoIfNotExists(tag.Value)
- tagValueInfo.seriesIDs = append(tagValueInfo.seriesIDs, offset)
- }
- }
- }
-
- return nil
-}
-
func (f *LogFile) writeTagsetsTo(w io.Writer, names []string, info *logFileCompactInfo, n *int64) error {
for _, name := range names {
if err := f.writeTagsetTo(w, name, info, n); err != nil {
@@ -949,7 +789,6 @@ func (f *LogFile) writeTagsetsTo(w io.Writer, names []string, info *logFileCompa
// writeTagsetTo writes a single tagset to w and saves the tagset offset.
func (f *LogFile) writeTagsetTo(w io.Writer, name string, info *logFileCompactInfo, n *int64) error {
mm := f.mms[name]
- mmInfo := info.mms[name]
enc := NewTagBlockEncoder(w)
for _, k := range mm.keys() {
@@ -962,10 +801,6 @@ func (f *LogFile) writeTagsetTo(w io.Writer, name string, info *logFileCompactIn
continue
}
- // Lookup compaction info.
- tagSetInfo := mmInfo.tagSet[k]
- assert(tagSetInfo != nil, "tag set info not found")
-
// Sort tag values.
values := make([]string, 0, len(tag.tagValues))
for v := range tag.tagValues {
@@ -976,17 +811,14 @@ func (f *LogFile) writeTagsetTo(w io.Writer, name string, info *logFileCompactIn
// Add each value.
for _, v := range values {
value := tag.tagValues[v]
- tagValueInfo := tagSetInfo.tagValues[v]
- sort.Sort(uint32Slice(tagValueInfo.seriesIDs))
-
- if err := enc.EncodeValue(value.name, value.deleted, tagValueInfo.seriesIDs); err != nil {
+ if err := enc.EncodeValue(value.name, value.deleted, value.seriesIDs()); err != nil {
return err
}
}
}
// Save tagset offset to measurement.
- mmInfo.offset = *n
+ offset := *n
// Flush tag block.
err := enc.Close()
@@ -996,7 +828,9 @@ func (f *LogFile) writeTagsetTo(w io.Writer, name string, info *logFileCompactIn
}
// Save tagset offset to measurement.
- mmInfo.size = *n - mmInfo.offset
+ size := *n - offset
+
+ info.mms[name] = &logFileMeasurementCompactInfo{offset: offset, size: size}
return nil
}
@@ -1009,9 +843,7 @@ func (f *LogFile) writeMeasurementBlockTo(w io.Writer, names []string, info *log
mm := f.mms[name]
mmInfo := info.mms[name]
assert(mmInfo != nil, "measurement info not found")
-
- sort.Sort(uint32Slice(mmInfo.seriesIDs))
- mw.Add(mm.name, mm.deleted, mmInfo.offset, mmInfo.size, mmInfo.seriesIDs)
+ mw.Add(mm.name, mm.deleted, mmInfo.offset, mmInfo.size, mm.seriesIDs())
}
// Flush data to writer.
@@ -1032,63 +864,9 @@ func newLogFileCompactInfo() *logFileCompactInfo {
}
}
-func (info *logFileCompactInfo) createMeasurementInfoIfNotExists(name string) *logFileMeasurementCompactInfo {
- mmInfo := info.mms[name]
- if mmInfo == nil {
- mmInfo = &logFileMeasurementCompactInfo{
- tagSet: make(map[string]*logFileTagSetCompactInfo),
- }
- info.mms[name] = mmInfo
- }
- return mmInfo
-}
-
type logFileMeasurementCompactInfo struct {
- offset int64
- size int64
- seriesIDs []uint32
-
- tagSet map[string]*logFileTagSetCompactInfo
-}
-
-func (info *logFileMeasurementCompactInfo) createTagSetInfoIfNotExists(key []byte) *logFileTagSetCompactInfo {
- tagSetInfo := info.tagSet[string(key)]
- if tagSetInfo == nil {
- tagSetInfo = &logFileTagSetCompactInfo{tagValues: make(map[string]*logFileTagValueCompactInfo)}
- info.tagSet[string(key)] = tagSetInfo
- }
- return tagSetInfo
-}
-
-type logFileTagSetCompactInfo struct {
- tagValues map[string]*logFileTagValueCompactInfo
-}
-
-func (info *logFileTagSetCompactInfo) createTagValueInfoIfNotExists(value []byte) *logFileTagValueCompactInfo {
- tagValueInfo := info.tagValues[string(value)]
- if tagValueInfo == nil {
- tagValueInfo = &logFileTagValueCompactInfo{}
- info.tagValues[string(value)] = tagValueInfo
- }
- return tagValueInfo
-}
-
-type logFileTagValueCompactInfo struct {
- seriesIDs []uint32
-}
-
-// MergeSeriesSketches merges the series sketches belonging to this LogFile
-// into the provided sketches.
-//
-// MergeSeriesSketches is safe for concurrent use by multiple goroutines.
-func (f *LogFile) MergeSeriesSketches(sketch, tsketch estimator.Sketch) error {
- f.mu.RLock()
- defer f.mu.RUnlock()
-
- if err := sketch.Merge(f.sSketch); err != nil {
- return err
- }
- return tsketch.Merge(f.sTSketch)
+ offset int64
+ size int64
}
// MergeMeasurementsSketches merges the measurement sketches belonging to this
@@ -1107,11 +885,13 @@ func (f *LogFile) MergeMeasurementsSketches(sketch, tsketch estimator.Sketch) er
// LogEntry represents a single log entry in the write-ahead log.
type LogEntry struct {
- Flag byte // flag
- Name []byte // measurement name
- Tags models.Tags // tagset
- Checksum uint32 // checksum of flag/name/tags.
- Size int // total size of record, in bytes.
+ Flag byte // flag
+ SeriesID uint64 // series id
+ Name []byte // measurement name
+ Key []byte // tag key
+ Value []byte // tag value
+ Checksum uint32 // checksum of flag/name/key/value.
+ Size int // total size of record, in bytes.
}
// UnmarshalBinary unmarshals data into e.
@@ -1125,6 +905,13 @@ func (e *LogEntry) UnmarshalBinary(data []byte) error {
}
e.Flag, data = data[0], data[1:]
+ // Parse series id.
+ if len(data) < 1 {
+ return io.ErrShortBuffer
+ }
+ seriesID, n := binary.Uvarint(data)
+ e.SeriesID, data = uint64(seriesID), data[n:]
+
// Parse name length.
if len(data) < 1 {
return io.ErrShortBuffer
@@ -1137,43 +924,29 @@ func (e *LogEntry) UnmarshalBinary(data []byte) error {
}
e.Name, data = data[n:n+int(sz)], data[n+int(sz):]
- // Parse tag count.
+ // Parse key length.
if len(data) < 1 {
return io.ErrShortBuffer
}
- tagN, n := binary.Uvarint(data)
- data = data[n:]
-
- // Parse tags.
- tags := make(models.Tags, tagN)
- for i := range tags {
- tag := &tags[i]
+ sz, n = binary.Uvarint(data)
- // Parse key length.
- if len(data) < 1 {
- return io.ErrShortBuffer
- }
- sz, n := binary.Uvarint(data)
-
- // Read key data.
- if len(data) < n+int(sz) {
- return io.ErrShortBuffer
- }
- tag.Key, data = data[n:n+int(sz)], data[n+int(sz):]
+ // Read key data.
+ if len(data) < n+int(sz) {
+ return io.ErrShortBuffer
+ }
+ e.Key, data = data[n:n+int(sz)], data[n+int(sz):]
- // Parse value.
- if len(data) < 1 {
- return io.ErrShortBuffer
- }
- sz, n = binary.Uvarint(data)
+ // Parse value length.
+ if len(data) < 1 {
+ return io.ErrShortBuffer
+ }
+ sz, n = binary.Uvarint(data)
- // Read value data.
- if len(data) < n+int(sz) {
- return io.ErrShortBuffer
- }
- tag.Value, data = data[n:n+int(sz)], data[n+int(sz):]
+ // Read value data.
+ if len(data) < n+int(sz) {
+ return io.ErrShortBuffer
}
- e.Tags = tags
+ e.Value, data = data[n:n+int(sz)], data[n+int(sz):]
// Compute checksum.
chk := crc32.ChecksumIEEE(orig[:start-len(data)])
@@ -1204,29 +977,24 @@ func appendLogEntry(dst []byte, e *LogEntry) []byte {
// Append flag.
dst = append(dst, e.Flag)
+ // Append series id.
+ n := binary.PutUvarint(buf[:], uint64(e.SeriesID))
+ dst = append(dst, buf[:n]...)
+
// Append name.
- n := binary.PutUvarint(buf[:], uint64(len(e.Name)))
+ n = binary.PutUvarint(buf[:], uint64(len(e.Name)))
dst = append(dst, buf[:n]...)
dst = append(dst, e.Name...)
- // Append tag count.
- n = binary.PutUvarint(buf[:], uint64(len(e.Tags)))
+ // Append key.
+ n = binary.PutUvarint(buf[:], uint64(len(e.Key)))
dst = append(dst, buf[:n]...)
+ dst = append(dst, e.Key...)
- // Append key/value pairs.
- for i := range e.Tags {
- t := &e.Tags[i]
-
- // Append key.
- n := binary.PutUvarint(buf[:], uint64(len(t.Key)))
- dst = append(dst, buf[:n]...)
- dst = append(dst, t.Key...)
-
- // Append value.
- n = binary.PutUvarint(buf[:], uint64(len(t.Value)))
- dst = append(dst, buf[:n]...)
- dst = append(dst, t.Value...)
- }
+ // Append value.
+ n = binary.PutUvarint(buf[:], uint64(len(e.Value)))
+ dst = append(dst, buf[:n]...)
+ dst = append(dst, e.Value...)
// Calculate checksum.
e.Checksum = crc32.ChecksumIEEE(dst[start:])
@@ -1238,10 +1006,10 @@ func appendLogEntry(dst []byte, e *LogEntry) []byte {
return dst
}
+/*
type logSerie struct {
- name []byte
- tags models.Tags
- deleted bool
+ name []byte
+ tags models.Tags
}
func (s *logSerie) String() string {
@@ -1266,6 +1034,7 @@ func (a logSeries) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a logSeries) Less(i, j int) bool {
return a[i].Compare(a[j].name, a[j].tags) == -1
}
+*/
// logMeasurements represents a map of measurement names to measurements.
type logMeasurements map[string]*logMeasurement
@@ -1284,11 +1053,22 @@ type logMeasurement struct {
name []byte
tagSet map[string]logTagKey
deleted bool
- series map[string]*logSerie
+ series map[uint64]struct{}
+}
+
+func (mm *logMeasurement) seriesIDs() []uint64 {
+ a := make([]uint64, 0, len(mm.series))
+ for seriesID := range mm.series {
+ a = append(a, seriesID)
+ }
+ sort.Sort(uint64Slice(a))
+ return a
}
func (m *logMeasurement) Name() []byte { return m.name }
func (m *logMeasurement) Deleted() bool { return m.deleted }
+
+/*
func (m *logMeasurement) HasSeries() bool {
if m.deleted {
return false
@@ -1300,6 +1080,7 @@ func (m *logMeasurement) HasSeries() bool {
}
return false
}
+*/
func (m *logMeasurement) createTagSetIfNotExists(key []byte) logTagKey {
ts, ok := m.tagSet[string(key)]
@@ -1309,18 +1090,6 @@ func (m *logMeasurement) createTagSetIfNotExists(key []byte) logTagKey {
return ts
}
-// createSeriesIfNotExists creates or returns an existing series on the measurement.
-func (m *logMeasurement) createSeriesIfNotExists(key []byte, name []byte, tags models.Tags, deleted bool) *logSerie {
- s := m.series[string(key)]
- if s == nil {
- s = &logSerie{name: name, tags: tags, deleted: deleted}
- m.series[string(key)] = s
- } else {
- s.deleted = deleted
- }
- return s
-}
-
// keys returns a sorted list of tag keys.
func (m *logMeasurement) keys() []string {
a := make([]string, 0, len(m.tagSet))
@@ -1372,7 +1141,7 @@ func (tk *logTagKey) TagValueIterator() TagValueIterator {
func (tk *logTagKey) createTagValueIfNotExists(value []byte) logTagValue {
tv, ok := tk.tagValues[string(value)]
if !ok {
- tv = logTagValue{name: value, series: make(map[string]*logSerie)}
+ tv = logTagValue{name: value, series: make(map[uint64]struct{})}
}
return tv
}
@@ -1387,7 +1156,16 @@ func (a logTagKeySlice) Less(i, j int) bool { return bytes.Compare(a[i].name, a[
type logTagValue struct {
name []byte
deleted bool
- series map[string]*logSerie
+ series map[uint64]struct{}
+}
+
+func (tv *logTagValue) seriesIDs() []uint64 {
+ a := make([]uint64, 0, len(tv.series))
+ for seriesID := range tv.series {
+ a = append(a, seriesID)
+ }
+ sort.Sort(uint64Slice(a))
+ return a
}
func (tv *logTagValue) Value() []byte { return tv.name }
@@ -1440,34 +1218,37 @@ func (itr *logTagValueIterator) Next() (e TagValueElem) {
return e
}
-// logSeriesIterator represents an iterator over a slice of series.
-type logSeriesIterator struct {
- series logSeries
+// logSeriesIDIterator represents an iterator over a slice of series.
+type logSeriesIDIterator struct {
+ series []tsdb.SeriesIDElem
}
-// newLogSeriesIterator returns a new instance of logSeriesIterator.
+// newLogSeriesIDIterator returns a new instance of logSeriesIDIterator.
// All series are copied to the iterator.
-func newLogSeriesIterator(m map[string]*logSerie) *logSeriesIterator {
+func newLogSeriesIDIterator(m map[uint64]struct{}) *logSeriesIDIterator {
if len(m) == 0 {
return nil
}
- itr := logSeriesIterator{series: make(logSeries, 0, len(m))}
- for _, s := range m {
- itr.series = append(itr.series, *s)
+ itr := logSeriesIDIterator{series: make([]tsdb.SeriesIDElem, 0, len(m))}
+ for seriesID := range m {
+ itr.series = append(itr.series, tsdb.SeriesIDElem{SeriesID: seriesID})
}
- sort.Sort(itr.series)
+ sort.Sort(tsdb.SeriesIDElems(itr.series))
return &itr
}
+func (itr *logSeriesIDIterator) Close() error { return nil }
+
// Next returns the next element in the iterator.
-func (itr *logSeriesIterator) Next() (e tsdb.SeriesElem) {
+func (itr *logSeriesIDIterator) Next() (tsdb.SeriesIDElem, error) {
if len(itr.series) == 0 {
- return nil
+ return tsdb.SeriesIDElem{}, nil
}
- e, itr.series = &itr.series[0], itr.series[1:]
- return e
+ elem := itr.series[0]
+ itr.series = itr.series[1:]
+ return elem, nil
}
// FormatLogFileName generates a log filename for the given index.
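The log_file.go changes above replace the old series-element iterator with logSeriesIDIterator, whose Next returns a tsdb.SeriesIDElem plus an error and signals exhaustion with a zero SeriesID rather than a nil element. Below is a minimal, self-contained sketch of that drain pattern; seriesIDElem and sliceSeriesIDIterator are simplified stand-ins for the vendored tsdb types, not the real API.

package main

import "fmt"

// seriesIDElem is a simplified stand-in for tsdb.SeriesIDElem.
type seriesIDElem struct{ SeriesID uint64 }

// sliceSeriesIDIterator mimics the sorted-slice iterator used by the log file.
type sliceSeriesIDIterator struct{ elems []seriesIDElem }

// Next pops the next element; a zero SeriesID means the iterator is exhausted.
func (itr *sliceSeriesIDIterator) Next() (seriesIDElem, error) {
	if len(itr.elems) == 0 {
		return seriesIDElem{}, nil
	}
	e := itr.elems[0]
	itr.elems = itr.elems[1:]
	return e, nil
}

func main() {
	itr := &sliceSeriesIDIterator{elems: []seriesIDElem{{1}, {3}, {7}}}
	for {
		e, err := itr.Next()
		if err != nil {
			panic(err)
		} else if e.SeriesID == 0 {
			break // sentinel: no more series IDs
		}
		fmt.Println("series id:", e.SeriesID)
	}
}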
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file_test.go
index 9a8c041cd0..897cef64d4 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file_test.go
@@ -15,20 +15,29 @@ import (
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/pkg/bloom"
+ "github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxdb/tsdb/index/tsi1"
)
// Ensure log file can append series.
-func TestLogFile_AddSeries(t *testing.T) {
- f := MustOpenLogFile()
+func TestLogFile_AddSeriesList(t *testing.T) {
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ f := MustOpenLogFile(sfile.SeriesFile)
defer f.Close()
+ seriesSet := tsdb.NewSeriesIDSet()
// Add test data.
- if err := f.AddSeries([]byte("mem"), models.Tags{{Key: []byte("host"), Value: []byte("serverA")}}); err != nil {
- t.Fatal(err)
- } else if err := f.AddSeries([]byte("cpu"), models.Tags{{Key: []byte("region"), Value: []byte("us-east")}}); err != nil {
- t.Fatal(err)
- } else if err := f.AddSeries([]byte("cpu"), models.Tags{{Key: []byte("region"), Value: []byte("us-west")}}); err != nil {
+ if err := f.AddSeriesList(seriesSet, [][]byte{
+ []byte("mem"),
+ []byte("cpu"),
+ []byte("cpu"),
+ }, []models.Tags{
+ {{Key: []byte("host"), Value: []byte("serverA")}},
+ {{Key: []byte("region"), Value: []byte("us-east")}},
+ {{Key: []byte("region"), Value: []byte("us-west")}},
+ }); err != nil {
t.Fatal(err)
}
@@ -59,8 +68,12 @@ func TestLogFile_AddSeries(t *testing.T) {
}
func TestLogFile_SeriesStoredInOrder(t *testing.T) {
- f := MustOpenLogFile()
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ f := MustOpenLogFile(sfile.SeriesFile)
defer f.Close()
+ seriesSet := tsdb.NewSeriesIDSet()
// Generate and add test data
tvm := make(map[string]struct{})
@@ -69,11 +82,13 @@ func TestLogFile_SeriesStoredInOrder(t *testing.T) {
tv := fmt.Sprintf("server-%d", rand.Intn(50)) // Encourage adding duplicate series.
tvm[tv] = struct{}{}
- if err := f.AddSeries([]byte("mem"), models.Tags{models.NewTag([]byte("host"), []byte(tv))}); err != nil {
- t.Fatal(err)
- }
-
- if err := f.AddSeries([]byte("cpu"), models.Tags{models.NewTag([]byte("host"), []byte(tv))}); err != nil {
+ if err := f.AddSeriesList(seriesSet, [][]byte{
+ []byte("mem"),
+ []byte("cpu"),
+ }, []models.Tags{
+ {models.NewTag([]byte("host"), []byte(tv))},
+ {models.NewTag([]byte("host"), []byte(tv))},
+ }); err != nil {
t.Fatal(err)
}
}
@@ -89,45 +104,44 @@ func TestLogFile_SeriesStoredInOrder(t *testing.T) {
tvs = append(tvs, tvs...)
// When we pull the series out via an iterator they should be in order.
- itr := f.SeriesIterator()
+ itr := f.SeriesIDIterator()
if itr == nil {
t.Fatal("nil iterator")
}
- mname := []string{"cpu", "mem"}
- var j int
+ var prevSeriesID uint64
for i := 0; i < len(tvs); i++ {
- serie := itr.Next()
- if serie == nil {
+ elem, err := itr.Next()
+ if err != nil {
+ t.Fatal(err)
+ } else if elem.SeriesID == 0 {
t.Fatal("got nil series")
+ } else if elem.SeriesID < prevSeriesID {
+ t.Fatalf("series out of order: %d !< %d ", elem.SeriesID, prevSeriesID)
}
-
- if got, exp := string(serie.Name()), mname[j]; got != exp {
- t.Fatalf("[series %d] got %s, expected %s", i, got, exp)
- }
-
- if got, exp := string(serie.Tags()[0].Value), tvs[i]; got != exp {
- t.Fatalf("[series %d] got %s, expected %s", i, got, exp)
- }
-
- if i == (len(tvs)/2)-1 {
- // Next measurement
- j++
- }
+ prevSeriesID = elem.SeriesID
}
}
// Ensure log file can delete an existing measurement.
func TestLogFile_DeleteMeasurement(t *testing.T) {
- f := MustOpenLogFile()
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ f := MustOpenLogFile(sfile.SeriesFile)
defer f.Close()
+ seriesSet := tsdb.NewSeriesIDSet()
// Add test data.
- if err := f.AddSeries([]byte("mem"), models.Tags{{Key: []byte("host"), Value: []byte("serverA")}}); err != nil {
- t.Fatal(err)
- } else if err := f.AddSeries([]byte("cpu"), models.Tags{{Key: []byte("region"), Value: []byte("us-east")}}); err != nil {
- t.Fatal(err)
- } else if err := f.AddSeries([]byte("cpu"), models.Tags{{Key: []byte("region"), Value: []byte("us-west")}}); err != nil {
+ if err := f.AddSeriesList(seriesSet, [][]byte{
+ []byte("mem"),
+ []byte("cpu"),
+ []byte("cpu"),
+ }, []models.Tags{
+ {{Key: []byte("host"), Value: []byte("serverA")}},
+ {{Key: []byte("region"), Value: []byte("us-east")}},
+ {{Key: []byte("region"), Value: []byte("us-west")}},
+ }); err != nil {
t.Fatal(err)
}
@@ -153,19 +167,19 @@ type LogFile struct {
}
// NewLogFile returns a new instance of LogFile with a temporary file path.
-func NewLogFile() *LogFile {
+func NewLogFile(sfile *tsdb.SeriesFile) *LogFile {
file, err := ioutil.TempFile("", "tsi1-log-file-")
if err != nil {
panic(err)
}
file.Close()
- return &LogFile{LogFile: tsi1.NewLogFile(file.Name())}
+ return &LogFile{LogFile: tsi1.NewLogFile(sfile, file.Name())}
}
// MustOpenLogFile returns a new, open instance of LogFile. Panic on error.
-func MustOpenLogFile() *LogFile {
- f := NewLogFile()
+func MustOpenLogFile(sfile *tsdb.SeriesFile) *LogFile {
+ f := NewLogFile(sfile)
if err := f.Open(); err != nil {
panic(err)
}
@@ -190,10 +204,11 @@ func (f *LogFile) Reopen() error {
}
// CreateLogFile creates a new temporary log file and adds a list of series.
-func CreateLogFile(series []Series) (*LogFile, error) {
- f := MustOpenLogFile()
+func CreateLogFile(sfile *tsdb.SeriesFile, series []Series) (*LogFile, error) {
+ f := MustOpenLogFile(sfile)
+ seriesSet := tsdb.NewSeriesIDSet()
for _, serie := range series {
- if err := f.AddSeries(serie.Name, serie.Tags); err != nil {
+ if err := f.AddSeriesList(seriesSet, [][]byte{serie.Name}, []models.Tags{serie.Tags}); err != nil {
return nil, err
}
}
@@ -202,10 +217,11 @@ func CreateLogFile(series []Series) (*LogFile, error) {
// GenerateLogFile generates a log file from a set of series based on the count arguments.
// Total series returned will equal measurementN * tagN * valueN.
-func GenerateLogFile(measurementN, tagN, valueN int) (*LogFile, error) {
+func GenerateLogFile(sfile *tsdb.SeriesFile, measurementN, tagN, valueN int) (*LogFile, error) {
tagValueN := pow(valueN, tagN)
- f := MustOpenLogFile()
+ f := MustOpenLogFile(sfile)
+ seriesSet := tsdb.NewSeriesIDSet()
for i := 0; i < measurementN; i++ {
name := []byte(fmt.Sprintf("measurement%d", i))
@@ -217,7 +233,7 @@ func GenerateLogFile(measurementN, tagN, valueN int) (*LogFile, error) {
value := []byte(fmt.Sprintf("value%d", (j / pow(valueN, k) % valueN)))
tags = append(tags, models.NewTag(key, value))
}
- if err := f.AddSeries(name, tags); err != nil {
+ if err := f.AddSeriesList(seriesSet, [][]byte{name}, []models.Tags{tags}); err != nil {
return nil, err
}
}
@@ -225,8 +241,8 @@ func GenerateLogFile(measurementN, tagN, valueN int) (*LogFile, error) {
return f, nil
}
-func MustGenerateLogFile(measurementN, tagN, valueN int) *LogFile {
- f, err := GenerateLogFile(measurementN, tagN, valueN)
+func MustGenerateLogFile(sfile *tsdb.SeriesFile, measurementN, tagN, valueN int) *LogFile {
+ f, err := GenerateLogFile(sfile, measurementN, tagN, valueN)
if err != nil {
panic(err)
}
@@ -234,8 +250,12 @@ func MustGenerateLogFile(measurementN, tagN, valueN int) *LogFile {
}
func benchmarkLogFile_AddSeries(b *testing.B, measurementN, seriesKeyN, seriesValueN int) {
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
b.StopTimer()
- f := MustOpenLogFile()
+ f := MustOpenLogFile(sfile.SeriesFile)
+ seriesSet := tsdb.NewSeriesIDSet()
type Datum struct {
Name []byte
@@ -268,7 +288,7 @@ func benchmarkLogFile_AddSeries(b *testing.B, measurementN, seriesKeyN, seriesVa
for i := 0; i < b.N; i++ {
for _, d := range data {
- if err := f.AddSeries(d.Name, d.Tags); err != nil {
+ if err := f.AddSeriesList(seriesSet, [][]byte{d.Name}, []models.Tags{d.Tags}); err != nil {
b.Fatal(err)
}
}
@@ -288,20 +308,25 @@ func BenchmarkLogFile_WriteTo(b *testing.B) {
for _, seriesN := range []int{1000, 10000, 100000, 1000000} {
name := fmt.Sprintf("series=%d", seriesN)
b.Run(name, func(b *testing.B) {
- f := MustOpenLogFile()
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ f := MustOpenLogFile(sfile.SeriesFile)
defer f.Close()
+ seriesSet := tsdb.NewSeriesIDSet()
// Estimate bloom filter size.
m, k := bloom.Estimate(uint64(seriesN), 0.02)
// Initialize log file with series data.
for i := 0; i < seriesN; i++ {
- if err := f.AddSeries(
- []byte("cpu"),
- models.Tags{
+ if err := f.AddSeriesList(
+ seriesSet,
+ [][]byte{[]byte("cpu")},
+ []models.Tags{{
{Key: []byte("host"), Value: []byte(fmt.Sprintf("server-%d", i))},
{Key: []byte("location"), Value: []byte("us-west")},
- },
+ }},
); err != nil {
b.Fatal(err)
}
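BenchmarkLogFile_WriteTo above sizes its bloom filter with bloom.Estimate, which takes the expected number of series and a target false-positive rate and returns the bit count m and hash count k that are later carried per compaction level. A small sketch of that call follows; it assumes the vendored pkg/bloom package and the import path as it appears in this tree.

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/pkg/bloom"
)

func main() {
	// Mirror the benchmark: estimate filter parameters for ~100k series
	// at a 2% false-positive rate.
	m, k := bloom.Estimate(100000, 0.02)
	fmt.Printf("bloom filter: m=%d bits, k=%d hash functions\n", m, k)
}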
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block.go
index 65367d5578..6e7906a9ae 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block.go
@@ -10,6 +10,7 @@ import (
"github.com/influxdata/influxdb/pkg/estimator"
"github.com/influxdata/influxdb/pkg/estimator/hll"
"github.com/influxdata/influxdb/pkg/rhh"
+ "github.com/influxdata/influxdb/tsdb"
)
// MeasurementBlockVersion is the version of the measurement block.
@@ -36,6 +37,8 @@ const (
// Measurement key block fields.
MeasurementNSize = 8
MeasurementOffsetSize = 8
+
+ SeriesIDSize = 8
)
// Measurement errors.
@@ -141,8 +144,8 @@ func (blk *MeasurementBlock) Iterator() MeasurementIterator {
return &blockMeasurementIterator{data: blk.data[MeasurementFillSize:]}
}
-// seriesIDIterator returns an iterator for all series ids in a measurement.
-func (blk *MeasurementBlock) seriesIDIterator(name []byte) seriesIDIterator {
+// SeriesIDIterator returns an iterator for all series ids in a measurement.
+func (blk *MeasurementBlock) SeriesIDIterator(name []byte) tsdb.SeriesIDIterator {
// Find measurement element.
e, ok := blk.Elem(name)
if !ok {
@@ -175,23 +178,25 @@ func (itr *blockMeasurementIterator) Next() MeasurementElem {
// rawSeriesIDIterator iterates over a list of raw series data.
type rawSeriesIDIterator struct {
- prev uint32
- n uint32
+ prev uint64
+ n uint64
data []byte
}
-// next returns the next decoded series.
-func (itr *rawSeriesIDIterator) next() uint32 {
+func (itr *rawSeriesIDIterator) Close() error { return nil }
+
+// Next returns the next decoded series.
+func (itr *rawSeriesIDIterator) Next() (tsdb.SeriesIDElem, error) {
if len(itr.data) == 0 {
- return 0
+ return tsdb.SeriesIDElem{}, nil
}
delta, n := binary.Uvarint(itr.data)
itr.data = itr.data[n:]
- seriesID := itr.prev + uint32(delta)
+ seriesID := itr.prev + uint64(delta)
itr.prev = seriesID
- return seriesID
+ return tsdb.SeriesIDElem{SeriesID: seriesID}, nil
}
// MeasurementBlockTrailer represents meta data at the end of a MeasurementBlock.
@@ -304,7 +309,7 @@ type MeasurementBlockElem struct {
}
series struct {
- n uint32 // series count
+ n uint64 // series count
data []byte // serialized series data
}
@@ -330,11 +335,11 @@ func (e *MeasurementBlockElem) TagBlockSize() int64 { return e.tagBlock.size }
func (e *MeasurementBlockElem) SeriesData() []byte { return e.series.data }
// SeriesN returns the number of series associated with the measurement.
-func (e *MeasurementBlockElem) SeriesN() uint32 { return e.series.n }
+func (e *MeasurementBlockElem) SeriesN() uint64 { return e.series.n }
// SeriesID returns series ID at an index.
-func (e *MeasurementBlockElem) SeriesID(i int) uint32 {
- return binary.BigEndian.Uint32(e.series.data[i*SeriesIDSize:])
+func (e *MeasurementBlockElem) SeriesID(i int) uint64 {
+ return binary.BigEndian.Uint64(e.series.data[i*SeriesIDSize:])
}
func (e *MeasurementBlockElem) HasSeries() bool { return e.series.n > 0 }
@@ -343,14 +348,14 @@ func (e *MeasurementBlockElem) HasSeries() bool { return e.series.n > 0 }
//
// NOTE: This should be used for testing and diagnostics purposes only.
// It requires loading the entire list of series in-memory.
-func (e *MeasurementBlockElem) SeriesIDs() []uint32 {
- a := make([]uint32, 0, e.series.n)
- var prev uint32
+func (e *MeasurementBlockElem) SeriesIDs() []uint64 {
+ a := make([]uint64, 0, e.series.n)
+ var prev uint64
for data := e.series.data; len(data) > 0; {
delta, n := binary.Uvarint(data)
data = data[n:]
- seriesID := prev + uint32(delta)
+ seriesID := prev + uint64(delta)
a = append(a, seriesID)
prev = seriesID
}
@@ -377,7 +382,7 @@ func (e *MeasurementBlockElem) UnmarshalBinary(data []byte) error {
// Parse series data.
v, n := binary.Uvarint(data)
- e.series.n, data = uint32(v), data[n:]
+ e.series.n, data = uint64(v), data[n:]
sz, n = binary.Uvarint(data)
data = data[n:]
e.series.data, data = data[:sz], data[sz:]
@@ -407,7 +412,7 @@ func NewMeasurementBlockWriter() *MeasurementBlockWriter {
}
// Add adds a measurement with series and tag set offset/size.
-func (mw *MeasurementBlockWriter) Add(name []byte, deleted bool, offset, size int64, seriesIDs []uint32) {
+func (mw *MeasurementBlockWriter) Add(name []byte, deleted bool, offset, size int64, seriesIDs []uint64) {
mm := mw.mms[string(name)]
mm.deleted = deleted
mm.tagBlock.offset = offset
@@ -539,7 +544,7 @@ func (mw *MeasurementBlockWriter) writeMeasurementTo(w io.Writer, name []byte, m
// Write series data to buffer.
mw.buf.Reset()
- var prev uint32
+ var prev uint64
for _, seriesID := range mm.seriesIDs {
delta := seriesID - prev
@@ -589,7 +594,7 @@ type measurement struct {
offset int64
size int64
}
- seriesIDs []uint32
+ seriesIDs []uint64
offset int64
}
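The measurement block stores each measurement's series list as sorted IDs encoded as uvarint deltas: writeMeasurementTo emits seriesID - prev for each entry, and rawSeriesIDIterator.Next reverses that by adding each decoded delta to the previous ID. Below is a self-contained, stdlib-only round-trip sketch of that encoding; it illustrates the scheme rather than reusing the vendored code.

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeSeriesIDs writes sorted series IDs as uvarint-encoded deltas.
func encodeSeriesIDs(ids []uint64) []byte {
	var buf [binary.MaxVarintLen64]byte
	var out []byte
	var prev uint64
	for _, id := range ids {
		n := binary.PutUvarint(buf[:], id-prev) // delta from previous ID
		out = append(out, buf[:n]...)
		prev = id
	}
	return out
}

// decodeSeriesIDs reverses encodeSeriesIDs.
func decodeSeriesIDs(data []byte) []uint64 {
	var ids []uint64
	var prev uint64
	for len(data) > 0 {
		delta, n := binary.Uvarint(data)
		data = data[n:]
		prev += delta
		ids = append(ids, prev)
	}
	return ids
}

func main() {
	enc := encodeSeriesIDs([]uint64{1, 3, 4})
	fmt.Println(decodeSeriesIDs(enc)) // [1 3 4]
}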
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block_test.go
index 939c6d77cb..9ec6323de0 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block_test.go
@@ -104,9 +104,9 @@ func TestMeasurementBlockTrailer_WriteTo(t *testing.T) {
// Ensure measurement blocks can be written and opened.
func TestMeasurementBlockWriter(t *testing.T) {
ms := Measurements{
- NewMeasurement([]byte("foo"), false, 100, 10, []uint32{1, 3, 4}),
- NewMeasurement([]byte("bar"), false, 200, 20, []uint32{2}),
- NewMeasurement([]byte("baz"), false, 300, 30, []uint32{5, 6}),
+ NewMeasurement([]byte("foo"), false, 100, 10, []uint64{1, 3, 4}),
+ NewMeasurement([]byte("bar"), false, 200, 20, []uint64{2}),
+ NewMeasurement([]byte("baz"), false, 300, 30, []uint64{5, 6}),
}
// Write the measurements to writer.
@@ -134,7 +134,7 @@ func TestMeasurementBlockWriter(t *testing.T) {
t.Fatal("expected element")
} else if e.TagBlockOffset() != 100 || e.TagBlockSize() != 10 {
t.Fatalf("unexpected offset/size: %v/%v", e.TagBlockOffset(), e.TagBlockSize())
- } else if !reflect.DeepEqual(e.SeriesIDs(), []uint32{1, 3, 4}) {
+ } else if !reflect.DeepEqual(e.SeriesIDs(), []uint64{1, 3, 4}) {
t.Fatalf("unexpected series data: %#v", e.SeriesIDs())
}
@@ -142,7 +142,7 @@ func TestMeasurementBlockWriter(t *testing.T) {
t.Fatal("expected element")
} else if e.TagBlockOffset() != 200 || e.TagBlockSize() != 20 {
t.Fatalf("unexpected offset/size: %v/%v", e.TagBlockOffset(), e.TagBlockSize())
- } else if !reflect.DeepEqual(e.SeriesIDs(), []uint32{2}) {
+ } else if !reflect.DeepEqual(e.SeriesIDs(), []uint64{2}) {
t.Fatalf("unexpected series data: %#v", e.SeriesIDs())
}
@@ -150,7 +150,7 @@ func TestMeasurementBlockWriter(t *testing.T) {
t.Fatal("expected element")
} else if e.TagBlockOffset() != 300 || e.TagBlockSize() != 30 {
t.Fatalf("unexpected offset/size: %v/%v", e.TagBlockOffset(), e.TagBlockSize())
- } else if !reflect.DeepEqual(e.SeriesIDs(), []uint32{5, 6}) {
+ } else if !reflect.DeepEqual(e.SeriesIDs(), []uint64{5, 6}) {
t.Fatalf("unexpected series data: %#v", e.SeriesIDs())
}
@@ -167,10 +167,10 @@ type Measurement struct {
Deleted bool
Offset int64
Size int64
- ids []uint32
+ ids []uint64
}
-func NewMeasurement(name []byte, deleted bool, offset, size int64, ids []uint32) Measurement {
+func NewMeasurement(name []byte, deleted bool, offset, size int64, ids []uint64) Measurement {
return Measurement{
Name: name,
Deleted: deleted,
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/partition.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/partition.go
new file mode 100644
index 0000000000..0e7e59d71e
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/partition.go
@@ -0,0 +1,1219 @@
+package tsi1
+
+import (
+ "crypto/rand"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/influxdata/influxdb/models"
+ "github.com/influxdata/influxdb/pkg/bytesutil"
+ "github.com/influxdata/influxdb/pkg/estimator"
+ "github.com/influxdata/influxdb/tsdb"
+ "github.com/influxdata/influxql"
+ "go.uber.org/zap"
+)
+
+// Version is the current version of the TSI index.
+const Version = 1
+
+// DefaultMaxLogFileSize is the default compaction threshold.
+const DefaultMaxLogFileSize = 5 * 1024 * 1024
+
+// File extensions.
+const (
+ LogFileExt = ".tsl"
+ IndexFileExt = ".tsi"
+
+ CompactingExt = ".compacting"
+)
+
+// ManifestFileName is the name of the index manifest file.
+const ManifestFileName = "MANIFEST"
+
+// Partition represents a collection of layered index files and WAL.
+type Partition struct {
+ mu sync.RWMutex
+ opened bool
+
+ sfile *tsdb.SeriesFile // series lookup file
+ activeLogFile *LogFile // current log file
+ fileSet *FileSet // current file set
+ seq int // file id sequence
+
+ // Fast series lookup of series IDs in the series file that have been present
+ // in this partition. This set tracks both insertions and deletions of a series.
+ seriesSet *tsdb.SeriesIDSet
+
+ // Compaction management
+ levels []CompactionLevel // compaction levels
+ levelCompacting []bool // level compaction status
+
+ // Close management.
+ once sync.Once
+ closing chan struct{} // closing is used to inform iterators the partition is closing.
+ wg sync.WaitGroup
+
+ // Fieldset shared with engine.
+ fieldset *tsdb.MeasurementFieldSet
+
+ // Name of database.
+ Database string
+
+ // Directory of the Partition's index files.
+ path string
+ id string // id portion of path.
+
+ // Log file compaction thresholds.
+ MaxLogFileSize int64
+
+ // Frequency of compaction checks.
+ compactionsDisabled bool
+ compactionMonitorInterval time.Duration
+
+ logger *zap.Logger
+
+ // Current size of MANIFEST. Used to determine partition size.
+ manifestSize int64
+
+ // Index's version.
+ version int
+}
+
+// NewPartition returns a new instance of Partition.
+func NewPartition(sfile *tsdb.SeriesFile, path string) *Partition {
+ return &Partition{
+ closing: make(chan struct{}),
+ path: path,
+ sfile: sfile,
+ seriesSet: tsdb.NewSeriesIDSet(),
+
+ // Default compaction thresholds.
+ MaxLogFileSize: DefaultMaxLogFileSize,
+ // compactionEnabled: true,
+
+ logger: zap.NewNop(),
+ version: Version,
+ }
+}
+
+// ErrIncompatibleVersion is returned when attempting to read from an
+// incompatible tsi1 manifest file.
+var ErrIncompatibleVersion = errors.New("incompatible tsi1 index MANIFEST")
+
+// Open opens the partition.
+func (i *Partition) Open() error {
+ i.mu.Lock()
+ defer i.mu.Unlock()
+
+ i.closing = make(chan struct{})
+
+ if i.opened {
+ return errors.New("index partition already open")
+ }
+
+ // Validate path is correct.
+ i.id = filepath.Base(i.path)
+ _, err := strconv.Atoi(i.id)
+ if err != nil {
+ return err
+ }
+
+ // Create directory if it doesn't exist.
+ if err := os.MkdirAll(i.path, 0777); err != nil {
+ return err
+ }
+
+ // Read manifest file.
+ m, manifestSize, err := ReadManifestFile(filepath.Join(i.path, ManifestFileName))
+ if os.IsNotExist(err) {
+ m = NewManifest(i.ManifestPath())
+ } else if err != nil {
+ return err
+ }
+ // Set manifest size on the partition
+ i.manifestSize = manifestSize
+
+ // Check to see if the MANIFEST file is compatible with the current Index.
+ if err := m.Validate(); err != nil {
+ return err
+ }
+
+ // Copy compaction levels to the index.
+ i.levels = make([]CompactionLevel, len(m.Levels))
+ copy(i.levels, m.Levels)
+
+ // Set up flags to track whether a level is compacting.
+ i.levelCompacting = make([]bool, len(i.levels))
+
+ // Open each file in the manifest.
+ var files []File
+ for _, filename := range m.Files {
+ switch filepath.Ext(filename) {
+ case LogFileExt:
+ f, err := i.openLogFile(filepath.Join(i.path, filename))
+ if err != nil {
+ return err
+ }
+ files = append(files, f)
+
+ // Make first log file active, if within threshold.
+ sz, _ := f.Stat()
+ if i.activeLogFile == nil && sz < i.MaxLogFileSize {
+ i.activeLogFile = f
+ }
+
+ case IndexFileExt:
+ f, err := i.openIndexFile(filepath.Join(i.path, filename))
+ if err != nil {
+ return err
+ }
+ files = append(files, f)
+ }
+ }
+ fs, err := NewFileSet(i.Database, i.levels, i.sfile, files)
+ if err != nil {
+ return err
+ }
+ i.fileSet = fs
+
+ // Set initial sequence number.
+ i.seq = i.fileSet.MaxID()
+
+ // Delete any files not in the manifest.
+ if err := i.deleteNonManifestFiles(m); err != nil {
+ return err
+ }
+
+ // Ensure a log file exists.
+ if i.activeLogFile == nil {
+ if err := i.prependActiveLogFile(); err != nil {
+ return err
+ }
+ }
+
+ // Build series existence set.
+ if err := i.buildSeriesSet(); err != nil {
+ return err
+ }
+
+ // Mark opened.
+ i.opened = true
+
+ // Send a compaction request on start up.
+ i.compact()
+
+ return nil
+}
+
+// openLogFile opens a log file at the given path and returns it.
+func (i *Partition) openLogFile(path string) (*LogFile, error) {
+ f := NewLogFile(i.sfile, path)
+ if err := f.Open(); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// openIndexFile opens an index file at the given path and returns it.
+func (i *Partition) openIndexFile(path string) (*IndexFile, error) {
+ f := NewIndexFile(i.sfile)
+ f.SetPath(path)
+ if err := f.Open(); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// deleteNonManifestFiles removes all files not in the manifest.
+func (i *Partition) deleteNonManifestFiles(m *Manifest) error {
+ dir, err := os.Open(i.path)
+ if err != nil {
+ return err
+ }
+ defer dir.Close()
+
+ fis, err := dir.Readdir(-1)
+ if err != nil {
+ return err
+ }
+
+ // Loop over all files and remove any not in the manifest.
+ for _, fi := range fis {
+ filename := filepath.Base(fi.Name())
+ if filename == ManifestFileName || m.HasFile(filename) {
+ continue
+ }
+
+ if err := os.RemoveAll(filename); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (i *Partition) buildSeriesSet() error {
+ fs := i.retainFileSet()
+ defer fs.Release()
+
+ i.seriesSet = tsdb.NewSeriesIDSet()
+
+ mitr := fs.MeasurementIterator()
+ if mitr == nil {
+ return nil
+ }
+
+ // Iterate over each measurement.
+ for {
+ me := mitr.Next()
+ if me == nil {
+ return nil
+ }
+
+ // Iterate over each series id.
+ if err := func() error {
+ sitr := fs.MeasurementSeriesIDIterator(me.Name())
+ if sitr == nil {
+ return nil
+ }
+ defer sitr.Close()
+
+ for {
+ elem, err := sitr.Next()
+ if err != nil {
+ return err
+ } else if elem.SeriesID == 0 {
+ return nil
+ }
+
+ // Add id to series set.
+ i.seriesSet.Add(elem.SeriesID)
+ }
+ }(); err != nil {
+ return err
+ }
+ }
+}
+
+// Wait returns once outstanding compactions have finished.
+func (i *Partition) Wait() {
+ i.wg.Wait()
+}
+
+// Close closes the index.
+func (i *Partition) Close() error {
+ // Wait for goroutines to finish outstanding compactions.
+ i.wg.Wait()
+
+ // Lock index and close remaining files.
+ i.mu.Lock()
+ defer i.mu.Unlock()
+
+ i.once.Do(func() { close(i.closing) })
+
+ // Close log files.
+ for _, f := range i.fileSet.files {
+ f.Close()
+ }
+ i.fileSet.files = nil
+
+ return nil
+}
+
+// closing returns true if the partition is currently closing. It does not require
+// a lock so it will always return to callers.
+// func (i *Partition) closing() bool {
+// select {
+// case <-i.closing:
+// return true
+// default:
+// return false
+// }
+// }
+
+// Path returns the path to the partition.
+func (i *Partition) Path() string { return i.path }
+
+// SeriesFile returns the attached series file.
+func (i *Partition) SeriesFile() *tsdb.SeriesFile { return i.sfile }
+
+// NextSequence returns the next file identifier.
+func (i *Partition) NextSequence() int {
+ i.mu.Lock()
+ defer i.mu.Unlock()
+ return i.nextSequence()
+}
+
+func (i *Partition) nextSequence() int {
+ i.seq++
+ return i.seq
+}
+
+// ManifestPath returns the path to the index's manifest file.
+func (i *Partition) ManifestPath() string {
+ return filepath.Join(i.path, ManifestFileName)
+}
+
+// Manifest returns a manifest for the index.
+func (i *Partition) Manifest() *Manifest {
+ m := &Manifest{
+ Levels: i.levels,
+ Files: make([]string, len(i.fileSet.files)),
+ Version: i.version,
+ path: i.ManifestPath(),
+ }
+
+ for j, f := range i.fileSet.files {
+ m.Files[j] = filepath.Base(f.Path())
+ }
+
+ return m
+}
+
+// WithLogger sets the logger for the index.
+func (i *Partition) WithLogger(logger *zap.Logger) {
+ i.logger = logger.With(zap.String("index", "tsi"))
+}
+
+// SetFieldSet sets a shared field set from the engine.
+func (i *Partition) SetFieldSet(fs *tsdb.MeasurementFieldSet) {
+ i.mu.Lock()
+ i.fieldset = fs
+ i.mu.Unlock()
+}
+
+// FieldSet returns the fieldset.
+func (i *Partition) FieldSet() *tsdb.MeasurementFieldSet {
+ i.mu.Lock()
+ fs := i.fieldset
+ i.mu.Unlock()
+ return fs
+}
+
+// RetainFileSet returns the current fileset and adds a reference count.
+func (i *Partition) RetainFileSet() (*FileSet, error) {
+ select {
+ case <-i.closing:
+ return nil, errors.New("index is closing")
+ default:
+ i.mu.RLock()
+ defer i.mu.RUnlock()
+ return i.retainFileSet(), nil
+ }
+}
+
+func (i *Partition) retainFileSet() *FileSet {
+ fs := i.fileSet
+ fs.Retain()
+ return fs
+}
+
+// FileN returns the number of active files in the file set.
+func (i *Partition) FileN() int { return len(i.fileSet.files) }
+
+// prependActiveLogFile adds a new log file so that the current log file can be compacted.
+func (i *Partition) prependActiveLogFile() error {
+ // Open file and insert it into the first position.
+ f, err := i.openLogFile(filepath.Join(i.path, FormatLogFileName(i.nextSequence())))
+ if err != nil {
+ return err
+ }
+ i.activeLogFile = f
+
+ // Prepend and generate new fileset.
+ i.fileSet = i.fileSet.PrependLogFile(f)
+
+ // Write new manifest.
+ manifestSize, err := i.Manifest().Write()
+ if err != nil {
+ // TODO: Close index if write fails.
+ return err
+ }
+ i.manifestSize = manifestSize
+ return nil
+}
+
+// ForEachMeasurementName iterates over all measurement names in the index.
+func (i *Partition) ForEachMeasurementName(fn func(name []byte) error) error {
+ fs, err := i.RetainFileSet()
+ if err != nil {
+ return err
+ }
+ defer fs.Release()
+
+ itr := fs.MeasurementIterator()
+ if itr == nil {
+ return nil
+ }
+
+ for e := itr.Next(); e != nil; e = itr.Next() {
+ if err := fn(e.Name()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MeasurementIterator returns an iterator over all measurement names.
+func (i *Partition) MeasurementIterator() (tsdb.MeasurementIterator, error) {
+ fs, err := i.RetainFileSet()
+ if err != nil {
+ return nil, err
+ }
+ itr := fs.MeasurementIterator()
+ if itr == nil {
+ fs.Release()
+ return nil, nil
+ }
+ return newFileSetMeasurementIterator(fs, NewTSDBMeasurementIteratorAdapter(itr)), nil
+}
+
+// MeasurementExists returns true if a measurement exists.
+func (i *Partition) MeasurementExists(name []byte) (bool, error) {
+ fs, err := i.RetainFileSet()
+ if err != nil {
+ return false, err
+ }
+ defer fs.Release()
+ m := fs.Measurement(name)
+ return m != nil && !m.Deleted(), nil
+}
+
+func (i *Partition) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
+ fs, err := i.RetainFileSet()
+ if err != nil {
+ return nil, err
+ }
+ defer fs.Release()
+
+ itr := fs.MeasurementIterator()
+ if itr == nil {
+ return nil, nil
+ }
+
+ var a [][]byte
+ for e := itr.Next(); e != nil; e = itr.Next() {
+ if re.Match(e.Name()) {
+ // Clone bytes since they will be used after the fileset is released.
+ a = append(a, bytesutil.Clone(e.Name()))
+ }
+ }
+ return a, nil
+}
+
+func (i *Partition) MeasurementSeriesIDIterator(name []byte) (tsdb.SeriesIDIterator, error) {
+ fs, err := i.RetainFileSet()
+ if err != nil {
+ return nil, err
+ }
+ return newFileSetSeriesIDIterator(fs, fs.MeasurementSeriesIDIterator(name)), nil
+}
+
+// DropMeasurement deletes a measurement from the index. DropMeasurement does
+// not remove any series from the index directly.
+func (i *Partition) DropMeasurement(name []byte) error {
+ fs, err := i.RetainFileSet()
+ if err != nil {
+ return err
+ }
+ defer fs.Release()
+
+ // Delete all keys and values.
+ if kitr := fs.TagKeyIterator(name); kitr != nil {
+ for k := kitr.Next(); k != nil; k = kitr.Next() {
+ // Delete key if not already deleted.
+ if !k.Deleted() {
+ if err := func() error {
+ i.mu.RLock()
+ defer i.mu.RUnlock()
+ return i.activeLogFile.DeleteTagKey(name, k.Key())
+ }(); err != nil {
+ return err
+ }
+ }
+
+ // Delete each value in key.
+ if vitr := k.TagValueIterator(); vitr != nil {
+ for v := vitr.Next(); v != nil; v = vitr.Next() {
+ if !v.Deleted() {
+ if err := func() error {
+ i.mu.RLock()
+ defer i.mu.RUnlock()
+ return i.activeLogFile.DeleteTagValue(name, k.Key(), v.Value())
+ }(); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Mark measurement as deleted.
+ if err := func() error {
+ i.mu.RLock()
+ defer i.mu.RUnlock()
+ return i.activeLogFile.DeleteMeasurement(name)
+ }(); err != nil {
+ return err
+ }
+
+ // Check if the log file needs to be swapped.
+ if err := i.CheckLogFile(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// createSeriesListIfNotExists creates a list of series in bulk if they don't
+// already exist.
+func (i *Partition) createSeriesListIfNotExists(names [][]byte, tagsSlice []models.Tags) error {
+ // Maintain reference count on files in file set.
+ fs, err := i.RetainFileSet()
+ if err != nil {
+ return err
+ }
+ defer fs.Release()
+
+ // Ensure fileset cannot change during insert.
+ i.mu.RLock()
+ // Insert series into log file.
+ if err := i.activeLogFile.AddSeriesList(i.seriesSet, names, tagsSlice); err != nil {
+ i.mu.RUnlock()
+ return err
+ }
+ i.mu.RUnlock()
+
+ return i.CheckLogFile()
+}
+
+func (i *Partition) DropSeries(key []byte, ts int64) error {
+ // TODO: Use ts.
+
+ if err := func() error {
+ i.mu.RLock()
+ defer i.mu.RUnlock()
+
+ name, tags := models.ParseKey(key)
+ mname := []byte(name)
+ seriesID := i.sfile.SeriesID(mname, tags, nil)
+
+ // Remove from series id set.
+ i.seriesSet.Remove(seriesID)
+
+ // TODO(edd): this should only happen when there are no shards containing
+ // this series.
+ if err := i.sfile.DeleteSeriesID(seriesID); err != nil {
+ return err
+ }
+
+ return nil
+ }(); err != nil {
+ return err
+ }
+
+ // Swap log file, if necessary.
+ if err := i.CheckLogFile(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// MeasurementsSketches returns the two sketches for the index by merging all
+// instances of the sketch types in all the index files.
+func (i *Partition) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {
+ fs, err := i.RetainFileSet()
+ if err != nil {
+ return nil, nil, err
+ }
+ defer fs.Release()
+ return fs.MeasurementsSketches()
+}
+
+// HasTagKey returns true if tag key exists.
+func (i *Partition) HasTagKey(name, key []byte) (bool, error) {
+ fs, err := i.RetainFileSet()
+ if err != nil {
+ return false, err
+ }
+ defer fs.Release()
+ return fs.HasTagKey(name, key), nil
+}
+
+// HasTagValue returns true if tag value exists.
+func (i *Partition) HasTagValue(name, key, value []byte) (bool, error) {
+ fs, err := i.RetainFileSet()
+ if err != nil {
+ return false, err
+ }
+ defer fs.Release()
+ return fs.HasTagValue(name, key, value), nil
+}
+
+// TagKeyIterator returns an iterator for all keys across a single measurement.
+func (i *Partition) TagKeyIterator(name []byte) tsdb.TagKeyIterator {
+ fs, err := i.RetainFileSet()
+ if err != nil {
+ return nil // TODO(edd): this should probably return an error.
+ }
+
+ itr := fs.TagKeyIterator(name)
+ if itr == nil {
+ fs.Release()
+ return nil
+ }
+ return newFileSetTagKeyIterator(fs, NewTSDBTagKeyIteratorAdapter(itr))
+}
+
+// TagValueIterator returns an iterator for all values across a single key.
+func (i *Partition) TagValueIterator(name, key []byte) tsdb.TagValueIterator {
+ fs, err := i.RetainFileSet()
+ if err != nil {
+ return nil // TODO(edd): this should probably return an error.
+ }
+
+ itr := fs.TagValueIterator(name, key)
+ if itr == nil {
+ fs.Release()
+ return nil
+ }
+ return newFileSetTagValueIterator(fs, NewTSDBTagValueIteratorAdapter(itr))
+}
+
+// TagKeySeriesIDIterator returns a series iterator for all values across a single key.
+func (i *Partition) TagKeySeriesIDIterator(name, key []byte) tsdb.SeriesIDIterator {
+ fs, err := i.RetainFileSet()
+ if err != nil {
+ return nil // TODO(edd): this should probably return an error.
+ }
+
+ itr := fs.TagKeySeriesIDIterator(name, key)
+ if itr == nil {
+ fs.Release()
+ return nil
+ }
+ return newFileSetSeriesIDIterator(fs, itr)
+}
+
+// TagValueSeriesIDIterator returns a series iterator for a single key value.
+func (i *Partition) TagValueSeriesIDIterator(name, key, value []byte) tsdb.SeriesIDIterator {
+ fs, err := i.RetainFileSet()
+ if err != nil {
+ return nil // TODO(edd): this should probably return an error.
+ }
+
+ itr := fs.TagValueSeriesIDIterator(name, key, value)
+ if itr == nil {
+ fs.Release()
+ return nil
+ }
+ return newFileSetSeriesIDIterator(fs, itr)
+}
+
+// MeasurementTagKeysByExpr extracts the tag keys wanted by the expression.
+func (i *Partition) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) {
+ fs, err := i.RetainFileSet()
+ if err != nil {
+ return nil, err
+ }
+ defer fs.Release()
+
+ return fs.MeasurementTagKeysByExpr(name, expr)
+}
+
+// ForEachMeasurementTagKey iterates over all tag keys in a measurement.
+func (i *Partition) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error {
+ fs, err := i.RetainFileSet()
+ if err != nil {
+ return err
+ }
+ defer fs.Release()
+
+ itr := fs.TagKeyIterator(name)
+ if itr == nil {
+ return nil
+ }
+
+ for e := itr.Next(); e != nil; e = itr.Next() {
+ if err := fn(e.Key()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// TagKeyCardinality always returns zero.
+// It is not possible to determine cardinality of tags across index files.
+func (i *Partition) TagKeyCardinality(name, key []byte) int {
+ return 0
+}
+
+func (i *Partition) SetFieldName(measurement []byte, name string) {}
+func (i *Partition) RemoveShard(shardID uint64) {}
+func (i *Partition) AssignShard(k string, shardID uint64) {}
+
+// Compact requests a compaction of log files.
+func (i *Partition) Compact() {
+ i.mu.Lock()
+ defer i.mu.Unlock()
+ i.compact()
+}
+
+// compact compacts contiguous groups of files that are not currently compacting.
+func (i *Partition) compact() {
+ if i.compactionsDisabled {
+ return
+ }
+
+ fs := i.retainFileSet()
+ defer fs.Release()
+
+ // Iterate over each level we are going to compact.
+ // We skip the first level (0) because it is log files and they are compacted separately.
+ // We skip the last level because the files have no higher level to compact into.
+ minLevel, maxLevel := 1, len(i.levels)-2
+ for level := minLevel; level <= maxLevel; level++ {
+ // Skip level if it is currently compacting.
+ if i.levelCompacting[level] {
+ continue
+ }
+
+ // Collect contiguous files from the end of the level.
+ files := fs.LastContiguousIndexFilesByLevel(level)
+ if len(files) < 2 {
+ continue
+ } else if len(files) > MaxIndexMergeCount {
+ files = files[len(files)-MaxIndexMergeCount:]
+ }
+
+ // Retain files during compaction.
+ IndexFiles(files).Retain()
+
+ // Mark the level as compacting.
+ i.levelCompacting[level] = true
+
+ // Execute in closure to save reference to the group within the loop.
+ func(files []*IndexFile, level int) {
+ // Start compacting in a separate goroutine.
+ i.wg.Add(1)
+ go func() {
+ defer i.wg.Done()
+
+ // Compact to a new level.
+ i.compactToLevel(files, level+1)
+
+ // Ensure compaction lock for the level is released.
+ i.mu.Lock()
+ i.levelCompacting[level] = false
+ i.mu.Unlock()
+
+ // Check for new compactions
+ i.Compact()
+ }()
+ }(files, level)
+ }
+}
+
+// compactToLevel compacts a set of files into a new file. Replaces old files with
+// compacted file on successful completion. This runs in a separate goroutine.
+func (i *Partition) compactToLevel(files []*IndexFile, level int) {
+ assert(len(files) >= 2, "at least two index files are required for compaction")
+ assert(level > 0, "cannot compact level zero")
+
+ // Build a logger for this compaction.
+ logger := i.logger.With(zap.String("token", generateCompactionToken()))
+
+ // Files have already been retained by caller.
+ // Ensure files are released only once.
+ var once sync.Once
+ defer once.Do(func() { IndexFiles(files).Release() })
+
+ // Track time to compact.
+ start := time.Now()
+
+ // Create new index file.
+ path := filepath.Join(i.path, FormatIndexFileName(i.NextSequence(), level))
+ f, err := os.Create(path)
+ if err != nil {
+ logger.Error("cannot create compaction files", zap.Error(err))
+ return
+ }
+ defer f.Close()
+
+ logger.Info("performing full compaction",
+ zap.String("src", joinIntSlice(IndexFiles(files).IDs(), ",")),
+ zap.String("dst", path),
+ )
+
+ // Compact all index files to new index file.
+ lvl := i.levels[level]
+ n, err := IndexFiles(files).CompactTo(f, i.sfile, lvl.M, lvl.K)
+ if err != nil {
+ logger.Error("cannot compact index files", zap.Error(err))
+ return
+ }
+
+ // Close file.
+ if err := f.Close(); err != nil {
+ logger.Error("error closing index file", zap.Error(err))
+ return
+ }
+
+ // Reopen as an index file.
+ file := NewIndexFile(i.sfile)
+ file.SetPath(path)
+ if err := file.Open(); err != nil {
+ logger.Error("cannot open new index file", zap.Error(err))
+ return
+ }
+
+ // Obtain lock to swap in index file and write manifest.
+ if err := func() error {
+ i.mu.Lock()
+ defer i.mu.Unlock()
+
+ // Replace previous files with new index file.
+ i.fileSet = i.fileSet.MustReplace(IndexFiles(files).Files(), file)
+
+ // Write new manifest.
+ manifestSize, err := i.Manifest().Write()
+ if err != nil {
+ // TODO: Close index if write fails.
+ return err
+ }
+ i.manifestSize = manifestSize
+ return nil
+ }(); err != nil {
+ logger.Error("cannot write manifest", zap.Error(err))
+ return
+ }
+
+ elapsed := time.Since(start)
+ logger.Info("full compaction complete",
+ zap.String("path", path),
+ zap.String("elapsed", elapsed.String()),
+ zap.Int64("bytes", n),
+ zap.Int("kb_per_sec", int(float64(n)/elapsed.Seconds())/1024),
+ )
+
+ // Release old files.
+ once.Do(func() { IndexFiles(files).Release() })
+
+ // Close and delete all old index files.
+ for _, f := range files {
+ logger.Info("removing index file", zap.String("path", f.Path()))
+
+ if err := f.Close(); err != nil {
+ logger.Error("cannot close index file", zap.Error(err))
+ return
+ } else if err := os.Remove(f.Path()); err != nil {
+ logger.Error("cannot remove index file", zap.Error(err))
+ return
+ }
+ }
+}
+
+func (i *Partition) Rebuild() {}
+
+func (i *Partition) CheckLogFile() error {
+ // Check log file size under read lock.
+ if size := func() int64 {
+ i.mu.RLock()
+ defer i.mu.RUnlock()
+ return i.activeLogFile.Size()
+ }(); size < i.MaxLogFileSize {
+ return nil
+ }
+
+ // If file size exceeded then recheck under write lock and swap files.
+ i.mu.Lock()
+ defer i.mu.Unlock()
+ return i.checkLogFile()
+}
+
+func (i *Partition) checkLogFile() error {
+ if i.activeLogFile.Size() < i.MaxLogFileSize {
+ return nil
+ }
+
+ // Swap current log file.
+ logFile := i.activeLogFile
+
+ // Open new log file and insert it into the first position.
+ if err := i.prependActiveLogFile(); err != nil {
+ return err
+ }
+
+ // Begin compacting in a background goroutine.
+ i.wg.Add(1)
+ go func() {
+ defer i.wg.Done()
+ i.compactLogFile(logFile)
+ i.Compact() // check for new compactions
+ }()
+
+ return nil
+}
+
+// compactLogFile compacts logFile into a tsi file. The new file will share the
+// same identifier but will have a ".tsi" extension. Once the log file is
+// compacted, the manifest is updated and the log file is discarded.
+func (i *Partition) compactLogFile(logFile *LogFile) {
+ start := time.Now()
+
+ // Retrieve identifier from current path.
+ id := logFile.ID()
+ assert(id != 0, "cannot parse log file id: %s", logFile.Path())
+
+ // Build a logger for this compaction.
+ logger := i.logger.With(
+ zap.String("token", generateCompactionToken()),
+ zap.Int("id", id),
+ )
+
+ // Create new index file.
+ path := filepath.Join(i.path, FormatIndexFileName(id, 1))
+ f, err := os.Create(path)
+ if err != nil {
+ logger.Error("cannot create index file", zap.Error(err))
+ return
+ }
+ defer f.Close()
+
+ // Compact log file to new index file.
+ lvl := i.levels[1]
+ n, err := logFile.CompactTo(f, lvl.M, lvl.K)
+ if err != nil {
+ logger.Error("cannot compact log file", zap.Error(err), zap.String("path", logFile.Path()))
+ return
+ }
+
+ // Close file.
+ if err := f.Close(); err != nil {
+ logger.Error("cannot close log file", zap.Error(err))
+ return
+ }
+
+ // Reopen as an index file.
+ file := NewIndexFile(i.sfile)
+ file.SetPath(path)
+ if err := file.Open(); err != nil {
+ logger.Error("cannot open compacted index file", zap.Error(err), zap.String("path", file.Path()))
+ return
+ }
+
+ // Obtain lock to swap in index file and write manifest.
+ if err := func() error {
+ i.mu.Lock()
+ defer i.mu.Unlock()
+
+ // Replace previous log file with index file.
+ i.fileSet = i.fileSet.MustReplace([]File{logFile}, file)
+
+ // Write new manifest.
+ manifestSize, err := i.Manifest().Write()
+ if err != nil {
+ // TODO: Close index if write fails.
+ return err
+ }
+
+ i.manifestSize = manifestSize
+ return nil
+ }(); err != nil {
+ logger.Error("cannot update manifest", zap.Error(err))
+ return
+ }
+
+ elapsed := time.Since(start)
+ logger.Info("log file compacted",
+ zap.String("elapsed", elapsed.String()),
+ zap.Int64("bytes", n),
+ zap.Int("kb_per_sec", int(float64(n)/elapsed.Seconds())/1024),
+ )
+
+ // Closing the log file will automatically wait until the ref count is zero.
+ if err := logFile.Close(); err != nil {
+ logger.Error("cannot close log file", zap.Error(err))
+ return
+ } else if err := os.Remove(logFile.Path()); err != nil {
+ logger.Error("cannot remove log file", zap.Error(err))
+ return
+ }
+
+ return
+}
+
+// unionStringSets returns the union of two sets.
+func unionStringSets(a, b map[string]struct{}) map[string]struct{} {
+ other := make(map[string]struct{})
+ for k := range a {
+ other[k] = struct{}{}
+ }
+ for k := range b {
+ other[k] = struct{}{}
+ }
+ return other
+}
+
+// intersectStringSets returns the intersection of two sets.
+func intersectStringSets(a, b map[string]struct{}) map[string]struct{} {
+ if len(a) < len(b) {
+ a, b = b, a
+ }
+
+ other := make(map[string]struct{})
+ for k := range a {
+ if _, ok := b[k]; ok {
+ other[k] = struct{}{}
+ }
+ }
+ return other
+}
+
+var fileIDRegex = regexp.MustCompile(`^L(\d+)-(\d+)\..+$`)
+
+// ParseFilename extracts the numeric compaction level and file id from a log
+// or index file path. Returns zero values if the name cannot be parsed.
+func ParseFilename(name string) (level, id int) {
+ a := fileIDRegex.FindStringSubmatch(filepath.Base(name))
+ if a == nil {
+ return 0, 0
+ }
+
+ level, _ = strconv.Atoi(a[1])
+ id, _ = strconv.Atoi(a[2])
+ return id, level
+}
+
+// Manifest represents the list of log & index files that make up the index.
+// The files are listed in time order, not necessarily ID order.
+type Manifest struct {
+ Levels []CompactionLevel `json:"levels,omitempty"`
+ Files []string `json:"files,omitempty"`
+
+ // Version should be updated whenever the TSI format has changed.
+ Version int `json:"version,omitempty"`
+
+ path string // location on disk of the manifest.
+}
+
+// NewManifest returns a new instance of Manifest with default compaction levels.
+func NewManifest(path string) *Manifest {
+ m := &Manifest{
+ Levels: make([]CompactionLevel, len(DefaultCompactionLevels)),
+ Version: Version,
+ path: path,
+ }
+ copy(m.Levels, DefaultCompactionLevels[:])
+ return m
+}
+
+// HasFile returns true if name is listed in the log files or index files.
+func (m *Manifest) HasFile(name string) bool {
+ for _, filename := range m.Files {
+ if filename == name {
+ return true
+ }
+ }
+ return false
+}
+
+// Validate checks if the Manifest's version is compatible with this version
+// of the tsi1 index.
+func (m *Manifest) Validate() error {
+ // If we don't have an explicit version in the manifest file then we know
+ // it's not compatible with the latest tsi1 Index.
+ if m.Version != Version {
+ return ErrIncompatibleVersion
+ }
+ return nil
+}
+
+// Write writes the manifest to its path on disk, returning the number of
+// bytes written and an error, if any.
+func (m *Manifest) Write() (int64, error) {
+ buf, err := json.MarshalIndent(m, "", " ")
+ if err != nil {
+ return 0, err
+ }
+ buf = append(buf, '\n')
+
+ if err := ioutil.WriteFile(m.path, buf, 0666); err != nil {
+ return 0, err
+ }
+ return int64(len(buf)), nil
+}
+
+// ReadManifestFile reads a manifest from a file path and returns the Manifest,
+// the size of the manifest on disk, and any error if appropriate.
+func ReadManifestFile(path string) (*Manifest, int64, error) {
+ buf, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ // Decode manifest.
+ var m Manifest
+ if err := json.Unmarshal(buf, &m); err != nil {
+ return nil, 0, err
+ }
+
+ // Set the path of the manifest.
+ m.path = path
+ return &m, int64(len(buf)), nil
+}
+
+func joinIntSlice(a []int, sep string) string {
+ other := make([]string, len(a))
+ for i := range a {
+ other[i] = strconv.Itoa(a[i])
+ }
+ return strings.Join(other, sep)
+}
+
+// CompactionLevel represents a grouping of index files based on bloom filter
+// settings. By having the same bloom filter settings, the filters
+// can be merged and evaluated at a higher level.
+type CompactionLevel struct {
+ // Bloom filter bit size & hash count
+ M uint64 `json:"m,omitempty"`
+ K uint64 `json:"k,omitempty"`
+}
+
+// DefaultCompactionLevels are the default compaction level settings used by the index.
+var DefaultCompactionLevels = []CompactionLevel{
+ {M: 0, K: 0}, // L0: Log files, no filter.
+ {M: 1 << 25, K: 6}, // L1: Initial compaction
+ {M: 1 << 25, K: 6}, // L2
+ {M: 1 << 26, K: 6}, // L3
+ {M: 1 << 27, K: 6}, // L4
+ {M: 1 << 28, K: 6}, // L5
+ {M: 1 << 29, K: 6}, // L6
+ {M: 1 << 30, K: 6}, // L7
+}
+
+// MaxIndexMergeCount is the maximum number of files that can be merged together at once.
+const MaxIndexMergeCount = 2
+
+// MaxIndexFileSize is the maximum expected size of an index file.
+const MaxIndexFileSize = 4 * (1 << 30)
+
+// generateCompactionToken returns a short token to track an individual compaction.
+// It is only used for logging so it doesn't need strong uniqueness guarantees.
+func generateCompactionToken() string {
+ token := make([]byte, 3)
+ rand.Read(token)
+ return fmt.Sprintf("%x", token)
+}
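CheckLogFile and checkLogFile above use a double-checked pattern: the active log file's size is compared against MaxLogFileSize under a read lock first, and only if the threshold is exceeded is the write lock taken and the check repeated before the file is swapped. Here is a self-contained, stdlib-only sketch of that locking discipline with simplified types; it is an illustration, not the vendored implementation.

package main

import (
	"fmt"
	"sync"
)

type swapper struct {
	mu   sync.RWMutex
	size int64
	max  int64
}

// maybeSwap mirrors CheckLogFile: cheap check under RLock, recheck under Lock.
func (s *swapper) maybeSwap() {
	s.mu.RLock()
	size := s.size
	s.mu.RUnlock()
	if size < s.max {
		return
	}

	s.mu.Lock()
	defer s.mu.Unlock()
	// Recheck: another goroutine may have swapped while we waited for the lock.
	if s.size < s.max {
		return
	}
	s.size = 0 // "swap" in a fresh log file
	fmt.Println("swapped active log file")
}

func main() {
	s := &swapper{size: 10, max: 5}
	s.maybeSwap()
}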
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/partition_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/partition_test.go
new file mode 100644
index 0000000000..c278863e72
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/partition_test.go
@@ -0,0 +1,119 @@
+package tsi1_test
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/influxdata/influxdb/tsdb"
+ "github.com/influxdata/influxdb/tsdb/index/tsi1"
+)
+
+func TestPartition_Open(t *testing.T) {
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ // Opening a fresh index should set the MANIFEST version to current version.
+ p := NewPartition(sfile.SeriesFile)
+ t.Run("open new index", func(t *testing.T) {
+ if err := p.Open(); err != nil {
+ t.Fatal(err)
+ }
+
+ // Check version set appropriately.
+ if got, exp := p.Manifest().Version, 1; got != exp {
+ t.Fatalf("got index version %d, expected %d", got, exp)
+ }
+ })
+
+ // Reopening an open index should return an error.
+ t.Run("reopen open index", func(t *testing.T) {
+ err := p.Open()
+ if err == nil {
+ p.Close()
+ t.Fatal("didn't get an error on reopen, but expected one")
+ }
+ p.Close()
+ })
+
+ // Opening an incompatible index should return an error.
+ incompatibleVersions := []int{-1, 0, 2}
+ for _, v := range incompatibleVersions {
+ t.Run(fmt.Sprintf("incompatible index version: %d", v), func(t *testing.T) {
+ p = NewPartition(sfile.SeriesFile)
+ // Manually create a MANIFEST file for an incompatible index version.
+ mpath := filepath.Join(p.Path(), tsi1.ManifestFileName)
+ m := tsi1.NewManifest(mpath)
+ m.Levels = nil
+ m.Version = v // Set example MANIFEST version.
+ if _, err := m.Write(); err != nil {
+ t.Fatal(err)
+ }
+
+ // Log the MANIFEST file.
+ data, err := ioutil.ReadFile(mpath)
+ if err != nil {
+ panic(err)
+ }
+ t.Logf("Incompatible MANIFEST: %s", data)
+
+ // Opening this index should return an error because the MANIFEST has an
+ // incompatible version.
+ err = p.Open()
+ if err != tsi1.ErrIncompatibleVersion {
+ p.Close()
+ t.Fatalf("got error %v, expected %v", err, tsi1.ErrIncompatibleVersion)
+ }
+ })
+ }
+}
+
+func TestPartition_Manifest(t *testing.T) {
+ t.Run("current MANIFEST", func(t *testing.T) {
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ p := MustOpenPartition(sfile.SeriesFile)
+ if got, exp := p.Manifest().Version, tsi1.Version; got != exp {
+ t.Fatalf("got MANIFEST version %d, expected %d", got, exp)
+ }
+ })
+}
+
+// Partition is a test wrapper for tsi1.Partition.
+type Partition struct {
+ *tsi1.Partition
+}
+
+// NewPartition returns a new instance of Partition at a temporary path.
+func NewPartition(sfile *tsdb.SeriesFile) *Partition {
+ return &Partition{Partition: tsi1.NewPartition(sfile, MustTempPartitionDir())}
+}
+
+// MustOpenPartition returns a new, open partition. Panic on error.
+func MustOpenPartition(sfile *tsdb.SeriesFile) *Partition {
+ p := NewPartition(sfile)
+ if err := p.Open(); err != nil {
+ panic(err)
+ }
+ return p
+}
+
+// Close closes and removes the partition directory.
+func (p *Partition) Close() error {
+ defer os.RemoveAll(p.Path())
+ return p.Partition.Close()
+}
+
+// Reopen closes and reopens the partition.
+func (p *Partition) Reopen() error {
+ if err := p.Partition.Close(); err != nil {
+ return err
+ }
+
+ sfile, path := p.SeriesFile(), p.Path()
+ p.Partition = tsi1.NewPartition(sfile, path)
+ return p.Open()
+}
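partition.go and the tests above cover the MANIFEST life cycle: NewManifest seeds the default compaction levels, (*Manifest).Write persists it as indented JSON, ReadManifestFile loads it back along with its on-disk size, and Validate rejects any version other than the current one. The sketch below strings those calls together; it assumes the tsi1 package is importable under the path used in this vendor tree and that the target directory already exists.

package main

import (
	"fmt"
	"log"
	"path/filepath"

	"github.com/influxdata/influxdb/tsdb/index/tsi1"
)

func main() {
	dir := "/tmp/tsi1-example" // hypothetical, pre-existing partition directory
	path := filepath.Join(dir, tsi1.ManifestFileName)

	// Write a manifest with the default compaction levels.
	m := tsi1.NewManifest(path)
	if _, err := m.Write(); err != nil {
		log.Fatal(err)
	}

	// Read it back; the second return value is the manifest's size on disk.
	m2, size, err := tsi1.ReadManifestFile(path)
	if err != nil {
		log.Fatal(err)
	}

	// Validate returns tsi1.ErrIncompatibleVersion for any non-current version.
	if err := m2.Validate(); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("manifest version %d, %d bytes on disk\n", m2.Version, size)
}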
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/series_block.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/series_block.go
deleted file mode 100644
index 0f361cb165..0000000000
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/series_block.go
+++ /dev/null
@@ -1,990 +0,0 @@
-package tsi1
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "os"
- "sort"
-
- "github.com/influxdata/influxdb/models"
- "github.com/influxdata/influxdb/pkg/bloom"
- "github.com/influxdata/influxdb/pkg/estimator"
- "github.com/influxdata/influxdb/pkg/estimator/hll"
- "github.com/influxdata/influxdb/pkg/mmap"
- "github.com/influxdata/influxdb/pkg/rhh"
- "github.com/influxdata/influxdb/tsdb"
- "github.com/influxdata/influxql"
-)
-
-// ErrSeriesOverflow is returned when too many series are added to a series writer.
-var ErrSeriesOverflow = errors.New("series overflow")
-
-// Series list field size constants.
-const (
- // Series list trailer field sizes.
- SeriesBlockTrailerSize = 0 +
- 4 + 4 + // series data offset/size
- 4 + 4 + 4 + // series index offset/size/capacity
- 8 + 4 + 4 + // bloom filter false positive rate, offset/size
- 4 + 4 + // series sketch offset/size
- 4 + 4 + // tombstone series sketch offset/size
- 4 + 4 + // series count and tombstone count
- 0
-
- // Other field sizes
- SeriesCountSize = 4
- SeriesIDSize = 4
-)
-
-// Series flag constants.
-const (
- // Marks the series as having been deleted.
- SeriesTombstoneFlag = 0x01
-
- // Marks the following bytes as a hash index.
- // These bytes should be skipped by an iterator.
- SeriesHashIndexFlag = 0x02
-)
-
-// MaxSeriesBlockHashSize is the maximum number of series in a single hash.
-const MaxSeriesBlockHashSize = (1048576 * LoadFactor) / 100
-
-// SeriesBlock represents the section of the index that holds series data.
-type SeriesBlock struct {
- data []byte
-
- // Series data & index/capacity.
- seriesData []byte
- seriesIndexes []seriesBlockIndex
-
- // Exact series counts for this block.
- seriesN int32
- tombstoneN int32
-
- // Bloom filter used for fast series existence check.
- filter *bloom.Filter
-
- // Series block sketch and tombstone sketch for cardinality estimation.
- // While we have exact counts for the block, these sketches allow us to
- // estimate cardinality across multiple blocks (which might contain
- // duplicate series).
- sketch, tsketch estimator.Sketch
-}
-
-// HasSeries returns flags indicating if the series exists and if it is tombstoned.
-func (blk *SeriesBlock) HasSeries(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool) {
- offset, tombstoned := blk.Offset(name, tags, buf)
- return offset != 0, tombstoned
-}
-
-// Series returns a series element.
-func (blk *SeriesBlock) Series(name []byte, tags models.Tags) tsdb.SeriesElem {
- offset, _ := blk.Offset(name, tags, nil)
- if offset == 0 {
- return nil
- }
-
- var e SeriesBlockElem
- e.UnmarshalBinary(blk.data[offset:])
- return &e
-}
-
-// Offset returns the byte offset of the series within the block.
-func (blk *SeriesBlock) Offset(name []byte, tags models.Tags, buf []byte) (offset uint32, tombstoned bool) {
- // Exit if no series indexes exist.
- if len(blk.seriesIndexes) == 0 {
- return 0, false
- }
-
- // Compute series key.
- buf = AppendSeriesKey(buf[:0], name, tags)
- bufN := uint32(len(buf))
-
- // Quickly check the bloom filter.
- // If the key doesn't exist then we know for sure that it doesn't exist.
- // If it does exist then we need to do a hash index check to verify. False
- // positives are possible with a bloom filter.
- if !blk.filter.Contains(buf) {
- return 0, false
- }
-
- // Find the correct partition.
- // Use previous index unless an exact match on the min value.
- i := sort.Search(len(blk.seriesIndexes), func(i int) bool {
- return CompareSeriesKeys(blk.seriesIndexes[i].min, buf) != -1
- })
- if i >= len(blk.seriesIndexes) || !bytes.Equal(blk.seriesIndexes[i].min, buf) {
- i--
- }
- seriesIndex := blk.seriesIndexes[i]
-
- // Search within partition.
- n := int64(seriesIndex.capacity)
- hash := rhh.HashKey(buf)
- pos := hash % n
-
- // Track current distance
- var d int64
- for {
- // Find offset of series.
- offset := binary.BigEndian.Uint32(seriesIndex.data[pos*SeriesIDSize:])
- if offset == 0 {
- return 0, false
- }
-
- // Evaluate encoded value matches expected.
- key := ReadSeriesKey(blk.data[offset+1 : offset+1+bufN])
- if bytes.Equal(buf, key) {
- return offset, (blk.data[offset] & SeriesTombstoneFlag) != 0
- }
-
- // Check if we've exceeded the probe distance.
- max := rhh.Dist(rhh.HashKey(key), pos, n)
- if d > max {
- return 0, false
- }
-
- // Move position forward.
- pos = (pos + 1) % n
- d++
-
- if d > n {
- return 0, false
- }
- }
-}
-
-// SeriesCount returns the number of series.
-func (blk *SeriesBlock) SeriesCount() uint32 {
- return uint32(blk.seriesN + blk.tombstoneN)
-}
-
-// SeriesIterator returns an iterator over all the series.
-func (blk *SeriesBlock) SeriesIterator() tsdb.SeriesIterator {
- return &seriesBlockIterator{
- n: blk.SeriesCount(),
- offset: 1,
- sblk: blk,
- }
-}
-
-// UnmarshalBinary unpacks data into the series list.
-//
-// If data is an mmap then it should stay open until the series list is no
-// longer used because data access is performed directly from the byte slice.
-func (blk *SeriesBlock) UnmarshalBinary(data []byte) error {
- t := ReadSeriesBlockTrailer(data)
-
- // Save entire block.
- blk.data = data
-
- // Slice series data.
- blk.seriesData = data[t.Series.Data.Offset:]
- blk.seriesData = blk.seriesData[:t.Series.Data.Size]
-
- // Read in all index partitions.
- buf := data[t.Series.Index.Offset:]
- buf = buf[:t.Series.Index.Size]
- blk.seriesIndexes = make([]seriesBlockIndex, t.Series.Index.N)
- for i := range blk.seriesIndexes {
- idx := &blk.seriesIndexes[i]
-
- // Read data block.
- var offset, size uint32
- offset, buf = binary.BigEndian.Uint32(buf[:4]), buf[4:]
- size, buf = binary.BigEndian.Uint32(buf[:4]), buf[4:]
- idx.data = blk.data[offset : offset+size]
-
- // Read block capacity.
- idx.capacity, buf = int32(binary.BigEndian.Uint32(buf[:4])), buf[4:]
-
- // Read min key.
- var n uint32
- n, buf = binary.BigEndian.Uint32(buf[:4]), buf[4:]
- idx.min, buf = buf[:n], buf[n:]
- }
- if len(buf) != 0 {
- return fmt.Errorf("data remaining in index list buffer: %d", len(buf))
- }
-
- // Initialize bloom filter.
- filter, err := bloom.NewFilterBuffer(data[t.Bloom.Offset:][:t.Bloom.Size], t.Bloom.K)
- if err != nil {
- return err
- }
- blk.filter = filter
-
- // Initialise sketches. We're currently using HLL+.
- var s, ts = hll.NewDefaultPlus(), hll.NewDefaultPlus()
- if err := s.UnmarshalBinary(data[t.Sketch.Offset:][:t.Sketch.Size]); err != nil {
- return err
- }
- blk.sketch = s
-
- if err := ts.UnmarshalBinary(data[t.TSketch.Offset:][:t.TSketch.Size]); err != nil {
- return err
- }
- blk.tsketch = ts
-
- // Set the series and tombstone counts
- blk.seriesN, blk.tombstoneN = t.SeriesN, t.TombstoneN
-
- return nil
-}
-
-// seriesBlockIndex represents a partitioned series block index.
-type seriesBlockIndex struct {
- data []byte
- min []byte
- capacity int32
-}
-
-// seriesBlockIterator is an iterator over the series ids in a series list.
-type seriesBlockIterator struct {
- i, n uint32
- offset uint32
- sblk *SeriesBlock
- e SeriesBlockElem // buffer
-}
-
-// Next returns the next series element.
-func (itr *seriesBlockIterator) Next() tsdb.SeriesElem {
- for {
- // Exit if at the end.
- if itr.i == itr.n {
- return nil
- }
-
- // If the current element is a hash index partition then skip it.
- if flag := itr.sblk.data[itr.offset]; flag&SeriesHashIndexFlag != 0 {
- // Skip flag
- itr.offset++
-
- // Read index capacity.
- n := binary.BigEndian.Uint32(itr.sblk.data[itr.offset:])
- itr.offset += 4
-
- // Skip over index.
- itr.offset += n * SeriesIDSize
- continue
- }
-
- // Read next element.
- itr.e.UnmarshalBinary(itr.sblk.data[itr.offset:])
-
- // Move iterator and offset forward.
- itr.i++
- itr.offset += uint32(itr.e.size)
-
- return &itr.e
- }
-}
-
-// seriesDecodeIterator decodes a series id iterator into unmarshaled elements.
-type seriesDecodeIterator struct {
- itr seriesIDIterator
- sblk *SeriesBlock
- e SeriesBlockElem // buffer
-}
-
-// newSeriesDecodeIterator returns a new instance of seriesDecodeIterator.
-func newSeriesDecodeIterator(sblk *SeriesBlock, itr seriesIDIterator) *seriesDecodeIterator {
- return &seriesDecodeIterator{sblk: sblk, itr: itr}
-}
-
-// Next returns the next series element.
-func (itr *seriesDecodeIterator) Next() tsdb.SeriesElem {
- // Read next series id.
- id := itr.itr.next()
- if id == 0 {
- return nil
- }
-
- // Read next element.
- itr.e.UnmarshalBinary(itr.sblk.data[id:])
- return &itr.e
-}
-
-// SeriesBlockElem represents a series element in the series list.
-type SeriesBlockElem struct {
- flag byte
- name []byte
- tags models.Tags
- size int
-}
-
-// Deleted returns true if the tombstone flag is set.
-func (e *SeriesBlockElem) Deleted() bool { return (e.flag & SeriesTombstoneFlag) != 0 }
-
-// Name returns the measurement name.
-func (e *SeriesBlockElem) Name() []byte { return e.name }
-
-// Tags returns the tag set.
-func (e *SeriesBlockElem) Tags() models.Tags { return e.tags }
-
-// Expr always returns a nil expression.
-// This is only used by higher level query planning.
-func (e *SeriesBlockElem) Expr() influxql.Expr { return nil }
-
-// UnmarshalBinary unmarshals data into e.
-func (e *SeriesBlockElem) UnmarshalBinary(data []byte) error {
- start := len(data)
-
- // Parse flag data.
- e.flag, data = data[0], data[1:]
-
- // Parse total size.
- _, szN := binary.Uvarint(data)
- data = data[szN:]
-
- // Parse name.
- n, data := binary.BigEndian.Uint16(data[:2]), data[2:]
- e.name, data = data[:n], data[n:]
-
- // Parse tags.
- e.tags = e.tags[:0]
- tagN, szN := binary.Uvarint(data)
- data = data[szN:]
-
- for i := uint64(0); i < tagN; i++ {
- var tag models.Tag
-
- n, data = binary.BigEndian.Uint16(data[:2]), data[2:]
- tag.Key, data = data[:n], data[n:]
-
- n, data = binary.BigEndian.Uint16(data[:2]), data[2:]
- tag.Value, data = data[:n], data[n:]
-
- e.tags = append(e.tags, tag)
- }
-
- // Save length of elem.
- e.size = start - len(data)
-
- return nil
-}
-
-// AppendSeriesElem serializes flag/name/tags to dst and returns the new buffer.
-func AppendSeriesElem(dst []byte, flag byte, name []byte, tags models.Tags) []byte {
- dst = append(dst, flag)
- return AppendSeriesKey(dst, name, tags)
-}
-
-// AppendSeriesKey serializes name and tags to a byte slice.
-// The total length is prepended as a uvarint.
-func AppendSeriesKey(dst []byte, name []byte, tags models.Tags) []byte {
- buf := make([]byte, binary.MaxVarintLen32)
- origLen := len(dst)
-
- // The tag count is variable encoded, so we need to know ahead of time what
- // the size of the tag count value will be.
- tcBuf := make([]byte, binary.MaxVarintLen32)
- tcSz := binary.PutUvarint(tcBuf, uint64(len(tags)))
-
- // Size of name/tags. Does not include total length.
- size := 0 + //
- 2 + // size of measurement
- len(name) + // measurement
- tcSz + // size of number of tags
- (4 * len(tags)) + // length of each tag key and value
- tags.Size() // size of tag keys/values
-
- // Variable encode length.
- totalSz := binary.PutUvarint(buf, uint64(size))
-
- // If caller doesn't provide a buffer then pre-allocate an exact one.
- if dst == nil {
- dst = make([]byte, 0, size+totalSz)
- }
-
- // Append total length.
- dst = append(dst, buf[:totalSz]...)
-
- // Append name.
- binary.BigEndian.PutUint16(buf, uint16(len(name)))
- dst = append(dst, buf[:2]...)
- dst = append(dst, name...)
-
- // Append tag count.
- dst = append(dst, tcBuf[:tcSz]...)
-
- // Append tags.
- for _, tag := range tags {
- binary.BigEndian.PutUint16(buf, uint16(len(tag.Key)))
- dst = append(dst, buf[:2]...)
- dst = append(dst, tag.Key...)
-
- binary.BigEndian.PutUint16(buf, uint16(len(tag.Value)))
- dst = append(dst, buf[:2]...)
- dst = append(dst, tag.Value...)
- }
-
- // Verify that the total length equals the encoded byte count.
- if got, exp := len(dst)-origLen, size+totalSz; got != exp {
- panic(fmt.Sprintf("series key encoding does not match calculated total length: actual=%d, exp=%d, key=%x", got, exp, dst))
- }
-
- return dst
-}
-
-// ReadSeriesKey returns the series key from the beginning of the buffer.
-func ReadSeriesKey(data []byte) []byte {
- sz, n := binary.Uvarint(data)
- return data[:int(sz)+n]
-}
-
-func CompareSeriesKeys(a, b []byte) int {
- // Handle 'nil' keys.
- if len(a) == 0 && len(b) == 0 {
- return 0
- } else if len(a) == 0 {
- return -1
- } else if len(b) == 0 {
- return 1
- }
-
- // Read total size.
- _, i := binary.Uvarint(a)
- a = a[i:]
- _, i = binary.Uvarint(b)
- b = b[i:]
-
- // Read names.
- var n uint16
- n, a = binary.BigEndian.Uint16(a), a[2:]
- name0, a := a[:n], a[n:]
- n, b = binary.BigEndian.Uint16(b), b[2:]
- name1, b := b[:n], b[n:]
-
- // Compare names, return if not equal.
- if cmp := bytes.Compare(name0, name1); cmp != 0 {
- return cmp
- }
-
- // Read tag counts.
- tagN0, i := binary.Uvarint(a)
- a = a[i:]
-
- tagN1, i := binary.Uvarint(b)
- b = b[i:]
-
- // Compare each tag in order.
- for i := uint64(0); ; i++ {
- // Check for EOF.
- if i == tagN0 && i == tagN1 {
- return 0
- } else if i == tagN0 {
- return -1
- } else if i == tagN1 {
- return 1
- }
-
- // Read keys.
- var key0, key1 []byte
- n, a = binary.BigEndian.Uint16(a), a[2:]
- key0, a = a[:n], a[n:]
- n, b = binary.BigEndian.Uint16(b), b[2:]
- key1, b = b[:n], b[n:]
-
- // Compare keys.
- if cmp := bytes.Compare(key0, key1); cmp != 0 {
- return cmp
- }
-
- // Read values.
- var value0, value1 []byte
- n, a = binary.BigEndian.Uint16(a), a[2:]
- value0, a = a[:n], a[n:]
- n, b = binary.BigEndian.Uint16(b), b[2:]
- value1, b = b[:n], b[n:]
-
- // Compare values.
- if cmp := bytes.Compare(value0, value1); cmp != 0 {
- return cmp
- }
- }
-}
-
-type seriesKeys [][]byte
-
-func (a seriesKeys) Len() int { return len(a) }
-func (a seriesKeys) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a seriesKeys) Less(i, j int) bool {
- return CompareSeriesKeys(a[i], a[j]) == -1
-}
-
-// SeriesBlockEncoder encodes series to a SeriesBlock in an underlying writer.
-type SeriesBlockEncoder struct {
- w io.Writer
-
- // Double buffer for writing series.
- // First elem is current buffer, second is previous buffer.
- buf [2][]byte
-
- // Track bytes written, sections, & offsets.
- n int64
- trailer SeriesBlockTrailer
- offsets *rhh.HashMap
- indexMin []byte
- indexes []seriesBlockIndexEncodeInfo
-
- // Bloom filter to check for series existence.
- filter *bloom.Filter
-
- // Series sketch and tombstoned series sketch. These must be
- // set before calling WriteTo.
- sketch, tSketch estimator.Sketch
-}
-
-// NewSeriesBlockEncoder returns a new instance of SeriesBlockEncoder.
-func NewSeriesBlockEncoder(w io.Writer, n uint32, m, k uint64) *SeriesBlockEncoder {
- return &SeriesBlockEncoder{
- w: w,
-
- offsets: rhh.NewHashMap(rhh.Options{
- Capacity: MaxSeriesBlockHashSize,
- LoadFactor: LoadFactor,
- }),
-
- filter: bloom.NewFilter(m, k),
-
- sketch: hll.NewDefaultPlus(),
- tSketch: hll.NewDefaultPlus(),
- }
-}
-
-// N returns the number of bytes written.
-func (enc *SeriesBlockEncoder) N() int64 { return enc.n }
-
-// Encode writes a series to the underlying writer.
-// The series must be lexicographically sorted after the previously encoded series.
-func (enc *SeriesBlockEncoder) Encode(name []byte, tags models.Tags, deleted bool) error {
- // An initial empty byte must be written.
- if err := enc.ensureHeaderWritten(); err != nil {
- return err
- }
-
- // Generate the series element.
- buf := AppendSeriesElem(enc.buf[0][:0], encodeSerieFlag(deleted), name, tags)
-
- // Verify series is after previous series.
- if enc.buf[1] != nil {
- // Skip the first byte since it is the flag. Remaining bytes are key.
- key0, key1 := buf[1:], enc.buf[1][1:]
-
- if cmp := CompareSeriesKeys(key0, key1); cmp == -1 {
- return fmt.Errorf("series out of order: prev=%q, new=%q", enc.buf[1], buf)
- } else if cmp == 0 {
- return fmt.Errorf("series already encoded: %s", buf)
- }
- }
-
- // Flush a hash index, if necessary.
- if err := enc.checkFlushIndex(buf[1:]); err != nil {
- return err
- }
-
- // Swap double buffer.
- enc.buf[0], enc.buf[1] = enc.buf[1], buf
-
- // Write encoded series to writer.
- offset := enc.n
- if err := writeTo(enc.w, buf, &enc.n); err != nil {
- return err
- }
-
- // Save offset to generate index later.
- // Key is copied by the RHH map.
- enc.offsets.Put(buf[1:], uint32(offset))
-
- // Update bloom filter.
- enc.filter.Insert(buf[1:])
-
- // Update sketches & trailer.
- if deleted {
- enc.trailer.TombstoneN++
- enc.tSketch.Add(buf)
- } else {
- enc.trailer.SeriesN++
- enc.sketch.Add(buf)
- }
-
- return nil
-}
-
-// Close writes the index and trailer.
-// This should be called at the end once all series have been encoded.
-func (enc *SeriesBlockEncoder) Close() error {
- if err := enc.ensureHeaderWritten(); err != nil {
- return err
- }
-
- // Flush outstanding hash index.
- if err := enc.flushIndex(); err != nil {
- return err
- }
-
- // Write dictionary-encoded series list.
- enc.trailer.Series.Data.Offset = 1
- enc.trailer.Series.Data.Size = int32(enc.n) - enc.trailer.Series.Data.Offset
-
- // Write dictionary-encoded series hash index.
- enc.trailer.Series.Index.Offset = int32(enc.n)
- if err := enc.writeIndexEntries(); err != nil {
- return err
- }
- enc.trailer.Series.Index.Size = int32(enc.n) - enc.trailer.Series.Index.Offset
-
- // Flush bloom filter.
- enc.trailer.Bloom.K = enc.filter.K()
- enc.trailer.Bloom.Offset = int32(enc.n)
- if err := writeTo(enc.w, enc.filter.Bytes(), &enc.n); err != nil {
- return err
- }
- enc.trailer.Bloom.Size = int32(enc.n) - enc.trailer.Bloom.Offset
-
- // Write the sketches out.
- enc.trailer.Sketch.Offset = int32(enc.n)
- if err := writeSketchTo(enc.w, enc.sketch, &enc.n); err != nil {
- return err
- }
- enc.trailer.Sketch.Size = int32(enc.n) - enc.trailer.Sketch.Offset
-
- enc.trailer.TSketch.Offset = int32(enc.n)
- if err := writeSketchTo(enc.w, enc.tSketch, &enc.n); err != nil {
- return err
- }
- enc.trailer.TSketch.Size = int32(enc.n) - enc.trailer.TSketch.Offset
-
- // Write trailer.
- nn, err := enc.trailer.WriteTo(enc.w)
- enc.n += nn
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// writeIndexEntries writes a list of series hash index entries.
-func (enc *SeriesBlockEncoder) writeIndexEntries() error {
- enc.trailer.Series.Index.N = int32(len(enc.indexes))
-
- for _, idx := range enc.indexes {
- // Write offset/size.
- if err := writeUint32To(enc.w, uint32(idx.offset), &enc.n); err != nil {
- return err
- } else if err := writeUint32To(enc.w, uint32(idx.size), &enc.n); err != nil {
- return err
- }
-
- // Write capacity.
- if err := writeUint32To(enc.w, uint32(idx.capacity), &enc.n); err != nil {
- return err
- }
-
- // Write min key.
- if err := writeUint32To(enc.w, uint32(len(idx.min)), &enc.n); err != nil {
- return err
- } else if err := writeTo(enc.w, idx.min, &enc.n); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// ensureHeaderWritten writes a single empty byte at the front of the file
-// so that series offsets will always be non-zero.
-func (enc *SeriesBlockEncoder) ensureHeaderWritten() error {
- if enc.n > 0 {
- return nil
- }
-
- if _, err := enc.w.Write([]byte{0}); err != nil {
- return err
- }
- enc.n++
-
- return nil
-}
-
-// checkFlushIndex flushes a hash index segment if the index is too large.
-// The min argument specifies the lowest series key in the next index, if one is created.
-func (enc *SeriesBlockEncoder) checkFlushIndex(min []byte) error {
- // Ignore if there is still room in the index.
- if enc.offsets.Len() < MaxSeriesBlockHashSize {
- return nil
- }
-
- // Flush index values.
- if err := enc.flushIndex(); err != nil {
- return err
- }
-
- // Reset index and save minimum series key.
- enc.offsets.Reset()
- enc.indexMin = make([]byte, len(min))
- copy(enc.indexMin, min)
-
- return nil
-}
-
-// flushIndex flushes the hash index segment.
-func (enc *SeriesBlockEncoder) flushIndex() error {
- if enc.offsets.Len() == 0 {
- return nil
- }
-
- // Write index segment flag.
- if err := writeUint8To(enc.w, SeriesHashIndexFlag, &enc.n); err != nil {
- return err
- }
- // Write index capacity.
- // This is used for skipping over when iterating sequentially.
- if err := writeUint32To(enc.w, uint32(enc.offsets.Cap()), &enc.n); err != nil {
- return err
- }
-
- // Determine size.
- var sz int64 = enc.offsets.Cap() * 4
-
- // Save current position to ensure size is correct by the end.
- offset := enc.n
-
- // Encode hash map offset entries.
- for i := int64(0); i < enc.offsets.Cap(); i++ {
- _, v := enc.offsets.Elem(i)
- seriesOffset, _ := v.(uint32)
-
- if err := writeUint32To(enc.w, uint32(seriesOffset), &enc.n); err != nil {
- return err
- }
- }
-
- // Determine total size.
- size := enc.n - offset
-
- // Verify actual size equals calculated size.
- if size != sz {
- return fmt.Errorf("series hash index size mismatch: %d <> %d", size, sz)
- }
-
- // Add to index entries.
- enc.indexes = append(enc.indexes, seriesBlockIndexEncodeInfo{
- offset: uint32(offset),
- size: uint32(size),
- capacity: uint32(enc.offsets.Cap()),
- min: enc.indexMin,
- })
-
- // Clear next min.
- enc.indexMin = nil
-
- return nil
-}
-
-// seriesBlockIndexEncodeInfo stores offset information for seriesBlockIndex structures.
-type seriesBlockIndexEncodeInfo struct {
- offset uint32
- size uint32
- capacity uint32
- min []byte
-}
-
-// ReadSeriesBlockTrailer returns the series list trailer from data.
-func ReadSeriesBlockTrailer(data []byte) SeriesBlockTrailer {
- var t SeriesBlockTrailer
-
- // Slice trailer data.
- buf := data[len(data)-SeriesBlockTrailerSize:]
-
- // Read series data info.
- t.Series.Data.Offset, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]
- t.Series.Data.Size, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]
-
- // Read series hash index info.
- t.Series.Index.Offset, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]
- t.Series.Index.Size, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]
- t.Series.Index.N, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]
-
- // Read bloom filter info.
- t.Bloom.K, buf = binary.BigEndian.Uint64(buf[0:8]), buf[8:]
- t.Bloom.Offset, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]
- t.Bloom.Size, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]
-
- // Read series sketch info.
- t.Sketch.Offset, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]
- t.Sketch.Size, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]
-
- // Read tombstone series sketch info.
- t.TSketch.Offset, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]
- t.TSketch.Size, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]
-
- // Read series & tombstone count.
- t.SeriesN, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]
- t.TombstoneN, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]
-
- return t
-}
-
-// SeriesBlockTrailer represents meta data written to the end of the series list.
-type SeriesBlockTrailer struct {
- Series struct {
- Data struct {
- Offset int32
- Size int32
- }
- Index struct {
- Offset int32
- Size int32
- N int32
- }
- }
-
- // Bloom filter info.
- Bloom struct {
- K uint64
- Offset int32
- Size int32
- }
-
- // Offset and size of cardinality sketch for measurements.
- Sketch struct {
- Offset int32
- Size int32
- }
-
- // Offset and size of cardinality sketch for tombstoned measurements.
- TSketch struct {
- Offset int32
- Size int32
- }
-
- SeriesN int32
- TombstoneN int32
-}
-
-func (t SeriesBlockTrailer) WriteTo(w io.Writer) (n int64, err error) {
- if err := writeUint32To(w, uint32(t.Series.Data.Offset), &n); err != nil {
- return n, err
- } else if err := writeUint32To(w, uint32(t.Series.Data.Size), &n); err != nil {
- return n, err
- }
-
- if err := writeUint32To(w, uint32(t.Series.Index.Offset), &n); err != nil {
- return n, err
- } else if err := writeUint32To(w, uint32(t.Series.Index.Size), &n); err != nil {
- return n, err
- } else if err := writeUint32To(w, uint32(t.Series.Index.N), &n); err != nil {
- return n, err
- }
-
- // Write bloom filter info.
- if err := writeUint64To(w, t.Bloom.K, &n); err != nil {
- return n, err
- } else if err := writeUint32To(w, uint32(t.Bloom.Offset), &n); err != nil {
- return n, err
- } else if err := writeUint32To(w, uint32(t.Bloom.Size), &n); err != nil {
- return n, err
- }
-
- // Write measurement sketch info.
- if err := writeUint32To(w, uint32(t.Sketch.Offset), &n); err != nil {
- return n, err
- } else if err := writeUint32To(w, uint32(t.Sketch.Size), &n); err != nil {
- return n, err
- }
-
- // Write tombstone measurement sketch info.
- if err := writeUint32To(w, uint32(t.TSketch.Offset), &n); err != nil {
- return n, err
- } else if err := writeUint32To(w, uint32(t.TSketch.Size), &n); err != nil {
- return n, err
- }
-
- // Write series and tombstone count.
- if err := writeUint32To(w, uint32(t.SeriesN), &n); err != nil {
- return n, err
- } else if err := writeUint32To(w, uint32(t.TombstoneN), &n); err != nil {
- return n, err
- }
-
- return n, nil
-}
-
-type serie struct {
- name []byte
- tags models.Tags
- deleted bool
- offset uint32
-}
-
-func (s *serie) flag() uint8 { return encodeSerieFlag(s.deleted) }
-
-func encodeSerieFlag(deleted bool) byte {
- var flag byte
- if deleted {
- flag |= SeriesTombstoneFlag
- }
- return flag
-}
-
-type series []serie
-
-func (a series) Len() int { return len(a) }
-func (a series) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a series) Less(i, j int) bool {
- if cmp := bytes.Compare(a[i].name, a[j].name); cmp != 0 {
- return cmp == -1
- }
- return models.CompareTags(a[i].tags, a[j].tags) == -1
-}
-
-// mapIndexFileSeriesBlock maps a writer to a series block.
-// Returns the series block and the mmap byte slice (if mmap is used).
-// The memory-mapped slice MUST be unmapped by the caller.
-func mapIndexFileSeriesBlock(w io.Writer) (*SeriesBlock, []byte, error) {
- switch w := w.(type) {
- case *bytes.Buffer:
- return mapIndexFileSeriesBlockBuffer(w)
- case *os.File:
- return mapIndexFileSeriesBlockFile(w)
- default:
- return nil, nil, fmt.Errorf("invalid tsi1 writer type: %T", w)
- }
-}
-
-// mapIndexFileSeriesBlockBuffer maps a buffer to a series block.
-func mapIndexFileSeriesBlockBuffer(buf *bytes.Buffer) (*SeriesBlock, []byte, error) {
- data := buf.Bytes()
- data = data[len(FileSignature):] // Skip file signature.
-
- var sblk SeriesBlock
- if err := sblk.UnmarshalBinary(data); err != nil {
- return nil, nil, err
- }
- return &sblk, nil, nil
-}
-
-// mapIndexFileSeriesBlockFile memory-maps a file to a series block.
-func mapIndexFileSeriesBlockFile(f *os.File) (*SeriesBlock, []byte, error) {
- // Open a read-only memory map of the existing data.
- data, err := mmap.Map(f.Name())
- if err != nil {
- return nil, nil, err
- }
- sblk_data := data[len(FileSignature):] // Skip file signature.
-
- // Unmarshal block on top of mmap.
- var sblk SeriesBlock
- if err := sblk.UnmarshalBinary(sblk_data); err != nil {
- mmap.Unmap(data)
- return nil, nil, err
- }
-
- return &sblk, data, nil
-}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/series_block_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/series_block_test.go
deleted file mode 100644
index 3455abce93..0000000000
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/series_block_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package tsi1_test
-
-import (
- "bytes"
- "fmt"
- "testing"
-
- "github.com/influxdata/influxdb/models"
- "github.com/influxdata/influxdb/tsdb/index/tsi1"
-)
-
-// Ensure series block can be unmarshaled.
-func TestSeriesBlock_UnmarshalBinary(t *testing.T) {
- if _, err := CreateSeriesBlock([]Series{
- {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})},
- {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})},
- {Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "east"})},
- }); err != nil {
- t.Fatal(err)
- }
-}
-
-// Ensure series block contains the correct set of series.
-func TestSeriesBlock_Series(t *testing.T) {
- series := []Series{
- {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})},
- {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})},
- {Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "east"})},
- }
- l := MustCreateSeriesBlock(series)
-
- // Verify total number of series is correct.
- if n := l.SeriesCount(); n != 3 {
- t.Fatalf("unexpected series count: %d", n)
- }
-
- // Verify all series exist.
- for i, s := range series {
- if e := l.Series(s.Name, s.Tags); e == nil {
- t.Fatalf("series does not exist: i=%d", i)
- } else if !bytes.Equal(e.Name(), s.Name) || models.CompareTags(e.Tags(), s.Tags) != 0 {
- t.Fatalf("series element does not match: i=%d, %s (%s) != %s (%s)", i, e.Name(), e.Tags().String(), s.Name, s.Tags.String())
- } else if e.Deleted() {
- t.Fatalf("series deleted: i=%d", i)
- }
- }
-
- // Verify non-existent series doesn't exist.
- if e := l.Series([]byte("foo"), models.NewTags(map[string]string{"region": "north"})); e != nil {
- t.Fatalf("series should not exist: %#v", e)
- }
-}
-
-// CreateSeriesBlock returns an in-memory SeriesBlock with a list of series.
-func CreateSeriesBlock(a []Series) (*tsi1.SeriesBlock, error) {
- var buf bytes.Buffer
-
- // Create writer and sketches. Add series.
- enc := tsi1.NewSeriesBlockEncoder(&buf, uint32(len(a)), M, K)
- for i, s := range a {
- if err := enc.Encode(s.Name, s.Tags, s.Deleted); err != nil {
- return nil, fmt.Errorf("SeriesBlockWriter.Add(): i=%d, err=%s", i, err)
- }
- }
-
- // Close and flush.
- if err := enc.Close(); err != nil {
- return nil, fmt.Errorf("SeriesBlockWriter.WriteTo(): %s", err)
- }
-
- // Unpack bytes into series block.
- var blk tsi1.SeriesBlock
- if err := blk.UnmarshalBinary(buf.Bytes()); err != nil {
- return nil, fmt.Errorf("SeriesBlock.UnmarshalBinary(): %s", err)
- }
-
- return &blk, nil
-}
-
-// MustCreateSeriesBlock calls CreateSeriesBlock(). Panic on error.
-func MustCreateSeriesBlock(a []Series) *tsi1.SeriesBlock {
- l, err := CreateSeriesBlock(a)
- if err != nil {
- panic(err)
- }
- return l
-}
-
-// Series represents name/tagset pairs that are used in testing.
-type Series struct {
- Name []byte
- Tags models.Tags
- Deleted bool
-}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block.go
index 1a17d62776..ca1d06fba9 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block.go
@@ -90,6 +90,14 @@ func (blk *TagBlock) UnmarshalBinary(data []byte) error {
// TagKeyElem returns an element for a tag key.
// Returns nil if the key is not found.
func (blk *TagBlock) TagKeyElem(key []byte) TagKeyElem {
+ var elem TagBlockKeyElem
+ if !blk.DecodeTagKeyElem(key, &elem) {
+ return nil
+ }
+ return &elem
+}
+
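+// DecodeTagKeyElem decodes the element for key into elem using the block's hash index.
+// It returns true if the key exists in the block.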
+func (blk *TagBlock) DecodeTagKeyElem(key []byte, elem *TagBlockKeyElem) bool {
keyN := int64(binary.BigEndian.Uint64(blk.hashData[:TagKeyNSize]))
hash := rhh.HashKey(key)
pos := hash % keyN
@@ -100,21 +108,20 @@ func (blk *TagBlock) TagKeyElem(key []byte) TagKeyElem {
// Find offset of tag key.
offset := binary.BigEndian.Uint64(blk.hashData[TagKeyNSize+(pos*TagKeyOffsetSize):])
if offset == 0 {
- return nil
+ return false
}
// Parse into element.
- var e TagBlockKeyElem
- e.unmarshal(blk.data[offset:], blk.data)
+ elem.unmarshal(blk.data[offset:], blk.data)
// Return if keys match.
- if bytes.Equal(e.key, key) {
- return &e
+ if bytes.Equal(elem.key, key) {
+ return true
}
// Check if we've exceeded the probe distance.
- if d > rhh.Dist(rhh.HashKey(e.key), pos, keyN) {
- return nil
+ if d > rhh.Dist(rhh.HashKey(elem.key), pos, keyN) {
+ return false
}
// Move position forward.
@@ -122,21 +129,39 @@ func (blk *TagBlock) TagKeyElem(key []byte) TagKeyElem {
d++
if d > keyN {
- return nil
+ return false
}
}
}
// TagValueElem returns an element for a tag value.
func (blk *TagBlock) TagValueElem(key, value []byte) TagValueElem {
- // Find key element, exit if not found.
- kelem, _ := blk.TagKeyElem(key).(*TagBlockKeyElem)
- if kelem == nil {
+ var valueElem TagBlockValueElem
+ if !blk.DecodeTagValueElem(key, value, &valueElem) {
return nil
}
+ return &valueElem
+}
+
+// TagValueSeriesData returns the series count and the raw series data for a tag value.
+func (blk *TagBlock) TagValueSeriesData(key, value []byte) (uint64, []byte) {
+ var valueElem TagBlockValueElem
+ if !blk.DecodeTagValueElem(key, value, &valueElem) {
+ return 0, nil
+ }
+ return valueElem.series.n, valueElem.series.data
+}
+
+// DecodeTagValueElem decodes the element for the given tag key/value into valueElem.
+// It returns true if the tag value exists in the block.
+func (blk *TagBlock) DecodeTagValueElem(key, value []byte, valueElem *TagBlockValueElem) bool {
+ // Find key element, exit if not found.
+ var keyElem TagBlockKeyElem
+ if !blk.DecodeTagKeyElem(key, &keyElem) {
+ return false
+ }
// Slice hash index data.
- hashData := kelem.hashIndex.buf
+ hashData := keyElem.hashIndex.buf
valueN := int64(binary.BigEndian.Uint64(hashData[:TagValueNSize]))
hash := rhh.HashKey(value)
@@ -148,22 +173,21 @@ func (blk *TagBlock) TagValueElem(key, value []byte) TagValueElem {
// Find offset of tag value.
offset := binary.BigEndian.Uint64(hashData[TagValueNSize+(pos*TagValueOffsetSize):])
if offset == 0 {
- return nil
+ return false
}
// Parse into element.
- var e TagBlockValueElem
- e.unmarshal(blk.data[offset:])
+ valueElem.unmarshal(blk.data[offset:])
// Return if values match.
- if bytes.Equal(e.value, value) {
- return &e
+ if bytes.Equal(valueElem.value, value) {
+ return true
}
// Check if we've exceeded the probe distance.
- max := rhh.Dist(rhh.HashKey(e.value), pos, valueN)
+ max := rhh.Dist(rhh.HashKey(valueElem.value), pos, valueN)
if d > max {
- return nil
+ return false
}
// Move position forward.
@@ -171,7 +195,7 @@ func (blk *TagBlock) TagValueElem(key, value []byte) TagValueElem {
d++
if d > valueN {
- return nil
+ return false
}
}
}
@@ -300,7 +324,7 @@ type TagBlockValueElem struct {
flag byte
value []byte
series struct {
- n uint32 // Series count
+ n uint64 // Series count
data []byte // Raw series data
}
@@ -314,25 +338,25 @@ func (e *TagBlockValueElem) Deleted() bool { return (e.flag & TagValueTombstoneF
func (e *TagBlockValueElem) Value() []byte { return e.value }
// SeriesN returns the series count.
-func (e *TagBlockValueElem) SeriesN() uint32 { return e.series.n }
+func (e *TagBlockValueElem) SeriesN() uint64 { return e.series.n }
// SeriesData returns the raw series data.
func (e *TagBlockValueElem) SeriesData() []byte { return e.series.data }
// SeriesID returns series ID at an index.
-func (e *TagBlockValueElem) SeriesID(i int) uint32 {
- return binary.BigEndian.Uint32(e.series.data[i*SeriesIDSize:])
+func (e *TagBlockValueElem) SeriesID(i int) uint64 {
+ return binary.BigEndian.Uint64(e.series.data[i*SeriesIDSize:])
}
// SeriesIDs returns a list of decoded series ids.
-func (e *TagBlockValueElem) SeriesIDs() []uint32 {
- a := make([]uint32, 0, e.series.n)
- var prev uint32
+func (e *TagBlockValueElem) SeriesIDs() []uint64 {
+ a := make([]uint64, 0, e.series.n)
+ var prev uint64
for data := e.series.data; len(data) > 0; {
delta, n := binary.Uvarint(data)
data = data[n:]
- seriesID := prev + uint32(delta)
+ seriesID := prev + uint64(delta)
a = append(a, seriesID)
prev = seriesID
}
@@ -355,7 +379,7 @@ func (e *TagBlockValueElem) unmarshal(buf []byte) {
// Parse series count.
v, n := binary.Uvarint(buf)
- e.series.n = uint32(v)
+ e.series.n = uint64(v)
buf = buf[n:]
// Parse data block size.
@@ -536,7 +560,7 @@ func (enc *TagBlockEncoder) EncodeKey(key []byte, deleted bool) error {
// EncodeValue writes a tag value to the underlying writer.
// The tag value must be lexicographically sorted after the previously encoded tag value.
-func (enc *TagBlockEncoder) EncodeValue(value []byte, deleted bool, seriesIDs []uint32) error {
+func (enc *TagBlockEncoder) EncodeValue(value []byte, deleted bool, seriesIDs []uint64) error {
if len(enc.keys) == 0 {
return fmt.Errorf("tag key must be encoded before encoding values")
} else if len(value) == 0 {
@@ -567,7 +591,7 @@ func (enc *TagBlockEncoder) EncodeValue(value []byte, deleted bool, seriesIDs []
// Build series data in buffer.
enc.buf.Reset()
- var prev uint32
+ var prev uint64
for _, seriesID := range seriesIDs {
delta := seriesID - prev
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block_test.go
index 4de527e16d..f69042a4f3 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block_test.go
@@ -17,19 +17,19 @@ func TestTagBlockWriter(t *testing.T) {
if err := enc.EncodeKey([]byte("host"), false); err != nil {
t.Fatal(err)
- } else if err := enc.EncodeValue([]byte("server0"), false, []uint32{1}); err != nil {
+ } else if err := enc.EncodeValue([]byte("server0"), false, []uint64{1}); err != nil {
t.Fatal(err)
- } else if err := enc.EncodeValue([]byte("server1"), false, []uint32{2}); err != nil {
+ } else if err := enc.EncodeValue([]byte("server1"), false, []uint64{2}); err != nil {
t.Fatal(err)
- } else if err := enc.EncodeValue([]byte("server2"), false, []uint32{3}); err != nil {
+ } else if err := enc.EncodeValue([]byte("server2"), false, []uint64{3}); err != nil {
t.Fatal(err)
}
if err := enc.EncodeKey([]byte("region"), false); err != nil {
t.Fatal(err)
- } else if err := enc.EncodeValue([]byte("us-east"), false, []uint32{1, 2}); err != nil {
+ } else if err := enc.EncodeValue([]byte("us-east"), false, []uint64{1, 2}); err != nil {
t.Fatal(err)
- } else if err := enc.EncodeValue([]byte("us-west"), false, []uint32{3}); err != nil {
+ } else if err := enc.EncodeValue([]byte("us-west"), false, []uint64{3}); err != nil {
t.Fatal(err)
}
@@ -49,28 +49,28 @@ func TestTagBlockWriter(t *testing.T) {
// Verify data.
if e := blk.TagValueElem([]byte("region"), []byte("us-east")); e == nil {
t.Fatal("expected element")
- } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint32{1, 2}) {
+ } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint64{1, 2}) {
t.Fatalf("unexpected series ids: %#v", a)
}
if e := blk.TagValueElem([]byte("region"), []byte("us-west")); e == nil {
t.Fatal("expected element")
- } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint32{3}) {
+ } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint64{3}) {
t.Fatalf("unexpected series ids: %#v", a)
}
if e := blk.TagValueElem([]byte("host"), []byte("server0")); e == nil {
t.Fatal("expected element")
- } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint32{1}) {
+ } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint64{1}) {
t.Fatalf("unexpected series ids: %#v", a)
}
if e := blk.TagValueElem([]byte("host"), []byte("server1")); e == nil {
t.Fatal("expected element")
- } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint32{2}) {
+ } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint64{2}) {
t.Fatalf("unexpected series ids: %#v", a)
}
if e := blk.TagValueElem([]byte("host"), []byte("server2")); e == nil {
t.Fatal("expected element")
- } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint32{3}) {
+ } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint64{3}) {
t.Fatalf("unexpected series ids: %#v", a)
}
}
@@ -105,7 +105,7 @@ func benchmarkTagBlock_SeriesN(b *testing.B, tagN, valueN int, blk **tsi1.TagBlo
}
for j := 0; j < valueN; j++ {
- if err := enc.EncodeValue([]byte(fmt.Sprintf("%08d", j)), false, []uint32{1}); err != nil {
+ if err := enc.EncodeValue([]byte(fmt.Sprintf("%08d", j)), false, []uint64{1}); err != nil {
b.Fatal(err)
}
}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1.go
index 20ac48c4a6..df9e81a2a6 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1.go
@@ -7,11 +7,9 @@ import (
"fmt"
"io"
"os"
+ "runtime/debug"
- "github.com/influxdata/influxdb/models"
- "github.com/influxdata/influxdb/query"
"github.com/influxdata/influxdb/tsdb"
- "github.com/influxdata/influxql"
)
// LoadFactor is the fill percent for RHH indexes.
@@ -21,7 +19,7 @@ const LoadFactor = 80
type MeasurementElem interface {
Name() []byte
Deleted() bool
- HasSeries() bool
+ // HasSeries() bool
}
// MeasurementElems represents a list of MeasurementElem.
@@ -116,37 +114,31 @@ func (p measurementMergeElem) Deleted() bool {
return p[0].Deleted()
}
-func (p measurementMergeElem) HasSeries() bool {
- for _, v := range p {
- if v.HasSeries() {
- return true
- }
- }
- return false
-}
-
-// filterUndeletedMeasurementIterator returns all measurements which are not deleted.
-type filterUndeletedMeasurementIterator struct {
+// tsdbMeasurementIteratorAdapter wraps MeasurementIterator to match the TSDB interface.
+// This is needed because TSDB doesn't have a concept of "deleted" measurements.
+type tsdbMeasurementIteratorAdapter struct {
itr MeasurementIterator
}
-// FilterUndeletedMeasurementIterator returns an iterator which filters all deleted measurement.
-func FilterUndeletedMeasurementIterator(itr MeasurementIterator) MeasurementIterator {
+// NewTSDBMeasurementIteratorAdapter returns an iterator which implements tsdb.MeasurementIterator.
+func NewTSDBMeasurementIteratorAdapter(itr MeasurementIterator) tsdb.MeasurementIterator {
if itr == nil {
return nil
}
- return &filterUndeletedMeasurementIterator{itr: itr}
+ return &tsdbMeasurementIteratorAdapter{itr: itr}
}
-func (itr *filterUndeletedMeasurementIterator) Next() MeasurementElem {
+func (itr *tsdbMeasurementIteratorAdapter) Close() error { return nil }
+
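+// Next returns the name of the next undeleted measurement, or nil once the
+// underlying iterator is exhausted.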
+func (itr *tsdbMeasurementIteratorAdapter) Next() ([]byte, error) {
for {
e := itr.itr.Next()
if e == nil {
- return nil
+ return nil, nil
} else if e.Deleted() {
continue
}
- return e
+ return e.Name(), nil
}
}
@@ -162,6 +154,34 @@ type TagKeyIterator interface {
Next() TagKeyElem
}
+// tsdbTagKeyIteratorAdapter wraps TagKeyIterator to match the TSDB interface.
+// This is needed because TSDB doesn't have a concept of "deleted" tag keys.
+type tsdbTagKeyIteratorAdapter struct {
+ itr TagKeyIterator
+}
+
+// NewTSDBTagKeyIteratorAdapter returns an iterator which implements tsdb.TagKeyIterator.
+func NewTSDBTagKeyIteratorAdapter(itr TagKeyIterator) tsdb.TagKeyIterator {
+ if itr == nil {
+ return nil
+ }
+ return &tsdbTagKeyIteratorAdapter{itr: itr}
+}
+
+func (itr *tsdbTagKeyIteratorAdapter) Close() error { return nil }
+
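+// Next returns the next undeleted tag key, or nil once the underlying iterator
+// is exhausted.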
+func (itr *tsdbTagKeyIteratorAdapter) Next() ([]byte, error) {
+ for {
+ e := itr.itr.Next()
+ if e == nil {
+ return nil, nil
+ } else if e.Deleted() {
+ continue
+ }
+ return e.Key(), nil
+ }
+}
+
// MergeTagKeyIterators returns an iterator that merges a set of iterators.
// Iterators that are first in the list take precedence and a deletion by those
// early iterators will invalidate elements by later iterators.
@@ -272,6 +292,34 @@ type TagValueIterator interface {
Next() TagValueElem
}
+// tsdbTagValueIteratorAdapter wraps TagValueIterator to match the TSDB interface.
+// This is needed because TSDB doesn't have a concept of "deleted" tag values.
+type tsdbTagValueIteratorAdapter struct {
+ itr TagValueIterator
+}
+
+// NewTSDBTagValueIteratorAdapter returns an iterator which implements tsdb.TagValueIterator.
+func NewTSDBTagValueIteratorAdapter(itr TagValueIterator) tsdb.TagValueIterator {
+ if itr == nil {
+ return nil
+ }
+ return &tsdbTagValueIteratorAdapter{itr: itr}
+}
+
+func (itr *tsdbTagValueIteratorAdapter) Close() error { return nil }
+
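+// Next returns the next undeleted tag value, or nil once the underlying iterator
+// is exhausted.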
+func (itr *tsdbTagValueIteratorAdapter) Next() ([]byte, error) {
+ for {
+ e := itr.itr.Next()
+ if e == nil {
+ return nil, nil
+ } else if e.Deleted() {
+ continue
+ }
+ return e.Value(), nil
+ }
+}
+
// MergeTagValueIterators returns an iterator that merges a set of iterators.
// Iterators that are first in the list take precedence and a deletion by those
// early iterators will invalidate elements by later iterators.
@@ -352,373 +400,80 @@ func (p tagValueMergeElem) Deleted() bool {
return p[0].Deleted()
}
-// SeriesElemKey encodes e as a series key.
-func SeriesElemKey(e tsdb.SeriesElem) []byte {
- name, tags := e.Name(), e.Tags()
-
- // TODO: Precompute allocation size.
- // FIXME: Handle escaping.
-
- var buf []byte
- buf = append(buf, name...)
- for _, t := range tags {
- buf = append(buf, ',')
- buf = append(buf, t.Key...)
- buf = append(buf, '=')
- buf = append(buf, t.Value...)
- }
- return buf
-}
-
-// CompareSeriesElem returns -1 if a < b, 1 if a > b, and 0 if equal.
-func CompareSeriesElem(a, b tsdb.SeriesElem) int {
- if cmp := bytes.Compare(a.Name(), b.Name()); cmp != 0 {
- return cmp
- }
- return models.CompareTags(a.Tags(), b.Tags())
-}
-
-// seriesElem represents an in-memory implementation of SeriesElem.
-type seriesElem struct {
- name []byte
- tags models.Tags
- deleted bool
+/*
+type SeriesPointMergeIterator interface {
+ Next() (*query.FloatPoint, error)
+ Close() error
+ Stats() query.IteratorStats
}
-func (e *seriesElem) Name() []byte { return e.name }
-func (e *seriesElem) Tags() models.Tags { return e.tags }
-func (e *seriesElem) Deleted() bool { return e.deleted }
-func (e *seriesElem) Expr() influxql.Expr { return nil }
-
-// MergeSeriesIterators returns an iterator that merges a set of iterators.
-// Iterators that are first in the list take precendence and a deletion by those
-// early iterators will invalidate elements by later iterators.
-func MergeSeriesIterators(itrs ...tsdb.SeriesIterator) tsdb.SeriesIterator {
+func MergeSeriesPointIterators(itrs ...*seriesPointIterator) SeriesPointMergeIterator {
if n := len(itrs); n == 0 {
return nil
} else if n == 1 {
return itrs[0]
}
- return &seriesMergeIterator{
- buf: make([]tsdb.SeriesElem, len(itrs)),
+ return &seriesPointMergeIterator{
+ buf: make([]*query.FloatPoint, len(itrs)),
itrs: itrs,
}
}
-// seriesMergeIterator is an iterator that merges multiple iterators together.
-type seriesMergeIterator struct {
- buf []tsdb.SeriesElem
- itrs []tsdb.SeriesIterator
+type seriesPointMergeIterator struct {
+ buf []*query.FloatPoint
+ itrs []*seriesPointIterator
}
-// Next returns the element with the next lowest name/tags across the iterators.
-//
-// If multiple iterators contain the same name/tags then the first is returned
-// and the remaining ones are skipped.
-func (itr *seriesMergeIterator) Next() tsdb.SeriesElem {
- // Find next lowest name/tags amongst the buffers.
- var name []byte
- var tags models.Tags
+func (itr *seriesPointMergeIterator) Close() error {
+ for i := range itr.itrs {
+ itr.itrs[i].Close()
+ }
+ return nil
+}
+func (itr *seriesPointMergeIterator) Stats() query.IteratorStats {
+ return query.IteratorStats{}
+}
+
+func (itr *seriesPointMergeIterator) Next() (_ *query.FloatPoint, err error) {
+ // Find next lowest point amongst the buffers.
+ var key []byte
for i, buf := range itr.buf {
// Fill buffer.
if buf == nil {
- if buf = itr.itrs[i].Next(); buf != nil {
+ if buf, err = itr.itrs[i].Next(); err != nil {
+ return nil, err
+ } else if buf != nil {
itr.buf[i] = buf
} else {
continue
}
}
- // If the name is not set the pick the first non-empty name.
- if name == nil {
- name, tags = buf.Name(), buf.Tags()
- continue
- }
-
- // Set name/tags if they are lower than what has been seen.
- if cmp := bytes.Compare(buf.Name(), name); cmp == -1 || (cmp == 0 && models.CompareTags(buf.Tags(), tags) == -1) {
- name, tags = buf.Name(), buf.Tags()
+ // Find next lowest key.
+ if key == nil || bytes.Compare(buf.Key(), key) == -1 {
+ key = buf.Key()
}
}
// Return nil if no elements remaining.
- if name == nil {
- return nil
+ if key == nil {
+ return nil, nil
}
- // Refill buffer.
- var e tsdb.SeriesElem
+ // Merge elements together & clear buffer.
+ itr.e = itr.e[:0]
for i, buf := range itr.buf {
- if buf == nil || !bytes.Equal(buf.Name(), name) || models.CompareTags(buf.Tags(), tags) != 0 {
+ if buf == nil || !bytes.Equal(buf.Key(), key) {
continue
}
-
- // Copy first matching buffer to the return buffer.
- if e == nil {
- e = buf
- }
-
- // Clear buffer.
+ itr.e = append(itr.e, buf)
itr.buf[i] = nil
}
- return e
-}
-
-// IntersectSeriesIterators returns an iterator that only returns series which
-// occur in both iterators. If both series have associated expressions then
-// they are combined together.
-func IntersectSeriesIterators(itr0, itr1 tsdb.SeriesIterator) tsdb.SeriesIterator {
- if itr0 == nil || itr1 == nil {
- return nil
- }
-
- return &seriesIntersectIterator{itrs: [2]tsdb.SeriesIterator{itr0, itr1}}
-}
-// seriesIntersectIterator is an iterator that merges two iterators together.
-type seriesIntersectIterator struct {
- e seriesExprElem
- buf [2]tsdb.SeriesElem
- itrs [2]tsdb.SeriesIterator
-}
-
-// Next returns the next element which occurs in both iterators.
-func (itr *seriesIntersectIterator) Next() (e tsdb.SeriesElem) {
- for {
- // Fill buffers.
- if itr.buf[0] == nil {
- itr.buf[0] = itr.itrs[0].Next()
- }
- if itr.buf[1] == nil {
- itr.buf[1] = itr.itrs[1].Next()
- }
-
- // Exit if either buffer is still empty.
- if itr.buf[0] == nil || itr.buf[1] == nil {
- return nil
- }
-
- // Skip if both series are not equal.
- if cmp := CompareSeriesElem(itr.buf[0], itr.buf[1]); cmp == -1 {
- itr.buf[0] = nil
- continue
- } else if cmp == 1 {
- itr.buf[1] = nil
- continue
- }
-
- // Merge series together if equal.
- itr.e.SeriesElem = itr.buf[0]
-
- // Attach expression.
- expr0 := itr.buf[0].Expr()
- expr1 := itr.buf[1].Expr()
- if expr0 == nil {
- itr.e.expr = expr1
- } else if expr1 == nil {
- itr.e.expr = expr0
- } else {
- itr.e.expr = influxql.Reduce(&influxql.BinaryExpr{
- Op: influxql.AND,
- LHS: expr0,
- RHS: expr1,
- }, nil)
- }
-
- itr.buf[0], itr.buf[1] = nil, nil
- return &itr.e
- }
-}
-
-// UnionSeriesIterators returns an iterator that returns series from both
-// both iterators. If both series have associated expressions then they are
-// combined together.
-func UnionSeriesIterators(itr0, itr1 tsdb.SeriesIterator) tsdb.SeriesIterator {
- // Return other iterator if either one is nil.
- if itr0 == nil {
- return itr1
- } else if itr1 == nil {
- return itr0
- }
-
- return &seriesUnionIterator{itrs: [2]tsdb.SeriesIterator{itr0, itr1}}
-}
-
-// seriesUnionIterator is an iterator that unions two iterators together.
-type seriesUnionIterator struct {
- e seriesExprElem
- buf [2]tsdb.SeriesElem
- itrs [2]tsdb.SeriesIterator
-}
-
-// Next returns the next element which occurs in both iterators.
-func (itr *seriesUnionIterator) Next() (e tsdb.SeriesElem) {
- // Fill buffers.
- if itr.buf[0] == nil {
- itr.buf[0] = itr.itrs[0].Next()
- }
- if itr.buf[1] == nil {
- itr.buf[1] = itr.itrs[1].Next()
- }
-
- // Return the other iterator if either one is empty.
- if itr.buf[0] == nil {
- e, itr.buf[1] = itr.buf[1], nil
- return e
- } else if itr.buf[1] == nil {
- e, itr.buf[0] = itr.buf[0], nil
- return e
- }
-
- // Return lesser series.
- if cmp := CompareSeriesElem(itr.buf[0], itr.buf[1]); cmp == -1 {
- e, itr.buf[0] = itr.buf[0], nil
- return e
- } else if cmp == 1 {
- e, itr.buf[1] = itr.buf[1], nil
- return e
- }
-
- // Attach element.
- itr.e.SeriesElem = itr.buf[0]
-
- // Attach expression.
- expr0 := itr.buf[0].Expr()
- expr1 := itr.buf[1].Expr()
- if expr0 != nil && expr1 != nil {
- itr.e.expr = influxql.Reduce(&influxql.BinaryExpr{
- Op: influxql.OR,
- LHS: expr0,
- RHS: expr1,
- }, nil)
- } else {
- itr.e.expr = nil
- }
-
- itr.buf[0], itr.buf[1] = nil, nil
- return &itr.e
-}
-
-// DifferenceSeriesIterators returns an iterator that only returns series which
-// occur the first iterator but not the second iterator.
-func DifferenceSeriesIterators(itr0, itr1 tsdb.SeriesIterator) tsdb.SeriesIterator {
- if itr0 != nil && itr1 == nil {
- return itr0
- } else if itr0 == nil {
- return nil
- }
- return &seriesDifferenceIterator{itrs: [2]tsdb.SeriesIterator{itr0, itr1}}
-}
-
-// seriesDifferenceIterator is an iterator that merges two iterators together.
-type seriesDifferenceIterator struct {
- buf [2]tsdb.SeriesElem
- itrs [2]tsdb.SeriesIterator
-}
-
-// Next returns the next element which occurs only in the first iterator.
-func (itr *seriesDifferenceIterator) Next() (e tsdb.SeriesElem) {
- for {
- // Fill buffers.
- if itr.buf[0] == nil {
- itr.buf[0] = itr.itrs[0].Next()
- }
- if itr.buf[1] == nil {
- itr.buf[1] = itr.itrs[1].Next()
- }
-
- // Exit if first buffer is still empty.
- if itr.buf[0] == nil {
- return nil
- } else if itr.buf[1] == nil {
- e, itr.buf[0] = itr.buf[0], nil
- return e
- }
-
- // Return first series if it's less.
- // If second series is less then skip it.
- // If both series are equal then skip both.
- if cmp := CompareSeriesElem(itr.buf[0], itr.buf[1]); cmp == -1 {
- e, itr.buf[0] = itr.buf[0], nil
- return e
- } else if cmp == 1 {
- itr.buf[1] = nil
- continue
- } else {
- itr.buf[0], itr.buf[1] = nil, nil
- continue
- }
- }
-}
-
-// filterUndeletedSeriesIterator returns all series which are not deleted.
-type filterUndeletedSeriesIterator struct {
- itr tsdb.SeriesIterator
-}
-
-// FilterUndeletedSeriesIterator returns an iterator which filters all deleted series.
-func FilterUndeletedSeriesIterator(itr tsdb.SeriesIterator) tsdb.SeriesIterator {
- if itr == nil {
- return nil
- }
- return &filterUndeletedSeriesIterator{itr: itr}
-}
-
-func (itr *filterUndeletedSeriesIterator) Next() tsdb.SeriesElem {
- for {
- e := itr.itr.Next()
- if e == nil {
- return nil
- } else if e.Deleted() {
- continue
- }
- return e
- }
-}
-
-// seriesExprElem holds a series and its associated filter expression.
-type seriesExprElem struct {
- tsdb.SeriesElem
- expr influxql.Expr
-}
-
-// Expr returns the associated expression.
-func (e *seriesExprElem) Expr() influxql.Expr { return e.expr }
-
-// seriesExprIterator is an iterator that attaches an associated expression.
-type seriesExprIterator struct {
- itr tsdb.SeriesIterator
- e seriesExprElem
-}
-
-// newSeriesExprIterator returns a new instance of seriesExprIterator.
-func newSeriesExprIterator(itr tsdb.SeriesIterator, expr influxql.Expr) tsdb.SeriesIterator {
- if itr == nil {
- return nil
- }
-
- return &seriesExprIterator{
- itr: itr,
- e: seriesExprElem{
- expr: expr,
- },
- }
-}
-
-// Next returns the next element in the iterator.
-func (itr *seriesExprIterator) Next() tsdb.SeriesElem {
- itr.e.SeriesElem = itr.itr.Next()
- if itr.e.SeriesElem == nil {
- return nil
- }
- return &itr.e
-}
-
-// seriesIDIterator represents a iterator over a list of series ids.
-type seriesIDIterator interface {
- next() uint32
+ return itr.e, nil
}
+*/
// writeTo writes v into w. Updates n.
func writeTo(w io.Writer, v []byte, n *int64) error {
@@ -795,11 +550,9 @@ func assert(condition bool, msg string, v ...interface{}) {
}
}
-type byTagKey []*query.TagSet
-
-func (t byTagKey) Len() int { return len(t) }
-func (t byTagKey) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) < 0 }
-func (t byTagKey) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
-
// hexdump is a helper for dumping binary data to stderr.
func hexdump(data []byte) { os.Stderr.Write([]byte(hex.Dump(data))) }
+
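+// stack returns the current goroutine's stack trace framed by separator lines,
+// as a debugging aid.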
+func stack() string {
+ return "------------------------\n" + string(debug.Stack()) + "------------------------\n\n"
+}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1_test.go
index 424f0c6a22..a96b4fd2f8 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1_test.go
@@ -3,13 +3,14 @@ package tsi1_test
import (
"bytes"
"io/ioutil"
+ "os"
+ "path/filepath"
"reflect"
"testing"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxdb/tsdb/index/tsi1"
- "github.com/influxdata/influxql"
)
// Ensure iterator can operate over an in-memory list of elements.
@@ -151,51 +152,18 @@ func TestMergeTagValueIterators(t *testing.T) {
}
// Ensure iterator can operate over an in-memory list of series ids.
-func TestSeriesIterator(t *testing.T) {
- elems := []SeriesElem{
- {name: []byte("cpu"), tags: models.Tags{{Key: []byte("region"), Value: []byte("us-east")}}, deleted: true},
- {name: []byte("mem")},
+func TestSeriesIDIterator(t *testing.T) {
+ elems := []tsdb.SeriesIDElem{
+ {SeriesID: 1},
+ {SeriesID: 2},
}
- itr := SeriesIterator{Elems: elems}
- if e := itr.Next(); !reflect.DeepEqual(&elems[0], e) {
- t.Fatalf("unexpected elem(0): %#v", e)
- } else if e := itr.Next(); !reflect.DeepEqual(&elems[1], e) {
- t.Fatalf("unexpected elem(1): %#v", e)
- } else if e := itr.Next(); e != nil {
- t.Fatalf("expected nil elem: %#v", e)
- }
-}
-
-// Ensure iterator can merge multiple iterators together.
-func TestMergeSeriesIterators(t *testing.T) {
- itr := tsi1.MergeSeriesIterators(
- &SeriesIterator{Elems: []SeriesElem{
- {name: []byte("aaa"), tags: models.Tags{{Key: []byte("region"), Value: []byte("us-east")}}, deleted: true},
- {name: []byte("bbb"), deleted: true},
- {name: []byte("ccc")},
- }},
- &SeriesIterator{},
- &SeriesIterator{Elems: []SeriesElem{
- {name: []byte("aaa"), tags: models.Tags{{Key: []byte("region"), Value: []byte("us-east")}}},
- {name: []byte("aaa"), tags: models.Tags{{Key: []byte("region"), Value: []byte("us-west")}}},
- {name: []byte("bbb")},
- {name: []byte("ccc"), deleted: true},
- {name: []byte("ddd")},
- }},
- )
-
- if e := itr.Next(); !reflect.DeepEqual(e, &SeriesElem{name: []byte("aaa"), tags: models.Tags{{Key: []byte("region"), Value: []byte("us-east")}}, deleted: true}) {
+ itr := SeriesIDIterator{Elems: elems}
+ if e := itr.Next(); !reflect.DeepEqual(elems[0], e) {
t.Fatalf("unexpected elem(0): %#v", e)
- } else if e := itr.Next(); !reflect.DeepEqual(e, &SeriesElem{name: []byte("aaa"), tags: models.Tags{{Key: []byte("region"), Value: []byte("us-west")}}}) {
+ } else if e := itr.Next(); !reflect.DeepEqual(elems[1], e) {
t.Fatalf("unexpected elem(1): %#v", e)
- } else if e := itr.Next(); !reflect.DeepEqual(e, &SeriesElem{name: []byte("bbb"), deleted: true}) {
- t.Fatalf("unexpected elem(2): %#v", e)
- } else if e := itr.Next(); !reflect.DeepEqual(e, &SeriesElem{name: []byte("ccc")}) {
- t.Fatalf("unexpected elem(3): %#v", e)
- } else if e := itr.Next(); !reflect.DeepEqual(e, &SeriesElem{name: []byte("ddd")}) {
- t.Fatalf("unexpected elem(4): %#v", e)
- } else if e := itr.Next(); e != nil {
+ } else if e := itr.Next(); e.SeriesID != 0 {
t.Fatalf("expected nil elem: %#v", e)
}
}
@@ -257,9 +225,8 @@ type TagValueElem struct {
deleted bool
}
-func (e *TagValueElem) Value() []byte { return e.value }
-func (e *TagValueElem) Deleted() bool { return e.deleted }
-func (e *TagValueElem) SeriesIterator() tsdb.SeriesIterator { return nil }
+func (e *TagValueElem) Value() []byte { return e.value }
+func (e *TagValueElem) Deleted() bool { return e.deleted }
// TagValueIterator represents an iterator over a slice of tag values.
type TagValueIterator struct {
@@ -275,31 +242,18 @@ func (itr *TagValueIterator) Next() (e tsi1.TagValueElem) {
return e
}
-// SeriesElem represents a test implementation of tsi1.SeriesElem.
-type SeriesElem struct {
- name []byte
- tags models.Tags
- deleted bool
- expr influxql.Expr
-}
-
-func (e *SeriesElem) Name() []byte { return e.name }
-func (e *SeriesElem) Tags() models.Tags { return e.tags }
-func (e *SeriesElem) Deleted() bool { return e.deleted }
-func (e *SeriesElem) Expr() influxql.Expr { return e.expr }
-
-// SeriesIterator represents an iterator over a slice of tag values.
-type SeriesIterator struct {
- Elems []SeriesElem
+// SeriesIDIterator represents an iterator over a slice of series id elems.
+type SeriesIDIterator struct {
+ Elems []tsdb.SeriesIDElem
}
// Next returns the next element in the iterator.
-func (itr *SeriesIterator) Next() (e tsdb.SeriesElem) {
+func (itr *SeriesIDIterator) Next() (elem tsdb.SeriesIDElem) {
if len(itr.Elems) == 0 {
- return nil
+ return tsdb.SeriesIDElem{}
}
- e, itr.Elems = &itr.Elems[0], itr.Elems[1:]
- return e
+ elem, itr.Elems = itr.Elems[0], itr.Elems[1:]
+ return elem
}
// MustTempDir returns a temporary directory. Panic on error.
@@ -310,3 +264,49 @@ func MustTempDir() string {
}
return path
}
+
+// MustTempPartitionDir returns a temporary directory for a partition. Panic on error.
+func MustTempPartitionDir() string {
+ path := MustTempDir()
+ path = filepath.Join(path, "0")
+ if err := os.Mkdir(path, 0777); err != nil {
+ panic(err)
+ }
+ return path
+}
+
+// Series represents a name/tag set pair used in testing.
+type Series struct {
+ Name []byte
+ Tags models.Tags
+ Deleted bool
+}
+
+// SeriesFile is a test wrapper for tsdb.SeriesFile.
+type SeriesFile struct {
+ *tsdb.SeriesFile
+}
+
+// NewSeriesFile returns a new instance of SeriesFile with a temporary file path.
+func NewSeriesFile() *SeriesFile {
+ dir, err := ioutil.TempDir("", "tsdb-series-file-")
+ if err != nil {
+ panic(err)
+ }
+ return &SeriesFile{SeriesFile: tsdb.NewSeriesFile(dir)}
+}
+
+// MustOpenSeriesFile returns a new, open instance of SeriesFile. Panic on error.
+func MustOpenSeriesFile() *SeriesFile {
+ f := NewSeriesFile()
+ if err := f.Open(); err != nil {
+ panic(err)
+ }
+ return f
+}
+
+// Close closes the series file and removes its underlying data from disk.
+func (f *SeriesFile) Close() error {
+ defer os.RemoveAll(f.Path())
+ return f.SeriesFile.Close()
+}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index_test.go
index 8e421a3d57..9bf7556947 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index_test.go
@@ -3,6 +3,7 @@ package tsdb_test
import (
"fmt"
"io/ioutil"
+ "os"
"path/filepath"
"reflect"
"testing"
@@ -15,11 +16,46 @@ import (
"github.com/influxdata/influxql"
)
-func TestIndex_MeasurementNamesByExpr(t *testing.T) {
+// Ensure iterator can merge multiple iterators together.
+func TestMergeSeriesIDIterators(t *testing.T) {
+ itr := tsdb.MergeSeriesIDIterators(
+ tsdb.NewSeriesIDSliceIterator([]uint64{1, 2, 3}),
+ tsdb.NewSeriesIDSliceIterator(nil),
+ tsdb.NewSeriesIDSliceIterator([]uint64{1, 2, 3, 4}),
+ )
+
+ if e, err := itr.Next(); err != nil {
+ t.Fatal(err)
+ } else if !reflect.DeepEqual(e, tsdb.SeriesIDElem{SeriesID: 1}) {
+ t.Fatalf("unexpected elem(0): %#v", e)
+ }
+ if e, err := itr.Next(); err != nil {
+ t.Fatal(err)
+ } else if !reflect.DeepEqual(e, tsdb.SeriesIDElem{SeriesID: 2}) {
+ t.Fatalf("unexpected elem(1): %#v", e)
+ }
+ if e, err := itr.Next(); err != nil {
+ t.Fatal(err)
+ } else if !reflect.DeepEqual(e, tsdb.SeriesIDElem{SeriesID: 3}) {
+ t.Fatalf("unexpected elem(2): %#v", e)
+ }
+ if e, err := itr.Next(); err != nil {
+ t.Fatal(err)
+ } else if !reflect.DeepEqual(e, tsdb.SeriesIDElem{SeriesID: 4}) {
+ t.Fatalf("unexpected elem(3): %#v", e)
+ }
+ if e, err := itr.Next(); err != nil {
+ t.Fatal(err)
+ } else if e.SeriesID != 0 {
+ t.Fatalf("expected nil elem: %#v", e)
+ }
+}
+
+func TestIndexSet_MeasurementNamesByExpr(t *testing.T) {
// Setup indexes
indexes := map[string]*Index{}
for _, name := range tsdb.RegisteredIndexes() {
- idx := NewIndex(name)
+ idx := MustNewIndex(name)
idx.AddSeries("cpu", map[string]string{"region": "east"})
idx.AddSeries("cpu", map[string]string{"region": "west", "secret": "foo"})
idx.AddSeries("disk", map[string]string{"secret": "foo"})
@@ -27,6 +63,7 @@ func TestIndex_MeasurementNamesByExpr(t *testing.T) {
idx.AddSeries("gpu", map[string]string{"region": "east"})
idx.AddSeries("pci", map[string]string{"region": "east", "secret": "foo"})
indexes[name] = idx
+ defer idx.Close()
}
authorizer := &internal.AuthorizerMock{
@@ -68,7 +105,7 @@ func TestIndex_MeasurementNamesByExpr(t *testing.T) {
t.Run("no authorization", func(t *testing.T) {
for _, example := range examples {
t.Run(example.name, func(t *testing.T) {
- names, err := indexes[idx].MeasurementNamesByExpr(nil, example.expr)
+ names, err := indexes[idx].IndexSet().MeasurementNamesByExpr(nil, example.expr)
if err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(names, example.expected) {
@@ -81,7 +118,7 @@ func TestIndex_MeasurementNamesByExpr(t *testing.T) {
t.Run("with authorization", func(t *testing.T) {
for _, example := range authExamples {
t.Run(example.name, func(t *testing.T) {
- names, err := indexes[idx].MeasurementNamesByExpr(authorizer, example.expr)
+ names, err := indexes[idx].IndexSet().MeasurementNamesByExpr(authorizer, example.expr)
if err != nil {
t.Fatal(err)
} else if !reflect.DeepEqual(names, example.expected) {
@@ -96,26 +133,58 @@ func TestIndex_MeasurementNamesByExpr(t *testing.T) {
type Index struct {
tsdb.Index
+ rootPath string
+ sfile *tsdb.SeriesFile
}
-func NewIndex(index string) *Index {
+func MustNewIndex(index string) *Index {
opts := tsdb.NewEngineOptions()
opts.IndexVersion = index
- if index == inmem.IndexName {
- opts.InmemIndex = inmem.NewIndex("db0")
+ rootPath, err := ioutil.TempDir("", "influxdb-tsdb")
+ if err != nil {
+ panic(err)
}
- path, err := ioutil.TempDir("", "influxdb-tsdb")
+ seriesPath, err := ioutil.TempDir(rootPath, tsdb.SeriesFileDirectory)
if err != nil {
panic(err)
}
- idx := &Index{Index: tsdb.MustOpenIndex(0, "db0", filepath.Join(path, "index"), opts)}
+
+ sfile := tsdb.NewSeriesFile(seriesPath)
+ if err := sfile.Open(); err != nil {
+ panic(err)
+ }
+
+ if index == inmem.IndexName {
+ opts.InmemIndex = inmem.NewIndex("db0", sfile)
+ }
+
+ idx := &Index{
+ Index: tsdb.MustOpenIndex(0, "db0", filepath.Join(rootPath, "index"), tsdb.NewSeriesIDSet(), sfile, opts),
+ rootPath: rootPath,
+ sfile: sfile,
+ }
return idx
}
+func (idx *Index) IndexSet() *tsdb.IndexSet {
+ return &tsdb.IndexSet{Indexes: []tsdb.Index{idx.Index}, SeriesFile: idx.sfile}
+}
+
func (idx *Index) AddSeries(name string, tags map[string]string) error {
t := models.NewTags(tags)
key := fmt.Sprintf("%s,%s", name, t.HashKey())
return idx.CreateSeriesIfNotExists([]byte(key), []byte(name), t)
}
+
+func (i *Index) Close() error {
+ if err := i.Index.Close(); err != nil {
+ return err
+ }
+
+ if err := i.sfile.Close(); err != nil {
+ return err
+ }
+ return os.RemoveAll(i.rootPath)
+}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/internal/meta.pb.go b/vendor/github.com/influxdata/influxdb/tsdb/internal/meta.pb.go
index 68bb5abc70..8c2b8ab83e 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/internal/meta.pb.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/internal/meta.pb.go
@@ -1,5 +1,6 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// Code generated by protoc-gen-gogo.
// source: internal/meta.proto
+// DO NOT EDIT!
/*
Package tsdb is a generated protocol buffer package.
@@ -104,9 +105,8 @@ func (m *MeasurementFields) GetFields() []*Field {
}
type Field struct {
- ID int32 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"`
- Type int32 `protobuf:"varint,3,opt,name=Type,proto3" json:"Type,omitempty"`
+ Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"`
+ Type int32 `protobuf:"varint,2,opt,name=Type,proto3" json:"Type,omitempty"`
}
func (m *Field) Reset() { *m = Field{} }
@@ -114,13 +114,6 @@ func (m *Field) String() string { return proto.CompactTextString(m) }
func (*Field) ProtoMessage() {}
func (*Field) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{3} }
-func (m *Field) GetID() int32 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
func (m *Field) GetName() string {
if m != nil {
return m.Name
@@ -162,21 +155,20 @@ func init() {
func init() { proto.RegisterFile("internal/meta.proto", fileDescriptorMeta) }
var fileDescriptorMeta = []byte{
- // 242 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xbd, 0x4b, 0x03, 0x41,
- 0x10, 0xc5, 0xb9, 0xbd, 0x0f, 0xc8, 0x44, 0x44, 0x27, 0x82, 0xdb, 0x08, 0x61, 0x6d, 0xd2, 0x78,
- 0x82, 0x56, 0x62, 0x61, 0x13, 0x84, 0xe0, 0x47, 0xb1, 0x39, 0xec, 0x27, 0x64, 0x38, 0x0e, 0xee,
- 0x2e, 0x61, 0x77, 0x53, 0xe4, 0xbf, 0x97, 0xcc, 0x1e, 0x12, 0x35, 0xdd, 0xdb, 0x37, 0xf3, 0xe6,
- 0xfd, 0x58, 0x98, 0x34, 0x7d, 0x60, 0xd7, 0x53, 0x7b, 0xdf, 0x71, 0xa0, 0x72, 0xeb, 0x36, 0x61,
- 0x83, 0x59, 0xf0, 0xeb, 0x95, 0x79, 0x82, 0x62, 0xc9, 0xae, 0x61, 0x8f, 0x17, 0x90, 0xbe, 0xf1,
- 0x5e, 0x27, 0xd3, 0x64, 0x36, 0xb2, 0x07, 0x89, 0x37, 0x90, 0x55, 0x54, 0x7b, 0xad, 0xa6, 0xe9,
- 0x6c, 0xfc, 0x30, 0x2a, 0x0f, 0x81, 0xb2, 0xa2, 0xda, 0x8a, 0x6d, 0xee, 0x20, 0xad, 0xa8, 0x3e,
- 0x91, 0xbb, 0x82, 0xfc, 0x8b, 0xda, 0x1d, 0x6b, 0x25, 0x5e, 0x7c, 0x98, 0x77, 0xb8, 0xfc, 0x60,
- 0xf2, 0x3b, 0xc7, 0x1d, 0xf7, 0xe1, 0xb5, 0xe1, 0x76, 0xed, 0x11, 0x21, 0xfb, 0xa4, 0x8e, 0x87,
- 0xb4, 0x68, 0xbc, 0x85, 0x22, 0x4e, 0x87, 0xe2, 0x71, 0x2c, 0x16, 0xcf, 0x0e, 0x23, 0xf3, 0x02,
- 0xb9, 0x28, 0x3c, 0x07, 0xb5, 0x98, 0x4b, 0x3e, 0xb7, 0x6a, 0x31, 0xff, 0xb9, 0xa8, 0x8e, 0x2e,
- 0x22, 0x64, 0xd5, 0x7e, 0xcb, 0x3a, 0x95, 0x2d, 0xd1, 0xc6, 0xc2, 0xe4, 0x2f, 0xce, 0x92, 0x03,
- 0x3e, 0xc3, 0xd9, 0x91, 0xed, 0x75, 0x22, 0x08, 0xd7, 0x11, 0xe1, 0x1f, 0xbf, 0xfd, 0xb5, 0xbc,
- 0x2a, 0xe4, 0x67, 0x1f, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xac, 0xee, 0x08, 0x52, 0x70, 0x01,
- 0x00, 0x00,
+ // 225 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xbf, 0x6b, 0xc3, 0x30,
+ 0x10, 0x85, 0x71, 0x2c, 0x1b, 0x72, 0xe9, 0xd0, 0x5e, 0x0a, 0xd5, 0x52, 0x08, 0xea, 0x92, 0xa5,
+ 0x0e, 0xb4, 0x53, 0xe9, 0xde, 0xa5, 0x3f, 0x06, 0x45, 0x74, 0xbf, 0x90, 0xc3, 0x18, 0x6c, 0x27,
+ 0x48, 0xca, 0x90, 0xff, 0xbe, 0xf8, 0xe4, 0xa1, 0x6d, 0xbc, 0x3d, 0x7d, 0xa7, 0xa7, 0x4f, 0x1c,
+ 0x2c, 0x9b, 0x3e, 0xb2, 0xef, 0xa9, 0xdd, 0x74, 0x1c, 0xa9, 0x3a, 0xfa, 0x43, 0x3c, 0xa0, 0x8a,
+ 0x61, 0xbf, 0x33, 0x2f, 0x50, 0x6e, 0xd9, 0x37, 0x1c, 0xf0, 0x1a, 0xf2, 0x77, 0x3e, 0xeb, 0x6c,
+ 0x95, 0xad, 0xe7, 0x76, 0x88, 0x78, 0x0f, 0xca, 0x51, 0x1d, 0xf4, 0x6c, 0x95, 0xaf, 0x17, 0x4f,
+ 0xf3, 0x6a, 0x28, 0x54, 0x8e, 0x6a, 0x2b, 0xd8, 0x3c, 0x42, 0xee, 0xa8, 0x9e, 0xe8, 0xdd, 0x42,
+ 0xf1, 0x4d, 0xed, 0x89, 0xf5, 0x4c, 0x58, 0x3a, 0x98, 0x0f, 0xb8, 0xf9, 0x64, 0x0a, 0x27, 0xcf,
+ 0x1d, 0xf7, 0xf1, 0xad, 0xe1, 0x76, 0x1f, 0x10, 0x41, 0x7d, 0x51, 0xc7, 0x63, 0x5b, 0x32, 0x3e,
+ 0x40, 0x99, 0xa6, 0xa3, 0x78, 0x91, 0xc4, 0xc2, 0xec, 0x38, 0x32, 0x1b, 0x28, 0x24, 0x4d, 0xbe,
+ 0x80, 0xa0, 0xdc, 0xf9, 0x98, 0xfc, 0x85, 0x95, 0x6c, 0x2c, 0x2c, 0xff, 0xeb, 0xb7, 0x1c, 0xf1,
+ 0x15, 0xae, 0x7e, 0xe1, 0xa0, 0x33, 0x51, 0xde, 0x25, 0xe5, 0xc5, 0x7f, 0xed, 0x9f, 0xcb, 0xbb,
+ 0x52, 0x36, 0xf9, 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0xf8, 0x31, 0x1f, 0xb9, 0x60, 0x01, 0x00,
+ 0x00,
}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/meta_test.go b/vendor/github.com/influxdata/influxdb/tsdb/meta_test.go
index fff9fda46c..72f8343217 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/meta_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/meta_test.go
@@ -7,7 +7,6 @@ import (
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/tsdb"
- "github.com/influxdata/influxdb/tsdb/index/inmem"
)
// Ensure tags can be marshaled into a byte slice.
@@ -142,18 +141,20 @@ func benchmarkMakeTagsKey(b *testing.B, keyN int) {
type TestSeries struct {
Measurement string
- Series *inmem.Series
+ Key string
+ Tags models.Tags
}
func genTestSeries(mCnt, tCnt, vCnt int) []*TestSeries {
measurements := genStrList("measurement", mCnt)
tagSets := NewTagSetGenerator(tCnt, vCnt).AllSets()
- series := []*TestSeries{}
+ var series []*TestSeries
for _, m := range measurements {
for _, ts := range tagSets {
series = append(series, &TestSeries{
Measurement: m,
- Series: inmem.NewSeries([]byte(fmt.Sprintf("%s:%s", m, string(tsdb.MarshalTags(ts)))), models.NewTags(ts)),
+ Key: fmt.Sprintf("%s:%s", m, string(tsdb.MarshalTags(ts))),
+ Tags: models.NewTags(ts),
})
}
}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/series_file.go b/vendor/github.com/influxdata/influxdb/tsdb/series_file.go
new file mode 100644
index 0000000000..383608f83a
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/tsdb/series_file.go
@@ -0,0 +1,429 @@
+package tsdb
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+
+ "github.com/cespare/xxhash"
+ "github.com/influxdata/influxdb/models"
+ "github.com/influxdata/influxdb/pkg/binaryutil"
+ "go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
+)
+
+var (
+ ErrSeriesFileClosed = errors.New("tsdb: series file closed")
+ ErrInvalidSeriesPartitionID = errors.New("tsdb: invalid series partition id")
+)
+
+// SeriesIDSize is the size in bytes of a series key ID.
+const SeriesIDSize = 8
+
+const (
+ // SeriesFilePartitionN is the number of partitions a series file is split into.
+ SeriesFilePartitionN = 8
+)
+
+// SeriesFile represents the section of the index that holds series data.
+type SeriesFile struct {
+ path string
+ partitions []*SeriesPartition
+
+ Logger *zap.Logger
+}
+
+// NewSeriesFile returns a new instance of SeriesFile.
+func NewSeriesFile(path string) *SeriesFile {
+ return &SeriesFile{
+ path: path,
+ Logger: zap.NewNop(),
+ }
+}
+
+// Open memory maps the data file at the file's path.
+func (f *SeriesFile) Open() error {
+ // Create path if it doesn't exist.
+ if err := os.MkdirAll(filepath.Join(f.path), 0777); err != nil {
+ return err
+ }
+
+ // Open partitions.
+ f.partitions = make([]*SeriesPartition, 0, SeriesFilePartitionN)
+ for i := 0; i < SeriesFilePartitionN; i++ {
+ p := NewSeriesPartition(i, f.SeriesPartitionPath(i))
+ p.Logger = f.Logger.With(zap.Int("partition", p.ID()))
+ if err := p.Open(); err != nil {
+ f.Close()
+ return err
+ }
+ f.partitions = append(f.partitions, p)
+ }
+
+ return nil
+}
+
+// Close unmaps the data file.
+func (f *SeriesFile) Close() (err error) {
+ for _, p := range f.partitions {
+ if e := p.Close(); e != nil && err == nil {
+ err = e
+ }
+ }
+ f.partitions = nil
+ return err
+}
+
+// Path returns the path to the file.
+func (f *SeriesFile) Path() string { return f.path }
+
+// SeriesPartitionPath returns the path to a given partition.
+func (f *SeriesFile) SeriesPartitionPath(i int) string {
+ return filepath.Join(f.path, fmt.Sprintf("%02x", i))
+}
+
+// Partitions returns all partitions.
+func (f *SeriesFile) Partitions() []*SeriesPartition { return f.partitions }
+
+// CreateSeriesListIfNotExists creates a list of series in bulk if they don't exist.
+// The returned ids slice contains an id for each series, in the same order as names, whether the series is new or already exists.
+func (f *SeriesFile) CreateSeriesListIfNotExists(names [][]byte, tagsSlice []models.Tags, buf []byte) (ids []uint64, err error) {
+ keys := GenerateSeriesKeys(names, tagsSlice)
+ keyPartitionIDs := f.SeriesKeysPartitionIDs(keys)
+ ids = make([]uint64, len(keys))
+
+ var g errgroup.Group
+ for i := range f.partitions {
+ p := f.partitions[i]
+ g.Go(func() error {
+ return p.CreateSeriesListIfNotExists(keys, keyPartitionIDs, ids)
+ })
+ }
+ if err := g.Wait(); err != nil {
+ return nil, err
+ }
+ return ids, nil
+}
+
+// DeleteSeriesID flags a series as permanently deleted.
+// If the series is reintroduced later then it must create a new id.
+func (f *SeriesFile) DeleteSeriesID(id uint64) error {
+ p := f.SeriesIDPartition(id)
+ if p == nil {
+ return ErrInvalidSeriesPartitionID
+ }
+ return p.DeleteSeriesID(id)
+}
+
+// IsDeleted returns true if the ID has been deleted before.
+func (f *SeriesFile) IsDeleted(id uint64) bool {
+ p := f.SeriesIDPartition(id)
+ if p == nil {
+ return false
+ }
+ return p.IsDeleted(id)
+}
+
+// SeriesKey returns the series key for a given id.
+func (f *SeriesFile) SeriesKey(id uint64) []byte {
+ if id == 0 {
+ return nil
+ }
+ p := f.SeriesIDPartition(id)
+ if p == nil {
+ return nil
+ }
+ return p.SeriesKey(id)
+}
+
+// SeriesKeys returns a list of series keys from a list of ids.
+func (f *SeriesFile) SeriesKeys(ids []uint64) [][]byte {
+ keys := make([][]byte, len(ids))
+ for i := range ids {
+ keys[i] = f.SeriesKey(ids[i])
+ }
+ return keys
+}
+
+// Series returns the parsed series name and tags for an id.
+func (f *SeriesFile) Series(id uint64) ([]byte, models.Tags) {
+ key := f.SeriesKey(id)
+ if key == nil {
+ return nil, nil
+ }
+ return ParseSeriesKey(key)
+}
+
+// SeriesID returns the series id for the series.
+func (f *SeriesFile) SeriesID(name []byte, tags models.Tags, buf []byte) uint64 {
+ key := AppendSeriesKey(buf[:0], name, tags)
+ keyPartition := f.SeriesKeyPartition(key)
+ if keyPartition == nil {
+ return 0
+ }
+ return keyPartition.FindIDBySeriesKey(key)
+}
+
+// HasSeries returns true if the series exists.
+func (f *SeriesFile) HasSeries(name []byte, tags models.Tags, buf []byte) bool {
+ return f.SeriesID(name, tags, buf) > 0
+}
+
+// SeriesCount returns the number of series.
+func (f *SeriesFile) SeriesCount() uint64 {
+ var n uint64
+ for _, p := range f.partitions {
+ n += p.SeriesCount()
+ }
+ return n
+}
+
+// SeriesIDIterator returns an iterator over all series ids in the file.
+func (f *SeriesFile) SeriesIDIterator() SeriesIDIterator {
+ var ids []uint64
+ for _, p := range f.partitions {
+ ids = p.AppendSeriesIDs(ids)
+ }
+ sort.Sort(uint64Slice(ids))
+ return NewSeriesIDSliceIterator(ids)
+}
+
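+// SeriesIDPartitionID returns the partition id encoded in the lower 8 bits of a series id.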
+func (f *SeriesFile) SeriesIDPartitionID(id uint64) int {
+ return int(id & 0xFF)
+}
+
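+// SeriesIDPartition returns the partition that owns the series id, or nil if the partition id is out of range.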
+func (f *SeriesFile) SeriesIDPartition(id uint64) *SeriesPartition {
+ partitionID := f.SeriesIDPartitionID(id)
+ if partitionID >= len(f.partitions) {
+ return nil
+ }
+ return f.partitions[partitionID]
+}
+
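+// SeriesKeysPartitionIDs returns the partition id for each series key.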
+func (f *SeriesFile) SeriesKeysPartitionIDs(keys [][]byte) []int {
+ partitionIDs := make([]int, len(keys))
+ for i := range keys {
+ partitionIDs[i] = f.SeriesKeyPartitionID(keys[i])
+ }
+ return partitionIDs
+}
+
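+// SeriesKeyPartitionID returns the partition id that a series key hashes to.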
+func (f *SeriesFile) SeriesKeyPartitionID(key []byte) int {
+ return int(xxhash.Sum64(key) % SeriesFilePartitionN)
+}
+
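+// SeriesKeyPartition returns the partition that a series key hashes to, or nil if the partition is unavailable.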
+func (f *SeriesFile) SeriesKeyPartition(key []byte) *SeriesPartition {
+ partitionID := f.SeriesKeyPartitionID(key)
+ if partitionID >= len(f.partitions) {
+ return nil
+ }
+ return f.partitions[partitionID]
+}
+
+// AppendSeriesKey serializes name and tags to a byte slice.
+// The total length is prepended as a uvarint.
+func AppendSeriesKey(dst []byte, name []byte, tags models.Tags) []byte {
+ buf := make([]byte, binary.MaxVarintLen64)
+ origLen := len(dst)
+
+ // The tag count is variable encoded, so we need to know ahead of time what
+ // the size of the tag count value will be.
+ tcBuf := make([]byte, binary.MaxVarintLen64)
+ tcSz := binary.PutUvarint(tcBuf, uint64(len(tags)))
+
+ // Size of name/tags. Does not include total length.
+ size := 0 + //
+ 2 + // size of measurement
+ len(name) + // measurement
+ tcSz + // size of number of tags
+ (4 * len(tags)) + // length of each tag key and value
+ tags.Size() // size of tag keys/values
+
+ // Variable encode length.
+ totalSz := binary.PutUvarint(buf, uint64(size))
+
+ // If caller doesn't provide a buffer then pre-allocate an exact one.
+ if dst == nil {
+ dst = make([]byte, 0, size+totalSz)
+ }
+
+ // Append total length.
+ dst = append(dst, buf[:totalSz]...)
+
+ // Append name.
+ binary.BigEndian.PutUint16(buf, uint16(len(name)))
+ dst = append(dst, buf[:2]...)
+ dst = append(dst, name...)
+
+ // Append tag count.
+ dst = append(dst, tcBuf[:tcSz]...)
+
+ // Append tags.
+ for _, tag := range tags {
+ binary.BigEndian.PutUint16(buf, uint16(len(tag.Key)))
+ dst = append(dst, buf[:2]...)
+ dst = append(dst, tag.Key...)
+
+ binary.BigEndian.PutUint16(buf, uint16(len(tag.Value)))
+ dst = append(dst, buf[:2]...)
+ dst = append(dst, tag.Value...)
+ }
+
+ // Verify that the total length equals the encoded byte count.
+ if got, exp := len(dst)-origLen, size+totalSz; got != exp {
+ panic(fmt.Sprintf("series key encoding does not match calculated total length: actual=%d, exp=%d, key=%x", got, exp, dst))
+ }
+
+ return dst
+}
+
+// ReadSeriesKey returns the series key from the beginning of the buffer.
+func ReadSeriesKey(data []byte) (key, remainder []byte) {
+ sz, n := binary.Uvarint(data)
+ return data[:int(sz)+n], data[int(sz)+n:]
+}
+
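+// ReadSeriesKeyLen reads the uvarint-encoded total key length from the beginning of the buffer.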
+func ReadSeriesKeyLen(data []byte) (sz int, remainder []byte) {
+ sz64, i := binary.Uvarint(data)
+ return int(sz64), data[i:]
+}
+
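+// ReadSeriesKeyMeasurement reads the length-prefixed measurement name from the buffer.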
+func ReadSeriesKeyMeasurement(data []byte) (name, remainder []byte) {
+ n, data := binary.BigEndian.Uint16(data), data[2:]
+ return data[:n], data[n:]
+}
+
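+// ReadSeriesKeyTagN reads the uvarint-encoded tag count from the buffer.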
+func ReadSeriesKeyTagN(data []byte) (n int, remainder []byte) {
+ n64, i := binary.Uvarint(data)
+ return int(n64), data[i:]
+}
+
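+// ReadSeriesKeyTag reads a single length-prefixed tag key/value pair from the buffer.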
+func ReadSeriesKeyTag(data []byte) (key, value, remainder []byte) {
+ n, data := binary.BigEndian.Uint16(data), data[2:]
+ key, data = data[:n], data[n:]
+
+ n, data = binary.BigEndian.Uint16(data), data[2:]
+ value, data = data[:n], data[n:]
+ return key, value, data
+}
+
+// ParseSeriesKey extracts the name & tags from a series key.
+func ParseSeriesKey(data []byte) (name []byte, tags models.Tags) {
+ _, data = ReadSeriesKeyLen(data)
+ name, data = ReadSeriesKeyMeasurement(data)
+
+ tagN, data := ReadSeriesKeyTagN(data)
+ tags = make(models.Tags, tagN)
+ for i := 0; i < tagN; i++ {
+ var key, value []byte
+ key, value, data = ReadSeriesKeyTag(data)
+ tags[i] = models.Tag{Key: key, Value: value}
+ }
+
+ return name, tags
+}
+
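+// CompareSeriesKeys compares two encoded series keys, ordering by measurement name and then by tags. It returns -1, 0, or 1.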
+func CompareSeriesKeys(a, b []byte) int {
+ // Handle 'nil' keys.
+ if len(a) == 0 && len(b) == 0 {
+ return 0
+ } else if len(a) == 0 {
+ return -1
+ } else if len(b) == 0 {
+ return 1
+ }
+
+ // Read total size.
+ _, a = ReadSeriesKeyLen(a)
+ _, b = ReadSeriesKeyLen(b)
+
+ // Read names.
+ name0, a := ReadSeriesKeyMeasurement(a)
+ name1, b := ReadSeriesKeyMeasurement(b)
+
+ // Compare names, return if not equal.
+ if cmp := bytes.Compare(name0, name1); cmp != 0 {
+ return cmp
+ }
+
+ // Read tag counts.
+ tagN0, a := ReadSeriesKeyTagN(a)
+ tagN1, b := ReadSeriesKeyTagN(b)
+
+ // Compare each tag in order.
+ for i := 0; ; i++ {
+ // Check for EOF.
+ if i == tagN0 && i == tagN1 {
+ return 0
+ } else if i == tagN0 {
+ return -1
+ } else if i == tagN1 {
+ return 1
+ }
+
+ // Read keys.
+ var key0, key1, value0, value1 []byte
+ key0, value0, a = ReadSeriesKeyTag(a)
+ key1, value1, b = ReadSeriesKeyTag(b)
+
+ // Compare keys & values.
+ if cmp := bytes.Compare(key0, key1); cmp != 0 {
+ return cmp
+ } else if cmp := bytes.Compare(value0, value1); cmp != 0 {
+ return cmp
+ }
+ }
+}
+
+// GenerateSeriesKeys generates series keys for a list of names & tags using
+// a single large memory block.
+func GenerateSeriesKeys(names [][]byte, tagsSlice []models.Tags) [][]byte {
+ buf := make([]byte, 0, SeriesKeysSize(names, tagsSlice))
+ keys := make([][]byte, len(names))
+ for i := range names {
+ offset := len(buf)
+ buf = AppendSeriesKey(buf, names[i], tagsSlice[i])
+ keys[i] = buf[offset:]
+ }
+ return keys
+}
+
+// SeriesKeysSize returns the number of bytes required to encode a list of name/tags.
+func SeriesKeysSize(names [][]byte, tagsSlice []models.Tags) int {
+ var n int
+ for i := range names {
+ n += SeriesKeySize(names[i], tagsSlice[i])
+ }
+ return n
+}
+
+// SeriesKeySize returns the number of bytes required to encode a series key.
+func SeriesKeySize(name []byte, tags models.Tags) int {
+ var n int
+ n += 2 + len(name)
+ n += binaryutil.UvarintSize(uint64(len(tags)))
+ for _, tag := range tags {
+ n += 2 + len(tag.Key)
+ n += 2 + len(tag.Value)
+ }
+ n += binaryutil.UvarintSize(uint64(n))
+ return n
+}
+
+type seriesKeys [][]byte
+
+func (a seriesKeys) Len() int { return len(a) }
+func (a seriesKeys) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a seriesKeys) Less(i, j int) bool {
+ return CompareSeriesKeys(a[i], a[j]) == -1
+}
+
+type uint64Slice []uint64
+
+func (a uint64Slice) Len() int { return len(a) }
+func (a uint64Slice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a uint64Slice) Less(i, j int) bool { return a[i] < a[j] }
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/series_file_test.go b/vendor/github.com/influxdata/influxdb/tsdb/series_file_test.go
new file mode 100644
index 0000000000..d13f516bca
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/tsdb/series_file_test.go
@@ -0,0 +1,124 @@
+package tsdb_test
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/influxdata/influxdb/logger"
+ "github.com/influxdata/influxdb/models"
+ "github.com/influxdata/influxdb/tsdb"
+)
+
+// Ensure series file contains the correct set of series.
+func TestSeriesFile_Series(t *testing.T) {
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ series := []Series{
+ {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})},
+ {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})},
+ {Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "east"})},
+ }
+ for _, s := range series {
+ if _, err := sfile.CreateSeriesListIfNotExists([][]byte{[]byte(s.Name)}, []models.Tags{s.Tags}, nil); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ // Verify total number of series is correct.
+ if n := sfile.SeriesCount(); n != 3 {
+ t.Fatalf("unexpected series count: %d", n)
+ }
+
+ // Verify all series exist.
+ for i, s := range series {
+ if seriesID := sfile.SeriesID(s.Name, s.Tags, nil); seriesID == 0 {
+ t.Fatalf("series does not exist: i=%d", i)
+ }
+ }
+
+ // Verify non-existent series doesn't exist.
+ if sfile.HasSeries([]byte("foo"), models.NewTags(map[string]string{"region": "north"}), nil) {
+ t.Fatal("series should not exist")
+ }
+}
+
+// Ensure series file can be compacted.
+func TestSeriesFileCompactor(t *testing.T) {
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ // Disable automatic compactions.
+ for _, p := range sfile.Partitions() {
+ p.CompactThreshold = 0
+ }
+
+ var names [][]byte
+ var tagsSlice []models.Tags
+ for i := 0; i < 10000; i++ {
+ names = append(names, []byte(fmt.Sprintf("m%d", i)))
+ tagsSlice = append(tagsSlice, models.NewTags(map[string]string{"foo": "bar"}))
+ }
+ if _, err := sfile.CreateSeriesListIfNotExists(names, tagsSlice, nil); err != nil {
+ t.Fatal(err)
+ }
+
+ // Verify total number of series is correct.
+ if n := sfile.SeriesCount(); n != uint64(len(names)) {
+ t.Fatalf("unexpected series count: %d", n)
+ }
+
+ // Compact in-place for each partition.
+ for _, p := range sfile.Partitions() {
+ compactor := tsdb.NewSeriesPartitionCompactor()
+ if err := compactor.Compact(p); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ // Verify all series exist.
+ for i := range names {
+ if seriesID := sfile.SeriesID(names[i], tagsSlice[i], nil); seriesID == 0 {
+ t.Fatalf("series does not exist: %s,%s", names[i], tagsSlice[i].String())
+ }
+ }
+}
+
+// Series represents a name/tag set pair used in testing.
+type Series struct {
+ Name []byte
+ Tags models.Tags
+ Deleted bool
+}
+
+// SeriesFile is a test wrapper for tsdb.SeriesFile.
+type SeriesFile struct {
+ *tsdb.SeriesFile
+}
+
+// NewSeriesFile returns a new instance of SeriesFile with a temporary file path.
+func NewSeriesFile() *SeriesFile {
+ dir, err := ioutil.TempDir("", "tsdb-series-file-")
+ if err != nil {
+ panic(err)
+ }
+ return &SeriesFile{SeriesFile: tsdb.NewSeriesFile(dir)}
+}
+
+// MustOpenSeriesFile returns a new, open instance of SeriesFile. Panic on error.
+func MustOpenSeriesFile() *SeriesFile {
+ f := NewSeriesFile()
+ f.Logger = logger.New(os.Stdout)
+ if err := f.Open(); err != nil {
+ panic(err)
+ }
+ return f
+}
+
+// Close closes the series file and removes its underlying data from disk.
+func (f *SeriesFile) Close() error {
+ defer os.RemoveAll(f.Path())
+ return f.SeriesFile.Close()
+}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/series_index.go b/vendor/github.com/influxdata/influxdb/tsdb/series_index.go
new file mode 100644
index 0000000000..ea37629c19
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/tsdb/series_index.go
@@ -0,0 +1,365 @@
+package tsdb
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "io"
+ "os"
+
+ "github.com/influxdata/influxdb/models"
+ "github.com/influxdata/influxdb/pkg/mmap"
+ "github.com/influxdata/influxdb/pkg/rhh"
+)
+
+const (
+ SeriesIndexVersion = 1
+ SeriesIndexMagic = "SIDX"
+)
+
+const (
+ SeriesIndexElemSize = 16 // offset + id
+ SeriesIndexLoadFactor = 90 // rhh load factor
+
+ SeriesIndexHeaderSize = 0 +
+ 4 + 1 + // magic + version
+ 8 + 8 + // max series + max offset
+ 8 + 8 + // count + capacity
+ 8 + 8 + // key/id map offset & size
+ 8 + 8 + // id/offset map offset & size
+ 0
+)
+
+var ErrInvalidSeriesIndex = errors.New("invalid series index")
+
+// SeriesIndex represents an index of key-to-id & id-to-offset mappings.
+type SeriesIndex struct {
+ path string
+
+ count uint64
+ capacity int64
+ mask int64
+
+ maxSeriesID uint64
+ maxOffset int64
+
+ data []byte // mmap data
+ keyIDData []byte // key/id mmap data
+ idOffsetData []byte // id/offset mmap data
+
+ // In-memory data since rebuild.
+ keyIDMap *rhh.HashMap
+ idOffsetMap map[uint64]int64
+ tombstones map[uint64]struct{}
+}
+
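+// NewSeriesIndex returns a new instance of SeriesIndex for the given path.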
+func NewSeriesIndex(path string) *SeriesIndex {
+ return &SeriesIndex{
+ path: path,
+ }
+}
+
+// Open memory-maps the index file.
+func (idx *SeriesIndex) Open() (err error) {
+ // Map data file, if it exists.
+ if err := func() error {
+ if _, err := os.Stat(idx.path); err != nil && !os.IsNotExist(err) {
+ return err
+ } else if err == nil {
+ if idx.data, err = mmap.Map(idx.path, 0); err != nil {
+ return err
+ }
+
+ hdr, err := ReadSeriesIndexHeader(idx.data)
+ if err != nil {
+ return err
+ }
+ idx.count, idx.capacity, idx.mask = hdr.Count, hdr.Capacity, hdr.Capacity-1
+ idx.maxSeriesID, idx.maxOffset = hdr.MaxSeriesID, hdr.MaxOffset
+
+ idx.keyIDData = idx.data[hdr.KeyIDMap.Offset : hdr.KeyIDMap.Offset+hdr.KeyIDMap.Size]
+ idx.idOffsetData = idx.data[hdr.IDOffsetMap.Offset : hdr.IDOffsetMap.Offset+hdr.IDOffsetMap.Size]
+ }
+ return nil
+ }(); err != nil {
+ idx.Close()
+ return err
+ }
+
+ idx.keyIDMap = rhh.NewHashMap(rhh.DefaultOptions)
+ idx.idOffsetMap = make(map[uint64]int64)
+ idx.tombstones = make(map[uint64]struct{})
+ return nil
+}
+
+// Close unmaps the index file.
+func (idx *SeriesIndex) Close() (err error) {
+ if idx.data != nil {
+ err = mmap.Unmap(idx.data)
+ }
+ idx.keyIDData = nil
+ idx.idOffsetData = nil
+
+ idx.keyIDMap = nil
+ idx.idOffsetMap = nil
+ idx.tombstones = nil
+ return err
+}
+
+// Recover rebuilds the in-memory index for all new entries.
+func (idx *SeriesIndex) Recover(segments []*SeriesSegment) error {
+ // Allocate new in-memory maps.
+ idx.keyIDMap = rhh.NewHashMap(rhh.DefaultOptions)
+ idx.idOffsetMap = make(map[uint64]int64)
+ idx.tombstones = make(map[uint64]struct{})
+
+ // Process all entries since the maximum offset in the on-disk index.
+ minSegmentID, _ := SplitSeriesOffset(idx.maxOffset)
+ for _, segment := range segments {
+ if segment.ID() < minSegmentID {
+ continue
+ }
+
+ if err := segment.ForEachEntry(func(flag uint8, id uint64, offset int64, key []byte) error {
+ if offset <= idx.maxOffset {
+ return nil
+ }
+ idx.execEntry(flag, id, offset, key)
+ return nil
+ }); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Count returns the number of series in the index.
+func (idx *SeriesIndex) Count() uint64 {
+ return idx.OnDiskCount() + idx.InMemCount()
+}
+
+// OnDiskCount returns the number of series in the on-disk index.
+func (idx *SeriesIndex) OnDiskCount() uint64 { return idx.count }
+
+// InMemCount returns the number of series in the in-memory index.
+func (idx *SeriesIndex) InMemCount() uint64 { return uint64(len(idx.idOffsetMap)) }
+
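+// Insert adds the series key, id, and offset to the in-memory index.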
+func (idx *SeriesIndex) Insert(key []byte, id uint64, offset int64) {
+ idx.execEntry(SeriesEntryInsertFlag, id, offset, key)
+}
+
+// Delete marks the series id as deleted.
+func (idx *SeriesIndex) Delete(id uint64) {
+ idx.execEntry(SeriesEntryTombstoneFlag, id, 0, nil)
+}
+
+// IsDeleted returns true if series id has been deleted.
+func (idx *SeriesIndex) IsDeleted(id uint64) bool {
+ _, ok := idx.tombstones[id]
+ return ok
+}
+
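+// execEntry applies an insert or tombstone log entry to the in-memory maps, tracking the maximum series id and offset seen.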
+func (idx *SeriesIndex) execEntry(flag uint8, id uint64, offset int64, key []byte) {
+ switch flag {
+ case SeriesEntryInsertFlag:
+ idx.keyIDMap.Put(key, id)
+ idx.idOffsetMap[id] = offset
+
+ if id > idx.maxSeriesID {
+ idx.maxSeriesID = id
+ }
+ if offset > idx.maxOffset {
+ idx.maxOffset = offset
+ }
+
+ case SeriesEntryTombstoneFlag:
+ idx.tombstones[id] = struct{}{}
+
+ default:
+ panic("unreachable")
+ }
+}
+
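+// FindIDBySeriesKey returns the series id for a key, checking the in-memory map before probing the on-disk hash map.
+// It returns 0 if the key does not exist or the series has been tombstoned.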
+func (idx *SeriesIndex) FindIDBySeriesKey(segments []*SeriesSegment, key []byte) uint64 {
+ if v := idx.keyIDMap.Get(key); v != nil {
+ if id, _ := v.(uint64); id != 0 && !idx.IsDeleted(id) {
+ return id
+ }
+ }
+ if len(idx.data) == 0 {
+ return 0
+ }
+
+ hash := rhh.HashKey(key)
+ for d, pos := int64(0), hash&idx.mask; ; d, pos = d+1, (pos+1)&idx.mask {
+ elem := idx.keyIDData[(pos * SeriesIndexElemSize):]
+ elemOffset := int64(binary.BigEndian.Uint64(elem[:8]))
+
+ if elemOffset == 0 {
+ return 0
+ }
+
+ elemKey := ReadSeriesKeyFromSegments(segments, elemOffset+SeriesEntryHeaderSize)
+ elemHash := rhh.HashKey(elemKey)
+ if d > rhh.Dist(elemHash, pos, idx.capacity) {
+ return 0
+ } else if elemHash == hash && bytes.Equal(elemKey, key) {
+ id := binary.BigEndian.Uint64(elem[8:])
+ if idx.IsDeleted(id) {
+ return 0
+ }
+ return id
+ }
+ }
+}
+
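+// FindIDByNameTags returns the series id for a name/tags combination, or 0 if the series does not exist or is tombstoned.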
+func (idx *SeriesIndex) FindIDByNameTags(segments []*SeriesSegment, name []byte, tags models.Tags, buf []byte) uint64 {
+ id := idx.FindIDBySeriesKey(segments, AppendSeriesKey(buf[:0], name, tags))
+ if _, ok := idx.tombstones[id]; ok {
+ return 0
+ }
+ return id
+}
+
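+// FindIDListByNameTags returns the ids for a list of name/tags combinations. ok is false if any series could not be found.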
+func (idx *SeriesIndex) FindIDListByNameTags(segments []*SeriesSegment, names [][]byte, tagsSlice []models.Tags, buf []byte) (ids []uint64, ok bool) {
+ ids, ok = make([]uint64, len(names)), true
+ for i := range names {
+ id := idx.FindIDByNameTags(segments, names[i], tagsSlice[i], buf)
+ if id == 0 {
+ ok = false
+ continue
+ }
+ ids[i] = id
+ }
+ return ids, ok
+}
+
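+// FindOffsetByID returns the log offset for a series id, checking the in-memory map before probing the on-disk hash map.
+// It returns 0 if the id is not found.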
+func (idx *SeriesIndex) FindOffsetByID(id uint64) int64 {
+ if offset := idx.idOffsetMap[id]; offset != 0 {
+ return offset
+ } else if len(idx.data) == 0 {
+ return 0
+ }
+
+ hash := rhh.HashUint64(id)
+ for d, pos := int64(0), hash&idx.mask; ; d, pos = d+1, (pos+1)&idx.mask {
+ elem := idx.idOffsetData[(pos * SeriesIndexElemSize):]
+ elemID := binary.BigEndian.Uint64(elem[:8])
+
+ if elemID == id {
+ return int64(binary.BigEndian.Uint64(elem[8:]))
+ } else if elemID == 0 || d > rhh.Dist(rhh.HashUint64(elemID), pos, idx.capacity) {
+ return 0
+ }
+ }
+}
+
+// Clone returns a copy of idx for use during compaction. In-memory maps are not cloned.
+func (idx *SeriesIndex) Clone() *SeriesIndex {
+ tombstones := make(map[uint64]struct{}, len(idx.tombstones))
+ for id := range idx.tombstones {
+ tombstones[id] = struct{}{}
+ }
+
+ return &SeriesIndex{
+ path: idx.path,
+ count: idx.count,
+ capacity: idx.capacity,
+ mask: idx.mask,
+ maxSeriesID: idx.maxSeriesID,
+ maxOffset: idx.maxOffset,
+ data: idx.data,
+ keyIDData: idx.keyIDData,
+ idOffsetData: idx.idOffsetData,
+ tombstones: tombstones,
+ }
+}
+
+// SeriesIndexHeader represents the header of a series index.
+type SeriesIndexHeader struct {
+ Version uint8
+
+ MaxSeriesID uint64
+ MaxOffset int64
+
+ Count uint64
+ Capacity int64
+
+ KeyIDMap struct {
+ Offset int64
+ Size int64
+ }
+
+ IDOffsetMap struct {
+ Offset int64
+ Size int64
+ }
+}
+
+// NewSeriesIndexHeader returns a new instance of SeriesIndexHeader.
+func NewSeriesIndexHeader() SeriesIndexHeader {
+ return SeriesIndexHeader{Version: SeriesIndexVersion}
+}
+
+// ReadSeriesIndexHeader returns the header from data.
+func ReadSeriesIndexHeader(data []byte) (hdr SeriesIndexHeader, err error) {
+ r := bytes.NewReader(data)
+
+ // Read magic number.
+ magic := make([]byte, len(SeriesIndexMagic))
+ if _, err := io.ReadFull(r, magic); err != nil {
+ return hdr, err
+ } else if !bytes.Equal([]byte(SeriesIndexMagic), magic) {
+ return hdr, ErrInvalidSeriesIndex
+ }
+
+ // Read version.
+ if err := binary.Read(r, binary.BigEndian, &hdr.Version); err != nil {
+ return hdr, err
+ }
+
+ // Read max offset.
+ if err := binary.Read(r, binary.BigEndian, &hdr.MaxSeriesID); err != nil {
+ return hdr, err
+ } else if err := binary.Read(r, binary.BigEndian, &hdr.MaxOffset); err != nil {
+ return hdr, err
+ }
+
+ // Read count & capacity.
+ if err := binary.Read(r, binary.BigEndian, &hdr.Count); err != nil {
+ return hdr, err
+ } else if err := binary.Read(r, binary.BigEndian, &hdr.Capacity); err != nil {
+ return hdr, err
+ }
+
+ // Read key/id map position.
+ if err := binary.Read(r, binary.BigEndian, &hdr.KeyIDMap.Offset); err != nil {
+ return hdr, err
+ } else if err := binary.Read(r, binary.BigEndian, &hdr.KeyIDMap.Size); err != nil {
+ return hdr, err
+ }
+
+ // Read offset/id map position.
+ if err := binary.Read(r, binary.BigEndian, &hdr.IDOffsetMap.Offset); err != nil {
+ return hdr, err
+ } else if err := binary.Read(r, binary.BigEndian, &hdr.IDOffsetMap.Size); err != nil {
+ return hdr, err
+ }
+ return hdr, nil
+}
+
+// WriteTo writes the header to w.
+func (hdr *SeriesIndexHeader) WriteTo(w io.Writer) (n int64, err error) {
+ var buf bytes.Buffer
+ buf.WriteString(SeriesIndexMagic)
+ binary.Write(&buf, binary.BigEndian, hdr.Version)
+ binary.Write(&buf, binary.BigEndian, hdr.MaxSeriesID)
+ binary.Write(&buf, binary.BigEndian, hdr.MaxOffset)
+ binary.Write(&buf, binary.BigEndian, hdr.Count)
+ binary.Write(&buf, binary.BigEndian, hdr.Capacity)
+ binary.Write(&buf, binary.BigEndian, hdr.KeyIDMap.Offset)
+ binary.Write(&buf, binary.BigEndian, hdr.KeyIDMap.Size)
+ binary.Write(&buf, binary.BigEndian, hdr.IDOffsetMap.Offset)
+ binary.Write(&buf, binary.BigEndian, hdr.IDOffsetMap.Size)
+ return buf.WriteTo(w)
+}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/series_index_test.go b/vendor/github.com/influxdata/influxdb/tsdb/series_index_test.go
new file mode 100644
index 0000000000..fa24dcc65b
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/tsdb/series_index_test.go
@@ -0,0 +1,132 @@
+package tsdb_test
+
+import (
+ "bytes"
+ "path/filepath"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/influxdata/influxdb/tsdb"
+)
+
+func TestSeriesIndex_Count(t *testing.T) {
+ dir, cleanup := MustTempDir()
+ defer cleanup()
+
+ idx := tsdb.NewSeriesIndex(filepath.Join(dir, "index"))
+ if err := idx.Open(); err != nil {
+ t.Fatal(err)
+ }
+ defer idx.Close()
+
+ key0 := tsdb.AppendSeriesKey(nil, []byte("m0"), nil)
+ idx.Insert(key0, 1, 10)
+ key1 := tsdb.AppendSeriesKey(nil, []byte("m1"), nil)
+ idx.Insert(key1, 2, 20)
+
+ if n := idx.Count(); n != 2 {
+ t.Fatalf("unexpected count: %d", n)
+ }
+}
+
+func TestSeriesIndex_Delete(t *testing.T) {
+ dir, cleanup := MustTempDir()
+ defer cleanup()
+
+ idx := tsdb.NewSeriesIndex(filepath.Join(dir, "index"))
+ if err := idx.Open(); err != nil {
+ t.Fatal(err)
+ }
+ defer idx.Close()
+
+ key0 := tsdb.AppendSeriesKey(nil, []byte("m0"), nil)
+ idx.Insert(key0, 1, 10)
+ key1 := tsdb.AppendSeriesKey(nil, []byte("m1"), nil)
+ idx.Insert(key1, 2, 20)
+ idx.Delete(1)
+
+ if !idx.IsDeleted(1) {
+ t.Fatal("expected deletion")
+ } else if idx.IsDeleted(2) {
+ t.Fatal("expected series to exist")
+ }
+}
+
+func TestSeriesIndex_FindIDBySeriesKey(t *testing.T) {
+ dir, cleanup := MustTempDir()
+ defer cleanup()
+
+ idx := tsdb.NewSeriesIndex(filepath.Join(dir, "index"))
+ if err := idx.Open(); err != nil {
+ t.Fatal(err)
+ }
+ defer idx.Close()
+
+ key0 := tsdb.AppendSeriesKey(nil, []byte("m0"), nil)
+ idx.Insert(key0, 1, 10)
+ key1 := tsdb.AppendSeriesKey(nil, []byte("m1"), nil)
+ idx.Insert(key1, 2, 20)
+ badKey := tsdb.AppendSeriesKey(nil, []byte("not_found"), nil)
+
+ if id := idx.FindIDBySeriesKey(nil, key0); id != 1 {
+ t.Fatalf("unexpected id(0): %d", id)
+ } else if id := idx.FindIDBySeriesKey(nil, key1); id != 2 {
+ t.Fatalf("unexpected id(1): %d", id)
+ } else if id := idx.FindIDBySeriesKey(nil, badKey); id != 0 {
+ t.Fatalf("unexpected id(2): %d", id)
+ }
+
+ if id := idx.FindIDByNameTags(nil, []byte("m0"), nil, nil); id != 1 {
+ t.Fatalf("unexpected id(0): %d", id)
+ } else if id := idx.FindIDByNameTags(nil, []byte("m1"), nil, nil); id != 2 {
+ t.Fatalf("unexpected id(1): %d", id)
+ } else if id := idx.FindIDByNameTags(nil, []byte("not_found"), nil, nil); id != 0 {
+ t.Fatalf("unexpected id(2): %d", id)
+ }
+}
+
+func TestSeriesIndex_FindOffsetByID(t *testing.T) {
+ dir, cleanup := MustTempDir()
+ defer cleanup()
+
+ idx := tsdb.NewSeriesIndex(filepath.Join(dir, "index"))
+ if err := idx.Open(); err != nil {
+ t.Fatal(err)
+ }
+ defer idx.Close()
+
+ idx.Insert(tsdb.AppendSeriesKey(nil, []byte("m0"), nil), 1, 10)
+ idx.Insert(tsdb.AppendSeriesKey(nil, []byte("m1"), nil), 2, 20)
+
+ if offset := idx.FindOffsetByID(1); offset != 10 {
+ t.Fatalf("unexpected offset(0): %d", offset)
+ } else if offset := idx.FindOffsetByID(2); offset != 20 {
+ t.Fatalf("unexpected offset(1): %d", offset)
+ } else if offset := idx.FindOffsetByID(3); offset != 0 {
+ t.Fatalf("unexpected offset(2): %d", offset)
+ }
+}
+
+func TestSeriesIndexHeader(t *testing.T) {
+ // Verify header initializes correctly.
+ hdr := tsdb.NewSeriesIndexHeader()
+ if hdr.Version != tsdb.SeriesIndexVersion {
+ t.Fatalf("unexpected version: %d", hdr.Version)
+ }
+ hdr.MaxSeriesID = 10
+ hdr.MaxOffset = 20
+ hdr.Count = 30
+ hdr.Capacity = 40
+ hdr.KeyIDMap.Offset, hdr.KeyIDMap.Size = 50, 60
+ hdr.IDOffsetMap.Offset, hdr.IDOffsetMap.Size = 70, 80
+
+ // Marshal/unmarshal.
+ var buf bytes.Buffer
+ if _, err := hdr.WriteTo(&buf); err != nil {
+ t.Fatal(err)
+ } else if other, err := tsdb.ReadSeriesIndexHeader(buf.Bytes()); err != nil {
+ t.Fatal(err)
+ } else if diff := cmp.Diff(hdr, other); diff != "" {
+ t.Fatal(diff)
+ }
+}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/series_partition.go b/vendor/github.com/influxdata/influxdb/tsdb/series_partition.go
new file mode 100644
index 0000000000..565bc94bf4
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/tsdb/series_partition.go
@@ -0,0 +1,665 @@
+package tsdb
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "github.com/influxdata/influxdb/models"
+ "github.com/influxdata/influxdb/pkg/rhh"
+ "go.uber.org/zap"
+)
+
+var (
+ ErrSeriesPartitionClosed = errors.New("tsdb: series partition closed")
+)
+
+// DefaultSeriesPartitionCompactThreshold is the number of series IDs to hold in the in-memory
+// series map before compacting and rebuilding the on-disk representation.
+const DefaultSeriesPartitionCompactThreshold = 1 << 17 // 128K
+
+// SeriesPartition represents a subset of series file data.
+type SeriesPartition struct {
+ mu sync.RWMutex
+ wg sync.WaitGroup
+ id int
+ path string
+ closed bool
+
+ segments []*SeriesSegment
+ index *SeriesIndex
+ seq uint64 // series id sequence
+
+ compacting bool
+
+ CompactThreshold int
+
+ Logger *zap.Logger
+}
+
+// NewSeriesPartition returns a new instance of SeriesPartition.
+func NewSeriesPartition(id int, path string) *SeriesPartition {
+ return &SeriesPartition{
+ id: id,
+ path: path,
+ CompactThreshold: DefaultSeriesPartitionCompactThreshold,
+ Logger: zap.NewNop(),
+ }
+}
+
+// Open memory maps the data file at the partition's path.
+func (p *SeriesPartition) Open() error {
+ if p.closed {
+ return errors.New("tsdb: cannot reopen series partition")
+ }
+
+ // Create path if it doesn't exist.
+ if err := os.MkdirAll(filepath.Join(p.path), 0777); err != nil {
+ return err
+ }
+
+ // Open components.
+ if err := func() (err error) {
+ if err := p.openSegments(); err != nil {
+ return err
+ }
+
+ // Init last segment for writes.
+ if err := p.activeSegment().InitForWrite(); err != nil {
+ return err
+ }
+
+ p.index = NewSeriesIndex(p.IndexPath())
+ if err := p.index.Open(); err != nil {
+ return err
+ } else if err := p.index.Recover(p.segments); err != nil {
+ return err
+ }
+
+ return nil
+ }(); err != nil {
+ p.Close()
+ return err
+ }
+
+ return nil
+}
+
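+// openSegments opens all existing segment files under the partition's path, restores the series id sequence from
+// the most recent segment, and creates an initial segment if none exist.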
+func (p *SeriesPartition) openSegments() error {
+ fis, err := ioutil.ReadDir(p.path)
+ if err != nil {
+ return err
+ }
+
+ for _, fi := range fis {
+ segmentID, err := ParseSeriesSegmentFilename(fi.Name())
+ if err != nil {
+ continue
+ }
+
+ segment := NewSeriesSegment(segmentID, filepath.Join(p.path, fi.Name()))
+ if err := segment.Open(); err != nil {
+ return err
+ }
+ p.segments = append(p.segments, segment)
+ }
+
+ // Find max series id by searching segments in reverse order.
+ for i := len(p.segments) - 1; i >= 0; i-- {
+ if p.seq = p.segments[i].MaxSeriesID(); p.seq > 0 {
+ break
+ }
+ }
+
+ // Create initial segment if none exist.
+ if len(p.segments) == 0 {
+ segment, err := CreateSeriesSegment(0, filepath.Join(p.path, "0000"))
+ if err != nil {
+ return err
+ }
+ p.segments = append(p.segments, segment)
+ }
+
+ return nil
+}
+
+// Close unmaps the data files.
+func (p *SeriesPartition) Close() (err error) {
+ p.wg.Wait()
+
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ p.closed = true
+
+ for _, s := range p.segments {
+ if e := s.Close(); e != nil && err == nil {
+ err = e
+ }
+ }
+ p.segments = nil
+
+ if p.index != nil {
+ if e := p.index.Close(); e != nil && err == nil {
+ err = e
+ }
+ }
+ p.index = nil
+
+ return err
+}
+
+// ID returns the partition id.
+func (p *SeriesPartition) ID() int { return p.id }
+
+// Path returns the path to the partition.
+func (p *SeriesPartition) Path() string { return p.path }
+
+// IndexPath returns the path to the series index.
+func (p *SeriesPartition) IndexPath() string { return filepath.Join(p.path, "index") }
+
+// CreateSeriesListIfNotExists creates a list of series in bulk if they don't exist.
+// The ids slice is filled in with the id for each key that belongs to this partition, whether the series is new or already exists.
+func (p *SeriesPartition) CreateSeriesListIfNotExists(keys [][]byte, keyPartitionIDs []int, ids []uint64) error {
+ var writeRequired bool
+ p.mu.RLock()
+ if p.closed {
+ p.mu.RUnlock()
+ return ErrSeriesPartitionClosed
+ }
+ for i := range keys {
+ if keyPartitionIDs[i] != p.id {
+ continue
+ }
+ id := p.index.FindIDBySeriesKey(p.segments, keys[i])
+ if id == 0 {
+ writeRequired = true
+ continue
+ }
+ ids[i] = id
+ }
+ p.mu.RUnlock()
+
+ // Exit if all series for this partition already exist.
+ if !writeRequired {
+ return nil
+ }
+
+ type keyRange struct {
+ id uint64
+ offset int64
+ }
+ newKeyRanges := make([]keyRange, 0, len(keys))
+
+ // Obtain write lock to create new series.
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ if p.closed {
+ return ErrSeriesPartitionClosed
+ }
+
+ // Track offsets of duplicate series.
+ newIDs := make(map[string]uint64, len(ids))
+
+ for i := range keys {
+ // Skip series that don't belong to the partition or have already been created.
+ if keyPartitionIDs[i] != p.id || ids[i] != 0 {
+ continue
+ }
+
+ // Re-attempt lookup under write lock.
+ key := keys[i]
+ if ids[i] = newIDs[string(key)]; ids[i] != 0 {
+ continue
+ } else if ids[i] = p.index.FindIDBySeriesKey(p.segments, key); ids[i] != 0 {
+ continue
+ }
+
+ // Write to series log and save offset.
+ id, offset, err := p.insert(key)
+ if err != nil {
+ return err
+ }
+
+ // Append new key to be added to hash map after flush.
+ ids[i] = id
+ newIDs[string(key)] = id
+ newKeyRanges = append(newKeyRanges, keyRange{id, offset})
+ }
+
+ // Flush active segment writes so we can access data in mmap.
+ if segment := p.activeSegment(); segment != nil {
+ if err := segment.Flush(); err != nil {
+ return err
+ }
+ }
+
+ // Add keys to hash map(s).
+ for _, keyRange := range newKeyRanges {
+ p.index.Insert(p.seriesKeyByOffset(keyRange.offset), keyRange.id, keyRange.offset)
+ }
+
+ // Check if we've crossed the compaction threshold.
+ if !p.compacting && p.CompactThreshold != 0 && p.index.InMemCount() >= uint64(p.CompactThreshold) {
+ p.compacting = true
+ logger := p.Logger.With(zap.String("path", p.path))
+ logger.Info("beginning series partition compaction")
+
+ startTime := time.Now()
+ p.wg.Add(1)
+ go func() {
+ defer p.wg.Done()
+
+ if err := NewSeriesPartitionCompactor().Compact(p); err != nil {
+ logger.With(zap.Error(err)).Error("series partition compaction failed")
+ }
+
+ logger.With(zap.Duration("elapsed", time.Since(startTime))).Info("completed series partition compaction")
+
+ // Clear compaction flag.
+ p.mu.Lock()
+ p.compacting = false
+ p.mu.Unlock()
+ }()
+ }
+
+ return nil
+}
+
+// DeleteSeriesID flags a series as permanently deleted.
+// If the series is reintroduced later then it must create a new id.
+func (p *SeriesPartition) DeleteSeriesID(id uint64) error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ if p.closed {
+ return ErrSeriesPartitionClosed
+ }
+
+ // Already tombstoned, ignore.
+ if p.index.IsDeleted(id) {
+ return nil
+ }
+
+ // Write tombstone entry.
+ _, err := p.writeLogEntry(AppendSeriesEntry(nil, SeriesEntryTombstoneFlag, id, nil))
+ if err != nil {
+ return err
+ }
+
+ // Mark tombstone in memory.
+ p.index.Delete(id)
+
+ return nil
+}
+
+// IsDeleted returns true if the ID has been deleted before.
+func (p *SeriesPartition) IsDeleted(id uint64) bool {
+ p.mu.RLock()
+ if p.closed {
+ p.mu.RUnlock()
+ return false
+ }
+ v := p.index.IsDeleted(id)
+ p.mu.RUnlock()
+ return v
+}
+
+// SeriesKey returns the series key for a given id.
+func (p *SeriesPartition) SeriesKey(id uint64) []byte {
+ if id == 0 {
+ return nil
+ }
+ p.mu.RLock()
+ if p.closed {
+ p.mu.RUnlock()
+ return nil
+ }
+ key := p.seriesKeyByOffset(p.index.FindOffsetByID(id))
+ p.mu.RUnlock()
+ return key
+}
+
+// Series returns the parsed series name and tags for an id.
+func (p *SeriesPartition) Series(id uint64) ([]byte, models.Tags) {
+ key := p.SeriesKey(id)
+ if key == nil {
+ return nil, nil
+ }
+ return ParseSeriesKey(key)
+}
+
+// FindIDBySeriesKey returns the series id for the series key.
+func (p *SeriesPartition) FindIDBySeriesKey(key []byte) uint64 {
+ p.mu.RLock()
+ if p.closed {
+ p.mu.RUnlock()
+ return 0
+ }
+ id := p.index.FindIDBySeriesKey(p.segments, key)
+ p.mu.RUnlock()
+ return id
+}
+
+// SeriesCount returns the number of series.
+func (p *SeriesPartition) SeriesCount() uint64 {
+ p.mu.RLock()
+ if p.closed {
+ p.mu.RUnlock()
+ return 0
+ }
+ n := p.index.Count()
+ p.mu.RUnlock()
+ return n
+}
+
+// AppendSeriesIDs returns a list of all series ids.
+func (p *SeriesPartition) AppendSeriesIDs(a []uint64) []uint64 {
+ for _, segment := range p.segments {
+ a = segment.AppendSeriesIDs(a)
+ }
+ return a
+}
+
+// activeSegment returns the last segment.
+func (p *SeriesPartition) activeSegment() *SeriesSegment {
+ if len(p.segments) == 0 {
+ return nil
+ }
+ return p.segments[len(p.segments)-1]
+}
+
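+// insert appends an insert entry for key to the log and returns the newly assigned series id and its log offset.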
+func (p *SeriesPartition) insert(key []byte) (id uint64, offset int64, err error) {
+ // ID is built using an auto-increment sequence joined to the partition id.
+ // Format: the lower 8 bits hold the partition id; the remaining upper bits hold the sequence.
+ id = ((p.seq + 1) << 8) | uint64(p.id)
+
+ offset, err = p.writeLogEntry(AppendSeriesEntry(nil, SeriesEntryInsertFlag, id, key))
+ if err != nil {
+ return 0, 0, err
+ }
+
+ p.seq++
+ return id, offset, nil
+}
+
+// writeLogEntry appends an entry to the end of the active segment.
+// If there is no more room in the segment then a new segment is added.
+func (p *SeriesPartition) writeLogEntry(data []byte) (offset int64, err error) {
+ segment := p.activeSegment()
+ if segment == nil || !segment.CanWrite(data) {
+ if segment, err = p.createSegment(); err != nil {
+ return 0, err
+ }
+ }
+ return segment.WriteLogEntry(data)
+}
+
+// createSegment creates a new segment, appends it to the partition, and initializes it for writing.
+func (p *SeriesPartition) createSegment() (*SeriesSegment, error) {
+ // Close writer for active segment, if one exists.
+ if segment := p.activeSegment(); segment != nil {
+ if err := segment.CloseForWrite(); err != nil {
+ return nil, err
+ }
+ }
+
+ // Generate a new sequential segment identifier.
+ var id uint16
+ if len(p.segments) > 0 {
+ id = p.segments[len(p.segments)-1].ID() + 1
+ }
+ filename := fmt.Sprintf("%04x", id)
+
+ // Generate new empty segment.
+ segment, err := CreateSeriesSegment(id, filepath.Join(p.path, filename))
+ if err != nil {
+ return nil, err
+ }
+ p.segments = append(p.segments, segment)
+
+ // Allow segment to write.
+ if err := segment.InitForWrite(); err != nil {
+ return nil, err
+ }
+
+ return segment, nil
+}
+
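+// seriesKeyByOffset returns the series key stored at the given log offset, or nil if the offset is zero or its segment cannot be found.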
+func (p *SeriesPartition) seriesKeyByOffset(offset int64) []byte {
+ if offset == 0 {
+ return nil
+ }
+
+ segmentID, pos := SplitSeriesOffset(offset)
+ for _, segment := range p.segments {
+ if segment.ID() != segmentID {
+ continue
+ }
+
+ key, _ := ReadSeriesKey(segment.Slice(pos + SeriesEntryHeaderSize))
+ return key
+ }
+
+ return nil
+}
+
+// SeriesPartitionCompactor represents an object that reindexes a series partition and optionally compacts segments.
+type SeriesPartitionCompactor struct{}
+
+// NewSeriesPartitionCompactor returns a new instance of SeriesPartitionCompactor.
+func NewSeriesPartitionCompactor() *SeriesPartitionCompactor {
+ return &SeriesPartitionCompactor{}
+}
+
+// Compact rebuilds the series partition index.
+func (c *SeriesPartitionCompactor) Compact(p *SeriesPartition) error {
+ // Snapshot the segments and index so we can check tombstones and replay at the end under lock.
+ p.mu.RLock()
+ segments := CloneSeriesSegments(p.segments)
+ index := p.index.Clone()
+ seriesN := p.index.Count()
+ p.mu.RUnlock()
+
+ // Compact index to a temporary location.
+ indexPath := index.path + ".compacting"
+ if err := c.compactIndexTo(index, seriesN, segments, indexPath); err != nil {
+ return err
+ }
+
+ // Swap compacted index under lock & replay since compaction.
+ if err := func() error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ // Reopen index with new file.
+ if err := p.index.Close(); err != nil {
+ return err
+ } else if err := os.Rename(indexPath, index.path); err != nil {
+ return err
+ } else if err := p.index.Open(); err != nil {
+ return err
+ }
+
+ // Replay new entries.
+ if err := p.index.Recover(p.segments); err != nil {
+ return err
+ }
+ return nil
+ }(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
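+// compactIndexTo writes a rebuilt on-disk index for the given segments to path, skipping tombstoned series
+// and entries at or past the offset where compaction began.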
+func (c *SeriesPartitionCompactor) compactIndexTo(index *SeriesIndex, seriesN uint64, segments []*SeriesSegment, path string) error {
+ hdr := NewSeriesIndexHeader()
+ hdr.Count = seriesN
+ hdr.Capacity = pow2((int64(hdr.Count) * 100) / SeriesIndexLoadFactor)
+
+ // Allocate space for maps.
+ keyIDMap := make([]byte, (hdr.Capacity * SeriesIndexElemSize))
+ idOffsetMap := make([]byte, (hdr.Capacity * SeriesIndexElemSize))
+
+ // Reindex all segments.
+ for _, segment := range segments {
+ errDone := errors.New("done")
+
+ if err := segment.ForEachEntry(func(flag uint8, id uint64, offset int64, key []byte) error {
+ // Make sure we don't go past the offset where the compaction began.
+ if offset >= index.maxOffset {
+ return errDone
+ }
+
+ // Only process insert entries.
+ switch flag {
+ case SeriesEntryInsertFlag: // fallthrough
+ case SeriesEntryTombstoneFlag:
+ return nil
+ default:
+ return fmt.Errorf("unexpected series partition log entry flag: %d", flag)
+ }
+
+ // Ignore entry if tombstoned.
+ if index.IsDeleted(id) {
+ return nil
+ }
+
+ // Save max series identifier processed.
+ hdr.MaxSeriesID, hdr.MaxOffset = id, offset
+
+ // Insert into maps.
+ c.insertIDOffsetMap(idOffsetMap, hdr.Capacity, id, offset)
+ return c.insertKeyIDMap(keyIDMap, hdr.Capacity, segments, key, offset, id)
+ }); err == errDone {
+ break
+ } else if err != nil {
+ return err
+ }
+ }
+
+ // Open file handler.
+ f, err := os.Create(path)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Calculate map positions.
+ hdr.KeyIDMap.Offset, hdr.KeyIDMap.Size = SeriesIndexHeaderSize, int64(len(keyIDMap))
+ hdr.IDOffsetMap.Offset, hdr.IDOffsetMap.Size = hdr.KeyIDMap.Offset+hdr.KeyIDMap.Size, int64(len(idOffsetMap))
+
+ // Write header.
+ if _, err := hdr.WriteTo(f); err != nil {
+ return err
+ }
+
+ // Write maps.
+ if _, err := f.Write(keyIDMap); err != nil {
+ return err
+ } else if _, err := f.Write(idOffsetMap); err != nil {
+ return err
+ }
+
+ // Sync & close.
+ if err := f.Sync(); err != nil {
+ return err
+ } else if err := f.Close(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (c *SeriesPartitionCompactor) insertKeyIDMap(dst []byte, capacity int64, segments []*SeriesSegment, key []byte, offset int64, id uint64) error {
+ mask := capacity - 1
+ hash := rhh.HashKey(key)
+
+ // Continue searching until we find an empty slot or lower probe distance.
+ for i, dist, pos := int64(0), int64(0), hash&mask; ; i, dist, pos = i+1, dist+1, (pos+1)&mask {
+ assert(i <= capacity, "key/id map full")
+ elem := dst[(pos * SeriesIndexElemSize):]
+
+ // If empty slot found or matching offset, insert and exit.
+ elemOffset := int64(binary.BigEndian.Uint64(elem[:8]))
+ elemID := binary.BigEndian.Uint64(elem[8:])
+ if elemOffset == 0 || elemOffset == offset {
+ binary.BigEndian.PutUint64(elem[:8], uint64(offset))
+ binary.BigEndian.PutUint64(elem[8:], id)
+ return nil
+ }
+
+ // Read key at position & hash.
+ elemKey := ReadSeriesKeyFromSegments(segments, elemOffset+SeriesEntryHeaderSize)
+ elemHash := rhh.HashKey(elemKey)
+
+ // If the existing elem has probed less than us, then swap places with
+ // existing elem, and keep going to find another slot for that elem.
+ if d := rhh.Dist(elemHash, pos, capacity); d < dist {
+ // Insert current values.
+ binary.BigEndian.PutUint64(elem[:8], uint64(offset))
+ binary.BigEndian.PutUint64(elem[8:], id)
+
+ // Swap with values in that position.
+ hash, key, offset, id = elemHash, elemKey, elemOffset, elemID
+
+ // Update current distance.
+ dist = d
+ }
+ }
+}
+
+func (c *SeriesPartitionCompactor) insertIDOffsetMap(dst []byte, capacity int64, id uint64, offset int64) {
+ mask := capacity - 1
+ hash := rhh.HashUint64(id)
+
+ // Continue searching until we find an empty slot or lower probe distance.
+ for i, dist, pos := int64(0), int64(0), hash&mask; ; i, dist, pos = i+1, dist+1, (pos+1)&mask {
+ assert(i <= capacity, "id/offset map full")
+ elem := dst[(pos * SeriesIndexElemSize):]
+
+ // If an empty slot is found or the offset matches, insert and exit.
+ elemID := binary.BigEndian.Uint64(elem[:8])
+ elemOffset := int64(binary.BigEndian.Uint64(elem[8:]))
+ if elemOffset == 0 || elemOffset == offset {
+ binary.BigEndian.PutUint64(elem[:8], id)
+ binary.BigEndian.PutUint64(elem[8:], uint64(offset))
+ return
+ }
+
+ // Hash the existing element's id.
+ elemHash := rhh.HashUint64(elemID)
+
+ // If the existing elem has probed less than us, then swap places with
+ // existing elem, and keep going to find another slot for that elem.
+ if d := rhh.Dist(elemHash, pos, capacity); d < dist {
+ // Insert current values.
+ binary.BigEndian.PutUint64(elem[:8], id)
+ binary.BigEndian.PutUint64(elem[8:], uint64(offset))
+
+ // Swap with values in that position.
+ hash, id, offset = elemHash, elemID, elemOffset
+
+ // Update current distance.
+ dist = d
+ }
+ }
+}
+
+// pow2 returns the smallest power of 2 that is greater than or equal to v
+// (with a minimum of 2). Returns v itself if it is already a power of 2.
+func pow2(v int64) int64 {
+ for i := int64(2); i < 1<<62; i *= 2 {
+ if i >= v {
+ return i
+ }
+ }
+ panic("unreachable")
+}
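
The compactor above sizes both of its on-disk hash maps from the live series count and a load factor, rounding the capacity up to a power of two so that slot selection can use a bit mask instead of a modulo. A minimal standalone sketch of that sizing arithmetic follows; the load-factor value and all names here are illustrative assumptions, not the package's constants.

package main

import "fmt"

// nextPow2 mirrors pow2 above: the smallest power of two >= v (minimum 2).
func nextPow2(v int64) int64 {
	for i := int64(2); i < 1<<62; i *= 2 {
		if i >= v {
			return i
		}
	}
	panic("unreachable")
}

func main() {
	const loadFactor = 90 // percent; assumed stand-in for SeriesIndexLoadFactor
	seriesN := int64(1000000)

	// Scale the count by the load factor, then round up to a power of two
	// so that "hash & mask" selects a slot without a modulo.
	capacity := nextPow2(seriesN * 100 / loadFactor)
	mask := capacity - 1

	fmt.Println(capacity)                 // 2097152 slots for one million series
	fmt.Println(int64(0xDEADBEEF) & mask) // slot index for an example hash value
}
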
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/series_segment.go b/vendor/github.com/influxdata/influxdb/tsdb/series_segment.go
new file mode 100644
index 0000000000..bb0234a36f
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/tsdb/series_segment.go
@@ -0,0 +1,395 @@
+package tsdb
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "regexp"
+ "strconv"
+
+ "github.com/influxdata/influxdb/pkg/mmap"
+)
+
+const (
+ SeriesSegmentVersion = 1
+ SeriesSegmentMagic = "SSEG"
+
+ SeriesSegmentHeaderSize = 4 + 1 // magic + version
+)
+
+// Series entry constants.
+const (
+ SeriesEntryFlagSize = 1
+ SeriesEntryHeaderSize = 1 + 8 // flag + id
+
+ SeriesEntryInsertFlag = 0x01
+ SeriesEntryTombstoneFlag = 0x02
+)
+
+var (
+ ErrInvalidSeriesSegment = errors.New("invalid series segment")
+ ErrInvalidSeriesSegmentVersion = errors.New("invalid series segment version")
+ ErrSeriesSegmentNotWritable = errors.New("series segment not writable")
+)
+
+// SeriesSegment represents a log of series entries.
+type SeriesSegment struct {
+ id uint16
+ path string
+
+ data []byte // mmap file
+ file *os.File // write file handle
+ w *bufio.Writer // buffered file handle
+ size uint32 // current file size
+}
+
+// NewSeriesSegment returns a new instance of SeriesSegment.
+func NewSeriesSegment(id uint16, path string) *SeriesSegment {
+ return &SeriesSegment{
+ id: id,
+ path: path,
+ }
+}
+
+// CreateSeriesSegment generates an empty segment at path.
+func CreateSeriesSegment(id uint16, path string) (*SeriesSegment, error) {
+ // Generate segment in temp location.
+ f, err := os.Create(path + ".initializing")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ // Write header to file and close.
+ hdr := NewSeriesSegmentHeader()
+ if _, err := hdr.WriteTo(f); err != nil {
+ return nil, err
+ } else if err := f.Truncate(int64(SeriesSegmentSize(id))); err != nil {
+ return nil, err
+ } else if err := f.Close(); err != nil {
+ return nil, err
+ }
+
+ // Swap with target path.
+ if err := os.Rename(f.Name(), path); err != nil {
+ return nil, err
+ }
+
+ // Open segment at new location.
+ segment := NewSeriesSegment(id, path)
+ if err := segment.Open(); err != nil {
+ return nil, err
+ }
+ return segment, nil
+}
+
+// Open memory maps the data file at the file's path.
+func (s *SeriesSegment) Open() error {
+ if err := func() (err error) {
+ // Memory map file data.
+ if s.data, err = mmap.Map(s.path, int64(SeriesSegmentSize(s.id))); err != nil {
+ return err
+ }
+
+ // Read header.
+ hdr, err := ReadSeriesSegmentHeader(s.data)
+ if err != nil {
+ return err
+ } else if hdr.Version != SeriesSegmentVersion {
+ return ErrInvalidSeriesSegmentVersion
+ }
+
+ return nil
+ }(); err != nil {
+ s.Close()
+ return err
+ }
+
+ return nil
+}
+
+// InitForWrite initializes a write handle for the segment.
+// This is only used for the last segment in the series file.
+func (s *SeriesSegment) InitForWrite() (err error) {
+ // Only calculate the segment data size when writing.
+ for s.size = uint32(SeriesSegmentHeaderSize); s.size < uint32(len(s.data)); {
+ flag, _, _, sz := ReadSeriesEntry(s.data[s.size:])
+ if flag == 0 {
+ break
+ }
+ s.size += uint32(sz)
+ }
+
+ // Open file handle for writing & seek to the end of the data.
+ if s.file, err = os.OpenFile(s.path, os.O_WRONLY|os.O_CREATE, 0666); err != nil {
+ return err
+ } else if _, err := s.file.Seek(int64(s.size), io.SeekStart); err != nil {
+ return err
+ }
+ s.w = bufio.NewWriter(s.file)
+
+ return nil
+}
+
+// Close unmaps the segment.
+func (s *SeriesSegment) Close() (err error) {
+ if e := s.CloseForWrite(); e != nil && err == nil {
+ err = e
+ }
+
+ if s.data != nil {
+ if e := mmap.Unmap(s.data); e != nil && err == nil {
+ err = e
+ }
+ s.data = nil
+ }
+
+ return err
+}
+
+func (s *SeriesSegment) CloseForWrite() (err error) {
+ if s.w != nil {
+ if e := s.w.Flush(); e != nil && err == nil {
+ err = e
+ }
+ s.w = nil
+ }
+
+ if s.file != nil {
+ if e := s.file.Close(); e != nil && err == nil {
+ err = e
+ }
+ s.file = nil
+ }
+ return err
+}
+
+// ID returns the id the segment was initialized with.
+func (s *SeriesSegment) ID() uint16 { return s.id }
+
+// Size returns the size of the data in the segment.
+// This is only populated once InitForWrite() is called.
+func (s *SeriesSegment) Size() int64 { return int64(s.size) }
+
+// Slice returns a byte slice starting at pos.
+func (s *SeriesSegment) Slice(pos uint32) []byte { return s.data[pos:] }
+
+// WriteLogEntry writes entry data into the segment.
+// Returns the offset of the beginning of the entry.
+func (s *SeriesSegment) WriteLogEntry(data []byte) (offset int64, err error) {
+ if !s.CanWrite(data) {
+ return 0, ErrSeriesSegmentNotWritable
+ }
+
+ offset = JoinSeriesOffset(s.id, s.size)
+ if _, err := s.w.Write(data); err != nil {
+ return 0, err
+ }
+ s.size += uint32(len(data))
+
+ return offset, nil
+}
+
+// CanWrite returns true if the segment has space to write the entry data.
+func (s *SeriesSegment) CanWrite(data []byte) bool {
+ return s.w != nil && s.size+uint32(len(data)) <= SeriesSegmentSize(s.id)
+}
+
+// Flush flushes the buffer to disk.
+func (s *SeriesSegment) Flush() error {
+ if s.w == nil {
+ return nil
+ }
+ return s.w.Flush()
+}
+
+// AppendSeriesIDs appends all of the segment's series ids to a slice. Returns the new slice.
+func (s *SeriesSegment) AppendSeriesIDs(a []uint64) []uint64 {
+ s.ForEachEntry(func(flag uint8, id uint64, _ int64, _ []byte) error {
+ if flag == SeriesEntryInsertFlag {
+ a = append(a, id)
+ }
+ return nil
+ })
+ return a
+}
+
+// MaxSeriesID returns the highest series id in the segment.
+func (s *SeriesSegment) MaxSeriesID() uint64 {
+ var max uint64
+ s.ForEachEntry(func(flag uint8, id uint64, _ int64, _ []byte) error {
+ if flag == SeriesEntryInsertFlag && id > max {
+ max = id
+ }
+ return nil
+ })
+ return max
+}
+
+// ForEachEntry executes fn for every entry in the segment.
+func (s *SeriesSegment) ForEachEntry(fn func(flag uint8, id uint64, offset int64, key []byte) error) error {
+ for pos := uint32(SeriesSegmentHeaderSize); pos < uint32(len(s.data)); {
+ flag, id, key, sz := ReadSeriesEntry(s.data[pos:])
+ if flag == 0 {
+ break
+ }
+
+ offset := JoinSeriesOffset(s.id, pos)
+ if err := fn(flag, id, offset, key); err != nil {
+ return err
+ }
+ pos += uint32(sz)
+ }
+ return nil
+}
+
+// Clone returns a copy of the segment. Excludes the write handle, if set.
+func (s *SeriesSegment) Clone() *SeriesSegment {
+ return &SeriesSegment{
+ id: s.id,
+ path: s.path,
+ data: s.data,
+ size: s.size,
+ }
+}
+
+// CloneSeriesSegments returns a copy of a slice of segments.
+func CloneSeriesSegments(a []*SeriesSegment) []*SeriesSegment {
+ other := make([]*SeriesSegment, len(a))
+ for i := range a {
+ other[i] = a[i].Clone()
+ }
+ return other
+}
+
+// FindSegment returns a segment by id.
+func FindSegment(a []*SeriesSegment, id uint16) *SeriesSegment {
+ for _, segment := range a {
+ if segment.id == id {
+ return segment
+ }
+ }
+ return nil
+}
+
+// ReadSeriesKeyFromSegments returns a series key from an offset within a set of segments.
+func ReadSeriesKeyFromSegments(a []*SeriesSegment, offset int64) []byte {
+ segmentID, pos := SplitSeriesOffset(offset)
+ segment := FindSegment(a, segmentID)
+ if segment == nil {
+ return nil
+ }
+ buf := segment.Slice(pos)
+ key, _ := ReadSeriesKey(buf)
+ return key
+}
+
+// JoinSeriesOffset returns an offset that combines the 2-byte segmentID and 4-byte pos.
+func JoinSeriesOffset(segmentID uint16, pos uint32) int64 {
+ return (int64(segmentID) << 32) | int64(pos)
+}
+
+// SplitSeriesOffset splits an offset into its 2-byte segmentID and 4-byte pos parts.
+func SplitSeriesOffset(offset int64) (segmentID uint16, pos uint32) {
+ return uint16((offset >> 32) & 0xFFFF), uint32(offset & 0xFFFFFFFF)
+}
+
+// IsValidSeriesSegmentFilename returns true if filename is a 4-character lowercase hexadecimal number.
+func IsValidSeriesSegmentFilename(filename string) bool {
+ return seriesSegmentFilenameRegex.MatchString(filename)
+}
+
+// ParseSeriesSegmentFilename returns the id represented by the hexadecimal filename.
+func ParseSeriesSegmentFilename(filename string) (uint16, error) {
+ i, err := strconv.ParseUint(filename, 16, 32)
+ return uint16(i), err
+}
+
+var seriesSegmentFilenameRegex = regexp.MustCompile(`^[0-9a-f]{4}$`)
+
+// SeriesSegmentSize returns the maximum size of the segment.
+// The size doubles for each successive segment id, starting at 4MB and capping at 256MB.
+func SeriesSegmentSize(id uint16) uint32 {
+ const min = 22 // 4MB
+ const max = 28 // 256MB
+
+ shift := id + min
+ if shift >= max {
+ shift = max
+ }
+ return 1 << shift
+}
+
+// SeriesSegmentHeader represents the header of a series segment.
+type SeriesSegmentHeader struct {
+ Version uint8
+}
+
+// NewSeriesSegmentHeader returns a new instance of SeriesSegmentHeader.
+func NewSeriesSegmentHeader() SeriesSegmentHeader {
+ return SeriesSegmentHeader{Version: SeriesSegmentVersion}
+}
+
+// ReadSeriesSegmentHeader returns the header from data.
+func ReadSeriesSegmentHeader(data []byte) (hdr SeriesSegmentHeader, err error) {
+ r := bytes.NewReader(data)
+
+ // Read magic number.
+ magic := make([]byte, len(SeriesSegmentMagic))
+ if _, err := io.ReadFull(r, magic); err != nil {
+ return hdr, err
+ } else if !bytes.Equal([]byte(SeriesSegmentMagic), magic) {
+ return hdr, ErrInvalidSeriesSegment
+ }
+
+ // Read version.
+ if err := binary.Read(r, binary.BigEndian, &hdr.Version); err != nil {
+ return hdr, err
+ }
+
+ return hdr, nil
+}
+
+// WriteTo writes the header to w.
+func (hdr *SeriesSegmentHeader) WriteTo(w io.Writer) (n int64, err error) {
+ var buf bytes.Buffer
+ buf.WriteString(SeriesSegmentMagic)
+ binary.Write(&buf, binary.BigEndian, hdr.Version)
+ return buf.WriteTo(w)
+}
+
+func ReadSeriesEntry(data []byte) (flag uint8, id uint64, key []byte, sz int64) {
+ // If flag byte is zero then no more entries exist.
+ flag, data = uint8(data[0]), data[1:]
+ if flag == 0 {
+ return 0, 0, nil, 1
+ }
+
+ id, data = binary.BigEndian.Uint64(data), data[8:]
+ switch flag {
+ case SeriesEntryInsertFlag:
+ key, _ = ReadSeriesKey(data)
+ }
+ return flag, id, key, int64(SeriesEntryHeaderSize + len(key))
+}
+
+func AppendSeriesEntry(dst []byte, flag uint8, id uint64, key []byte) []byte {
+ buf := make([]byte, 8)
+ binary.BigEndian.PutUint64(buf, id)
+
+ dst = append(dst, flag)
+ dst = append(dst, buf...)
+
+ switch flag {
+ case SeriesEntryInsertFlag:
+ dst = append(dst, key...)
+ case SeriesEntryTombstoneFlag:
+ default:
+ panic(fmt.Sprintf("unreachable: invalid flag: %d", flag))
+ }
+ return dst
+}
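
A series offset packs the segment id into the high 16 bits and the byte position into the low 32 bits, and segment files double in size per id until they cap at 256MB. Below is a small standalone sketch of that arithmetic, mirroring JoinSeriesOffset, SplitSeriesOffset and SeriesSegmentSize above; the helper names are illustrative and not part of the package.

package main

import "fmt"

// joinOffset and splitOffset mirror JoinSeriesOffset / SplitSeriesOffset above.
func joinOffset(segmentID uint16, pos uint32) int64 {
	return (int64(segmentID) << 32) | int64(pos)
}

func splitOffset(offset int64) (uint16, uint32) {
	return uint16((offset >> 32) & 0xFFFF), uint32(offset & 0xFFFFFFFF)
}

// segmentSize mirrors SeriesSegmentSize above: 4MB for segment 0, doubling
// per id and capping at 256MB.
func segmentSize(id uint16) uint32 {
	const minShift, maxShift = 22, 28
	shift := id + minShift
	if shift >= maxShift {
		shift = maxShift
	}
	return 1 << shift
}

func main() {
	off := joinOffset(2, 16)
	id, pos := splitOffset(off)
	fmt.Printf("offset=%#x segment=%d pos=%d\n", off, id, pos) // offset=0x200000010 segment=2 pos=16

	for i := uint16(0); i <= 7; i++ {
		fmt.Printf("segment %d: %d MB\n", i, segmentSize(i)>>20) // 4, 8, 16, ..., 256, 256
	}
}
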
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/series_segment_test.go b/vendor/github.com/influxdata/influxdb/tsdb/series_segment_test.go
new file mode 100644
index 0000000000..fe4f87c8fd
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/tsdb/series_segment_test.go
@@ -0,0 +1,214 @@
+package tsdb_test
+
+import (
+ "bytes"
+ "path/filepath"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/influxdata/influxdb/tsdb"
+)
+
+func TestSeriesSegment(t *testing.T) {
+ dir, cleanup := MustTempDir()
+ defer cleanup()
+
+ // Create a new initial segment (4mb) and initialize for writing.
+ segment, err := tsdb.CreateSeriesSegment(0, filepath.Join(dir, "0000"))
+ if err != nil {
+ t.Fatal(err)
+ } else if err := segment.InitForWrite(); err != nil {
+ t.Fatal(err)
+ }
+ defer segment.Close()
+
+ // Write initial entry.
+ key1 := tsdb.AppendSeriesKey(nil, []byte("m0"), nil)
+ offset, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 1, key1))
+ if err != nil {
+ t.Fatal(err)
+ } else if offset != tsdb.SeriesSegmentHeaderSize {
+ t.Fatalf("unexpected offset: %d", offset)
+ }
+
+ // Write a large entry (3mb).
+ key2 := tsdb.AppendSeriesKey(nil, bytes.Repeat([]byte("m"), 3*(1<<20)), nil)
+ if offset, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 2, key2)); err != nil {
+ t.Fatal(err)
+ } else if exp := int64(tsdb.SeriesSegmentHeaderSize + tsdb.SeriesEntryHeaderSize + len(key1)); offset != exp {
+ t.Fatalf("unexpected offset: %d, expected: %d", offset, exp)
+ }
+
+ // Write another entry that is too large for the remaining segment space.
+ if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 3, tsdb.AppendSeriesKey(nil, bytes.Repeat([]byte("n"), 3*(1<<20)), nil))); err != tsdb.ErrSeriesSegmentNotWritable {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ // Verify two entries exist.
+ var n int
+ segment.ForEachEntry(func(flag uint8, id uint64, offset int64, key []byte) error {
+ switch n {
+ case 0:
+ if flag != tsdb.SeriesEntryInsertFlag || id != 1 || !bytes.Equal(key1, key) {
+ t.Fatalf("unexpected entry(0): %d, %d, %q", flag, id, key)
+ }
+ case 1:
+ if flag != tsdb.SeriesEntryInsertFlag || id != 2 || !bytes.Equal(key2, key) {
+ t.Fatalf("unexpected entry(1): %d, %d, %q", flag, id, key)
+ }
+ default:
+ t.Fatalf("too many entries")
+ }
+ n++
+ return nil
+ })
+ if n != 2 {
+ t.Fatalf("unexpected entry count: %d", n)
+ }
+}
+
+func TestSeriesSegment_AppendSeriesIDs(t *testing.T) {
+ dir, cleanup := MustTempDir()
+ defer cleanup()
+
+ segment, err := tsdb.CreateSeriesSegment(0, filepath.Join(dir, "0000"))
+ if err != nil {
+ t.Fatal(err)
+ } else if err := segment.InitForWrite(); err != nil {
+ t.Fatal(err)
+ }
+ defer segment.Close()
+
+ // Write entries.
+ if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 10, tsdb.AppendSeriesKey(nil, []byte("m0"), nil))); err != nil {
+ t.Fatal(err)
+ } else if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 11, tsdb.AppendSeriesKey(nil, []byte("m1"), nil))); err != nil {
+ t.Fatal(err)
+ } else if err := segment.Flush(); err != nil {
+ t.Fatal(err)
+ }
+
+ // Collect series ids with existing set.
+ a := segment.AppendSeriesIDs([]uint64{1, 2})
+ if diff := cmp.Diff(a, []uint64{1, 2, 10, 11}); diff != "" {
+ t.Fatal(diff)
+ }
+}
+
+func TestSeriesSegment_MaxSeriesID(t *testing.T) {
+ dir, cleanup := MustTempDir()
+ defer cleanup()
+
+ segment, err := tsdb.CreateSeriesSegment(0, filepath.Join(dir, "0000"))
+ if err != nil {
+ t.Fatal(err)
+ } else if err := segment.InitForWrite(); err != nil {
+ t.Fatal(err)
+ }
+ defer segment.Close()
+
+ // Write entries.
+ if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 10, tsdb.AppendSeriesKey(nil, []byte("m0"), nil))); err != nil {
+ t.Fatal(err)
+ } else if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 11, tsdb.AppendSeriesKey(nil, []byte("m1"), nil))); err != nil {
+ t.Fatal(err)
+ } else if err := segment.Flush(); err != nil {
+ t.Fatal(err)
+ }
+
+ // Verify maximum.
+ if max := segment.MaxSeriesID(); max != 11 {
+ t.Fatalf("unexpected max: %d", max)
+ }
+}
+
+func TestSeriesSegmentHeader(t *testing.T) {
+ // Verify header initializes correctly.
+ hdr := tsdb.NewSeriesSegmentHeader()
+ if hdr.Version != tsdb.SeriesSegmentVersion {
+ t.Fatalf("unexpected version: %d", hdr.Version)
+ }
+
+ // Marshal/unmarshal.
+ var buf bytes.Buffer
+ if _, err := hdr.WriteTo(&buf); err != nil {
+ t.Fatal(err)
+ } else if other, err := tsdb.ReadSeriesSegmentHeader(buf.Bytes()); err != nil {
+ t.Fatal(err)
+ } else if diff := cmp.Diff(hdr, other); diff != "" {
+ t.Fatal(diff)
+ }
+}
+
+func TestJoinSeriesOffset(t *testing.T) {
+ if offset := tsdb.JoinSeriesOffset(0x1234, 0x56789ABC); offset != 0x123456789ABC {
+ t.Fatalf("unexpected offset: %x", offset)
+ }
+}
+
+func TestSplitSeriesOffset(t *testing.T) {
+ if segmentID, pos := tsdb.SplitSeriesOffset(0x123456789ABC); segmentID != 0x1234 || pos != 0x56789ABC {
+ t.Fatalf("unexpected segmentID/pos: %x/%x", segmentID, pos)
+ }
+}
+
+func TestIsValidSeriesSegmentFilename(t *testing.T) {
+ if tsdb.IsValidSeriesSegmentFilename("") {
+ t.Fatal("expected invalid")
+ } else if tsdb.IsValidSeriesSegmentFilename("0ab") {
+ t.Fatal("expected invalid")
+ } else if !tsdb.IsValidSeriesSegmentFilename("192a") {
+ t.Fatal("expected valid")
+ }
+}
+
+func TestParseSeriesSegmentFilename(t *testing.T) {
+ if v, err := tsdb.ParseSeriesSegmentFilename("a90b"); err != nil {
+ t.Fatal(err)
+ } else if v != 0xA90B {
+ t.Fatalf("unexpected value: %x", v)
+ }
+ if v, err := tsdb.ParseSeriesSegmentFilename("0001"); err != nil {
+ t.Fatal(err)
+ } else if v != 1 {
+ t.Fatalf("unexpected value: %x", v)
+ }
+ if _, err := tsdb.ParseSeriesSegmentFilename("invalid"); err == nil {
+ t.Fatal("expected error")
+ }
+}
+
+func TestSeriesSegmentSize(t *testing.T) {
+ const mb = (1 << 20)
+ if sz := tsdb.SeriesSegmentSize(0); sz != 4*mb {
+ t.Fatalf("unexpected size: %d", sz)
+ } else if sz := tsdb.SeriesSegmentSize(1); sz != 8*mb {
+ t.Fatalf("unexpected size: %d", sz)
+ } else if sz := tsdb.SeriesSegmentSize(2); sz != 16*mb {
+ t.Fatalf("unexpected size: %d", sz)
+ } else if sz := tsdb.SeriesSegmentSize(3); sz != 32*mb {
+ t.Fatalf("unexpected size: %d", sz)
+ } else if sz := tsdb.SeriesSegmentSize(4); sz != 64*mb {
+ t.Fatalf("unexpected size: %d", sz)
+ } else if sz := tsdb.SeriesSegmentSize(5); sz != 128*mb {
+ t.Fatalf("unexpected size: %d", sz)
+ } else if sz := tsdb.SeriesSegmentSize(6); sz != 256*mb {
+ t.Fatalf("unexpected size: %d", sz)
+ } else if sz := tsdb.SeriesSegmentSize(7); sz != 256*mb {
+ t.Fatalf("unexpected size: %d", sz)
+ }
+}
+
+func TestSeriesEntry(t *testing.T) {
+ seriesKey := tsdb.AppendSeriesKey(nil, []byte("m0"), nil)
+ buf := tsdb.AppendSeriesEntry(nil, 1, 2, seriesKey)
+ if flag, id, key, sz := tsdb.ReadSeriesEntry(buf); flag != 1 {
+ t.Fatalf("unexpected flag: %d", flag)
+ } else if id != 2 {
+ t.Fatalf("unexpected id: %d", id)
+ } else if !bytes.Equal(seriesKey, key) {
+ t.Fatalf("unexpected key: %q", key)
+ } else if sz != int64(tsdb.SeriesEntryHeaderSize+len(key)) {
+ t.Fatalf("unexpected size: %d", sz)
+ }
+}
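
For reference, an insert entry written by AppendSeriesEntry is a single flag byte, an 8-byte big-endian id, and then the raw series key bytes. The standalone sketch below builds that layout by hand and does not use the tsdb package; note that in tsdb the key itself comes from AppendSeriesKey, which length-prefixes it.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const insertFlag = 0x01 // mirrors SeriesEntryInsertFlag
	id := uint64(2)
	key := []byte("m0") // in tsdb this would be the output of AppendSeriesKey

	// flag byte + 8-byte big-endian id + key bytes.
	entry := []byte{insertFlag}
	entry = append(entry, make([]byte, 8)...)
	binary.BigEndian.PutUint64(entry[1:9], id)
	entry = append(entry, key...)

	fmt.Printf("% x\n", entry) // 01 00 00 00 00 00 00 00 02 6d 30
	fmt.Println(len(entry))    // 1 + 8 + len(key), i.e. SeriesEntryHeaderSize + len(key)
}
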
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/series_set.go b/vendor/github.com/influxdata/influxdb/tsdb/series_set.go
new file mode 100644
index 0000000000..92f5d06d54
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/tsdb/series_set.go
@@ -0,0 +1,73 @@
+package tsdb
+
+import (
+ "sync"
+
+ "github.com/RoaringBitmap/roaring"
+)
+
+// SeriesIDSet represents a lockable bitmap of series ids.
+type SeriesIDSet struct {
+ sync.RWMutex
+ bitmap *roaring.Bitmap
+}
+
+// NewSeriesIDSet returns a new instance of SeriesIDSet.
+func NewSeriesIDSet() *SeriesIDSet {
+ return &SeriesIDSet{
+ bitmap: roaring.NewBitmap(),
+ }
+}
+
+// Add adds the series id to the set.
+func (s *SeriesIDSet) Add(id uint64) {
+ s.Lock()
+ defer s.Unlock()
+ s.AddNoLock(id)
+}
+
+// AddNoLock adds the series id to the set. AddNoLock is not safe for use from
+// multiple goroutines. Callers must manage synchronization.
+func (s *SeriesIDSet) AddNoLock(id uint64) {
+ s.bitmap.Add(uint32(id))
+}
+
+// Contains returns true if the id exists in the set.
+func (s *SeriesIDSet) Contains(id uint64) bool {
+ s.RLock()
+ defer s.RUnlock()
+ return s.ContainsNoLock(id)
+}
+
+// ContainsNoLock returns true if the id exists in the set. ContainsNoLock is
+// not safe for use from multiple goroutines. The caller must manage synchronization.
+func (s *SeriesIDSet) ContainsNoLock(id uint64) bool {
+ return s.bitmap.Contains(uint32(id))
+}
+
+// Remove removes the id from the set.
+func (s *SeriesIDSet) Remove(id uint64) {
+ s.Lock()
+ defer s.Unlock()
+ s.RemoveNoLock(id)
+}
+
+// RemoveNoLock removes the id from the set. RemoveNoLock is not safe for use
+// from multiple goroutines. The caller must manage synchronization.
+func (s *SeriesIDSet) RemoveNoLock(id uint64) {
+ s.bitmap.Remove(uint32(id))
+}
+
+// Merge merges the contents of others into s.
+func (s *SeriesIDSet) Merge(others ...*SeriesIDSet) {
+ bms := make([]*roaring.Bitmap, 0, len(others))
+ for _, other := range others {
+ other.RLock()
+ bms = append(bms, other.bitmap)
+ other.RUnlock()
+ }
+
+ s.Lock()
+ defer s.Unlock()
+ s.bitmap = roaring.FastOr(bms...)
+}
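
Typical usage of the new SeriesIDSet looks like the sketch below: ids are added per set and sets are unioned with Merge, which delegates to roaring.FastOr. This is illustrative only; the import path simply mirrors the vendored path used elsewhere in this diff.

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/tsdb"
)

func main() {
	a, b := tsdb.NewSeriesIDSet(), tsdb.NewSeriesIDSet()
	a.Add(1)
	a.Add(2)
	b.Add(3)

	merged := tsdb.NewSeriesIDSet()
	merged.Merge(a, b) // union of a and b via roaring.FastOr

	fmt.Println(merged.Contains(2), merged.Contains(3), merged.Contains(4)) // true true false
}
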
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/shard.go b/vendor/github.com/influxdata/influxdb/tsdb/shard.go
index 4beedcf3bd..40dbd0170b 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/shard.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/shard.go
@@ -19,6 +19,7 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/influxdata/influxdb/models"
+ "github.com/influxdata/influxdb/pkg/bytesutil"
"github.com/influxdata/influxdb/pkg/estimator"
"github.com/influxdata/influxdb/pkg/file"
"github.com/influxdata/influxdb/pkg/limiter"
@@ -110,8 +111,8 @@ type PartialWriteError struct {
Reason string
Dropped int
- // The set of series keys that were dropped. Can be nil.
- DroppedKeys map[string]struct{}
+ // A sorted slice of series keys that were dropped.
+ DroppedKeys [][]byte
}
func (e PartialWriteError) Error() string {
@@ -130,6 +131,7 @@ type Shard struct {
database string
retentionPolicy string
+ sfile *SeriesFile
options EngineOptions
mu sync.RWMutex
@@ -150,7 +152,7 @@ type Shard struct {
}
// NewShard returns a new initialized Shard. walPath doesn't apply to the b1 type index
-func NewShard(id uint64, path string, walPath string, opt EngineOptions) *Shard {
+func NewShard(id uint64, path string, walPath string, sfile *SeriesFile, opt EngineOptions) *Shard {
db, rp := decodeStorePath(path)
logger := zap.NewNop()
@@ -158,6 +160,7 @@ func NewShard(id uint64, path string, walPath string, opt EngineOptions) *Shard
id: id,
path: path,
walPath: walPath,
+ sfile: sfile,
options: opt,
closing: make(chan struct{}),
@@ -181,7 +184,7 @@ func NewShard(id uint64, path string, walPath string, opt EngineOptions) *Shard
return s
}
-// WithLogger sets the logger on the shard.
+// WithLogger sets the logger on the shard. It must be called before Open.
func (s *Shard) WithLogger(log *zap.Logger) {
s.baseLogger = log
engine, err := s.engine()
@@ -292,9 +295,11 @@ func (s *Shard) Open() error {
return nil
}
+ seriesIDSet := NewSeriesIDSet()
+
// Initialize underlying index.
ipath := filepath.Join(s.path, "index")
- idx, err := NewIndex(s.id, s.database, ipath, s.options)
+ idx, err := NewIndex(s.id, s.database, ipath, seriesIDSet, s.sfile, s.options)
if err != nil {
return err
}
@@ -307,7 +312,7 @@ func (s *Shard) Open() error {
idx.WithLogger(s.baseLogger)
// Initialize underlying engine.
- e, err := NewEngine(s.id, idx, s.database, s.path, s.walPath, s.options)
+ e, err := NewEngine(s.id, idx, s.database, s.path, s.walPath, s.sfile, s.options)
if err != nil {
return err
}
@@ -432,12 +437,15 @@ func (s *Shard) UnloadIndex() {
s.index.RemoveShard(s.id)
}
-// Index returns a reference to the underlying index.
-// This should only be used by utilities and not directly accessed by the database.
-func (s *Shard) Index() Index {
+// Index returns a reference to the underlying index. It returns an error if
+// the index is nil.
+func (s *Shard) Index() (Index, error) {
s.mu.RLock()
defer s.mu.RUnlock()
- return s.index
+ if err := s.ready(); err != nil {
+ return nil, err
+ }
+ return s.index, nil
}
// IsIdle return true if the shard is not receiving writes and is fully compacted.
@@ -570,7 +578,7 @@ func (s *Shard) validateSeriesAndFields(points []models.Point) ([]models.Point,
}
// Add new series. Check for partial writes.
- var droppedKeys map[string]struct{}
+ var droppedKeys [][]byte
if err := engine.CreateSeriesListIfNotExists(keys, names, tagsSlice); err != nil {
switch err := err.(type) {
case *PartialWriteError:
@@ -613,10 +621,8 @@ func (s *Shard) validateSeriesAndFields(points []models.Point) ([]models.Point,
// Skip points if keys have been dropped.
// The drop count has already been incremented during series creation.
- if droppedKeys != nil {
- if _, ok := droppedKeys[string(keys[i])]; ok {
- continue
- }
+ if len(droppedKeys) > 0 && bytesutil.Contains(droppedKeys, keys[i]) {
+ continue
}
name := p.Name()
@@ -713,12 +719,12 @@ func (s *Shard) createFieldsAndMeasurements(fieldsToCreate []*FieldCreate) error
}
// DeleteSeriesRange deletes all values from for seriesKeys between min and max (inclusive)
-func (s *Shard) DeleteSeriesRange(itr SeriesIterator, min, max int64) error {
+func (s *Shard) DeleteSeriesRange(itr SeriesIterator, min, max int64, removeIndex bool) error {
engine, err := s.engine()
if err != nil {
return err
}
- return engine.DeleteSeriesRange(itr, min, max)
+ return engine.DeleteSeriesRange(itr, min, max, removeIndex)
}
// DeleteMeasurement deletes a measurement and all underlying series.
@@ -739,15 +745,6 @@ func (s *Shard) SeriesN() int64 {
return engine.SeriesN()
}
-// SeriesSketches returns the series sketches for the shard.
-func (s *Shard) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {
- engine, err := s.engine()
- if err != nil {
- return nil, nil, err
- }
- return engine.SeriesSketches()
-}
-
// MeasurementsSketches returns the measurement sketches for the shard.
func (s *Shard) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {
engine, err := s.engine()
@@ -757,16 +754,6 @@ func (s *Shard) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, erro
return engine.MeasurementsSketches()
}
-// MeasurementNamesByExpr returns names of measurements matching the condition.
-// If cond is nil then all measurement names are returned.
-func (s *Shard) MeasurementNamesByExpr(auth query.Authorizer, cond influxql.Expr) ([][]byte, error) {
- engine, err := s.engine()
- if err != nil {
- return nil, err
- }
- return engine.MeasurementNamesByExpr(auth, cond)
-}
-
// MeasurementNamesByRegex returns names of measurements matching the regular expression.
func (s *Shard) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
engine, err := s.engine()
@@ -776,34 +763,6 @@ func (s *Shard) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
return engine.MeasurementNamesByRegex(re)
}
-func (s *Shard) MeasurementSeriesKeysByExprIterator(name []byte, expr influxql.Expr) (SeriesIterator, error) {
- engine, err := s.engine()
- if err != nil {
- return nil, err
- }
- return engine.MeasurementSeriesKeysByExprIterator(name, expr)
-}
-
-// MeasurementSeriesKeysByExpr returns a list of series keys from the shard
-// matching expr.
-func (s *Shard) MeasurementSeriesKeysByExpr(name []byte, expr influxql.Expr) ([][]byte, error) {
- engine, err := s.engine()
- if err != nil {
- return nil, err
- }
- return engine.MeasurementSeriesKeysByExpr(name, expr)
-}
-
-// TagKeyHasAuthorizedSeries determines if there exists an authorised series on
-// the provided measurement with the provided tag key.
-func (s *Shard) TagKeyHasAuthorizedSeries(auth query.Authorizer, name []byte, key string) bool {
- engine, err := s.engine()
- if err != nil {
- return false
- }
- return engine.TagKeyHasAuthorizedSeries(auth, name, key)
-}
-
// MeasurementTagKeysByExpr returns all the tag keys for the provided expression.
func (s *Shard) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) {
engine, err := s.engine()
@@ -816,11 +775,12 @@ func (s *Shard) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[s
// MeasurementTagKeyValuesByExpr returns all the tag keys values for the
// provided expression.
func (s *Shard) MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte, key []string, expr influxql.Expr, keysSorted bool) ([][]string, error) {
- engine, err := s.engine()
+ index, err := s.Index()
if err != nil {
return nil, err
}
- return engine.MeasurementTagKeyValuesByExpr(auth, name, key, expr, keysSorted)
+ indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile}
+ return indexSet.MeasurementTagKeyValuesByExpr(auth, name, key, expr, keysSorted)
}
// MeasurementFields returns fields for a measurement.
@@ -865,11 +825,17 @@ func (s *Shard) CreateIterator(ctx context.Context, m *influxql.Measurement, opt
switch m.SystemIterator {
case "_fieldKeys":
- return NewFieldKeysIterator(engine, opt)
+ return NewFieldKeysIterator(s, opt)
case "_series":
- return s.createSeriesIterator(opt)
+ // TODO(benbjohnson): Move up to the Shards.CreateIterator().
+ index, err := s.Index()
+ if err != nil {
+ return nil, err
+ }
+ indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile}
+ return NewSeriesPointIterator(indexSet, engine.MeasurementFieldSet(), opt)
case "_tagKeys":
- return NewTagKeysIterator(engine, opt)
+ return NewTagKeysIterator(s, opt)
}
return engine.CreateIterator(ctx, m.Name, opt)
}
@@ -882,32 +848,6 @@ func (s *Shard) CreateCursor(ctx context.Context, r *CursorRequest) (Cursor, err
return engine.CreateCursor(ctx, r)
}
-// createSeriesIterator returns a new instance of SeriesIterator.
-func (s *Shard) createSeriesIterator(opt query.IteratorOptions) (query.Iterator, error) {
- engine, err := s.engine()
- if err != nil {
- return nil, err
- }
-
- // Only equality operators are allowed.
- influxql.WalkFunc(opt.Condition, func(n influxql.Node) {
- switch n := n.(type) {
- case *influxql.BinaryExpr:
- switch n.Op {
- case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX,
- influxql.OR, influxql.AND:
- default:
- err = errors.New("invalid tag comparison operator")
- }
- }
- })
- if err != nil {
- return nil, err
- }
-
- return engine.SeriesPointIterator(opt)
-}
-
// FieldDimensions returns unique sets of fields and dimensions across a list of sources.
func (s *Shard) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {
engine, err := s.engine()
@@ -918,6 +858,10 @@ func (s *Shard) FieldDimensions(measurements []string) (fields map[string]influx
fields = make(map[string]influxql.DataType)
dimensions = make(map[string]struct{})
+ index, err := s.Index()
+ if err != nil {
+ return nil, nil, err
+ }
for _, name := range measurements {
// Handle system sources.
if strings.HasPrefix(name, "_") {
@@ -959,7 +903,8 @@ func (s *Shard) FieldDimensions(measurements []string) (fields map[string]influx
}
}
- if err := engine.ForEachMeasurementTagKey([]byte(name), func(key []byte) error {
+ indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile}
+ if err := indexSet.ForEachMeasurementTagKey([]byte(name), func(key []byte) error {
dimensions[string(key)] = struct{}{}
return nil
}); err != nil {
@@ -1160,14 +1105,6 @@ func (s *Shard) ForEachMeasurementName(fn func(name []byte) error) error {
return engine.ForEachMeasurementName(fn)
}
-func (s *Shard) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error {
- engine, err := s.engine()
- if err != nil {
- return err
- }
- return engine.ForEachMeasurementTagKey(name, fn)
-}
-
func (s *Shard) TagKeyCardinality(name, key []byte) int {
engine, err := s.engine()
if err != nil {
@@ -1177,16 +1114,16 @@ func (s *Shard) TagKeyCardinality(name, key []byte) int {
}
// Digest returns a digest of the shard.
-func (s *Shard) Digest() (io.ReadCloser, error) {
+func (s *Shard) Digest() (io.ReadCloser, int64, error) {
engine, err := s.engine()
if err != nil {
- return nil, err
+ return nil, 0, err
}
// Make sure the shard is idle/cold. (No use creating a digest of a
// hot shard that is rapidly changing.)
if !engine.IsIdle() {
- return nil, ErrShardNotIdle
+ return nil, 0, ErrShardNotIdle
}
return engine.Digest()
@@ -1743,13 +1680,19 @@ type Field struct {
// NewFieldKeysIterator returns an iterator that can be iterated over to
// retrieve field keys.
-func NewFieldKeysIterator(engine Engine, opt query.IteratorOptions) (query.Iterator, error) {
- itr := &fieldKeysIterator{engine: engine}
+func NewFieldKeysIterator(sh *Shard, opt query.IteratorOptions) (query.Iterator, error) {
+ itr := &fieldKeysIterator{shard: sh}
+
+ index, err := sh.Index()
+ if err != nil {
+ return nil, err
+ }
// Retrieve measurements from shard. Filter if condition specified.
//
// FGA is currently not supported when retrieving field keys.
- names, err := engine.MeasurementNamesByExpr(query.OpenAuthorizer, opt.Condition)
+ indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: sh.sfile}
+ names, err := indexSet.MeasurementNamesByExpr(query.OpenAuthorizer, opt.Condition)
if err != nil {
return nil, err
}
@@ -1760,9 +1703,9 @@ func NewFieldKeysIterator(engine Engine, opt query.IteratorOptions) (query.Itera
// fieldKeysIterator iterates over measurements and gets field keys from each measurement.
type fieldKeysIterator struct {
- engine Engine
- names [][]byte // remaining measurement names
- buf struct {
+ shard *Shard
+ names [][]byte // remaining measurement names
+ buf struct {
name []byte // current measurement name
fields []Field // current measurement's fields
}
@@ -1784,7 +1727,7 @@ func (itr *fieldKeysIterator) Next() (*query.FloatPoint, error) {
}
itr.buf.name = itr.names[0]
- mf := itr.engine.MeasurementFields(itr.buf.name)
+ mf := itr.shard.MeasurementFields(itr.buf.name)
if mf != nil {
fset := mf.FieldSet()
if len(fset) == 0 {
@@ -1820,10 +1763,16 @@ func (itr *fieldKeysIterator) Next() (*query.FloatPoint, error) {
}
// NewTagKeysIterator returns a new instance of TagKeysIterator.
-func NewTagKeysIterator(engine Engine, opt query.IteratorOptions) (query.Iterator, error) {
+func NewTagKeysIterator(sh *Shard, opt query.IteratorOptions) (query.Iterator, error) {
fn := func(name []byte) ([][]byte, error) {
+ index, err := sh.Index()
+ if err != nil {
+ return nil, err
+ }
+
+ indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: sh.sfile}
var keys [][]byte
- if err := engine.ForEachMeasurementTagKey(name, func(key []byte) error {
+ if err := indexSet.ForEachMeasurementTagKey(name, func(key []byte) error {
keys = append(keys, key)
return nil
}); err != nil {
@@ -1831,15 +1780,21 @@ func NewTagKeysIterator(engine Engine, opt query.IteratorOptions) (query.Iterato
}
return keys, nil
}
- return newMeasurementKeysIterator(engine, fn, opt)
+ return newMeasurementKeysIterator(sh, fn, opt)
}
// measurementKeyFunc is the function called by measurementKeysIterator.
type measurementKeyFunc func(name []byte) ([][]byte, error)
-func newMeasurementKeysIterator(engine Engine, fn measurementKeyFunc, opt query.IteratorOptions) (*measurementKeysIterator, error) {
+func newMeasurementKeysIterator(sh *Shard, fn measurementKeyFunc, opt query.IteratorOptions) (*measurementKeysIterator, error) {
+ index, err := sh.Index()
+ if err != nil {
+ return nil, err
+ }
+
+ indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: sh.sfile}
itr := &measurementKeysIterator{fn: fn}
- names, err := engine.MeasurementNamesByExpr(opt.Authorizer, opt.Condition)
+ names, err := indexSet.MeasurementNamesByExpr(opt.Authorizer, opt.Condition)
if err != nil {
return nil, err
}
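
The shard changes above repeatedly build an IndexSet from the shard's index and the shared series file, then run metadata queries through it instead of through engine-level methods. Below is a hedged sketch of that pattern using only calls visible in this diff; the wrapping function and package name are assumptions for illustration.

package example

import "github.com/influxdata/influxdb/tsdb"

// measurementTagKeys collects the tag keys of a measurement by wrapping the
// shard's index and the shared series file in an IndexSet.
func measurementTagKeys(sh *tsdb.Shard, sfile *tsdb.SeriesFile, name []byte) ([][]byte, error) {
	index, err := sh.Index()
	if err != nil {
		return nil, err
	}
	indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{index}, SeriesFile: sfile}

	var keys [][]byte
	if err := indexSet.ForEachMeasurementTagKey(name, func(key []byte) error {
		keys = append(keys, key)
		return nil
	}); err != nil {
		return nil, err
	}
	return keys, nil
}
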
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/shard_internal_test.go b/vendor/github.com/influxdata/influxdb/tsdb/shard_internal_test.go
index a398a9bd3c..f34f8555c3 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/shard_internal_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/shard_internal_test.go
@@ -14,6 +14,7 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
+ "github.com/influxdata/influxdb/logger"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxql"
)
@@ -207,7 +208,8 @@ mem,host=serverB value=50i,val3=t 10
// filesystem paths.
type TempShard struct {
*Shard
- path string
+ path string
+ sfile *SeriesFile
}
// NewTempShard returns a new instance of TempShard with temp paths.
@@ -218,27 +220,37 @@ func NewTempShard(index string) *TempShard {
panic(err)
}
+ // Create series file.
+ sfile := NewSeriesFile(filepath.Join(dir, "db0", SeriesFileDirectory))
+ sfile.Logger = logger.New(os.Stdout)
+ if err := sfile.Open(); err != nil {
+ panic(err)
+ }
+
// Build engine options.
opt := NewEngineOptions()
opt.IndexVersion = index
opt.Config.WALDir = filepath.Join(dir, "wal")
if index == "inmem" {
- opt.InmemIndex, _ = NewInmemIndex(path.Base(dir))
+ opt.InmemIndex, _ = NewInmemIndex(path.Base(dir), sfile)
}
return &TempShard{
Shard: NewShard(0,
filepath.Join(dir, "data", "db0", "rp0", "1"),
filepath.Join(dir, "wal", "db0", "rp0", "1"),
+ sfile,
opt,
),
- path: dir,
+ sfile: sfile,
+ path: dir,
}
}
// Close closes the shard and removes all underlying data.
func (sh *TempShard) Close() error {
defer os.RemoveAll(sh.path)
+ sh.sfile.Close()
return sh.Shard.Close()
}
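
Because shards now take a shared *SeriesFile, the setup order in these tests matters: open the series file first, hand it to NewShard, and close it only after the shard is closed. The standalone sketch below shows that lifecycle; paths are illustrative, options are left at their defaults, and error handling is reduced to panics.

package main

import (
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/influxdata/influxdb/tsdb"
)

func main() {
	dir, err := ioutil.TempDir("", "influxdb-tsdb-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// The series file must be open before any shard that references it.
	sfile := tsdb.NewSeriesFile(filepath.Join(dir, "db0", tsdb.SeriesFileDirectory))
	if err := sfile.Open(); err != nil {
		panic(err)
	}
	defer sfile.Close()

	opt := tsdb.NewEngineOptions()
	opt.Config.WALDir = filepath.Join(dir, "wal")

	sh := tsdb.NewShard(1,
		filepath.Join(dir, "data", "db0", "rp0", "1"),
		filepath.Join(dir, "wal", "db0", "rp0", "1"),
		sfile, opt)
	if err := sh.Open(); err != nil {
		panic(err)
	}
	defer sh.Close() // deferred last, so the shard closes before the series file
}
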
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/shard_test.go b/vendor/github.com/influxdata/influxdb/tsdb/shard_test.go
index 46796f48d5..226a87f1a8 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/shard_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/shard_test.go
@@ -37,11 +37,14 @@ func TestShardWriteAndIndex(t *testing.T) {
tmpShard := path.Join(tmpDir, "shard")
tmpWal := path.Join(tmpDir, "wal")
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
opts := tsdb.NewEngineOptions()
opts.Config.WALDir = filepath.Join(tmpDir, "wal")
- opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir))
+ opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile)
- sh := tsdb.NewShard(1, tmpShard, tmpWal, opts)
+ sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts)
// Calling WritePoints when the engine is not open will return
// ErrEngineClosed.
@@ -83,7 +86,7 @@ func TestShardWriteAndIndex(t *testing.T) {
// ensure the index gets loaded after closing and opening the shard
sh.Close()
- sh = tsdb.NewShard(1, tmpShard, tmpWal, opts)
+ sh = tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts)
if err := sh.Open(); err != nil {
t.Fatalf("error opening shard: %s", err.Error())
}
@@ -104,12 +107,15 @@ func TestMaxSeriesLimit(t *testing.T) {
tmpShard := path.Join(tmpDir, "db", "rp", "1")
tmpWal := path.Join(tmpDir, "wal")
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
opts := tsdb.NewEngineOptions()
opts.Config.WALDir = filepath.Join(tmpDir, "wal")
opts.Config.MaxSeriesPerDatabase = 1000
- opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir))
+ opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile)
- sh := tsdb.NewShard(1, tmpShard, tmpWal, opts)
+ sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts)
if err := sh.Open(); err != nil {
t.Fatalf("error opening shard: %s", err.Error())
@@ -157,12 +163,15 @@ func TestShard_MaxTagValuesLimit(t *testing.T) {
tmpShard := path.Join(tmpDir, "db", "rp", "1")
tmpWal := path.Join(tmpDir, "wal")
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
opts := tsdb.NewEngineOptions()
opts.Config.WALDir = filepath.Join(tmpDir, "wal")
opts.Config.MaxValuesPerTag = 1000
- opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir))
+ opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile)
- sh := tsdb.NewShard(1, tmpShard, tmpWal, opts)
+ sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts)
if err := sh.Open(); err != nil {
t.Fatalf("error opening shard: %s", err.Error())
@@ -210,11 +219,14 @@ func TestWriteTimeTag(t *testing.T) {
tmpShard := path.Join(tmpDir, "shard")
tmpWal := path.Join(tmpDir, "wal")
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
opts := tsdb.NewEngineOptions()
opts.Config.WALDir = filepath.Join(tmpDir, "wal")
- opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir))
+ opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile)
- sh := tsdb.NewShard(1, tmpShard, tmpWal, opts)
+ sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts)
if err := sh.Open(); err != nil {
t.Fatalf("error opening shard: %s", err.Error())
}
@@ -258,11 +270,14 @@ func TestWriteTimeField(t *testing.T) {
tmpShard := path.Join(tmpDir, "shard")
tmpWal := path.Join(tmpDir, "wal")
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
opts := tsdb.NewEngineOptions()
opts.Config.WALDir = filepath.Join(tmpDir, "wal")
- opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir))
+ opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile)
- sh := tsdb.NewShard(1, tmpShard, tmpWal, opts)
+ sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts)
if err := sh.Open(); err != nil {
t.Fatalf("error opening shard: %s", err.Error())
}
@@ -291,11 +306,14 @@ func TestShardWriteAddNewField(t *testing.T) {
tmpShard := path.Join(tmpDir, "shard")
tmpWal := path.Join(tmpDir, "wal")
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
opts := tsdb.NewEngineOptions()
opts.Config.WALDir = filepath.Join(tmpDir, "wal")
- opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir))
+ opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile)
- sh := tsdb.NewShard(1, tmpShard, tmpWal, opts)
+ sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts)
if err := sh.Open(); err != nil {
t.Fatalf("error opening shard: %s", err.Error())
}
@@ -341,11 +359,14 @@ func TestShard_WritePoints_FieldConflictConcurrent(t *testing.T) {
tmpShard := path.Join(tmpDir, "shard")
tmpWal := path.Join(tmpDir, "wal")
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
opts := tsdb.NewEngineOptions()
opts.Config.WALDir = filepath.Join(tmpDir, "wal")
- opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir))
+ opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile)
- sh := tsdb.NewShard(1, tmpShard, tmpWal, opts)
+ sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts)
if err := sh.Open(); err != nil {
t.Fatalf("error opening shard: %s", err.Error())
}
@@ -425,11 +446,14 @@ func TestShard_WritePoints_FieldConflictConcurrentQuery(t *testing.T) {
tmpShard := path.Join(tmpDir, "shard")
tmpWal := path.Join(tmpDir, "wal")
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
opts := tsdb.NewEngineOptions()
opts.Config.WALDir = filepath.Join(tmpDir, "wal")
- opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir))
+ opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile)
- sh := tsdb.NewShard(1, tmpShard, tmpWal, opts)
+ sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts)
if err := sh.Open(); err != nil {
t.Fatalf("error opening shard: %s", err.Error())
}
@@ -570,11 +594,14 @@ func TestShard_Close_RemoveIndex(t *testing.T) {
tmpShard := path.Join(tmpDir, "shard")
tmpWal := path.Join(tmpDir, "wal")
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
opts := tsdb.NewEngineOptions()
opts.Config.WALDir = filepath.Join(tmpDir, "wal")
- opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir))
+ opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile)
- sh := tsdb.NewShard(1, tmpShard, tmpWal, opts)
+ sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts)
if err := sh.Open(); err != nil {
t.Fatalf("error opening shard: %s", err.Error())
}
@@ -606,88 +633,86 @@ func TestShard_Close_RemoveIndex(t *testing.T) {
// Ensure a shard can create iterators for its underlying data.
func TestShard_CreateIterator_Ascending(t *testing.T) {
- var sh *Shard
- var itr query.Iterator
+ for _, index := range tsdb.RegisteredIndexes() {
+ t.Run(index, func(t *testing.T) {
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
- test := func(index string) {
- sh = NewShard(index)
+ sh := NewShard(index, sfile.SeriesFile)
+ defer sh.Close()
- // Calling CreateIterator when the engine is not open will return
- // ErrEngineClosed.
- m := &influxql.Measurement{Name: "cpu"}
- _, got := sh.CreateIterator(context.Background(), m, query.IteratorOptions{})
- if exp := tsdb.ErrEngineClosed; got != exp {
- t.Fatalf("got %v, expected %v", got, exp)
- }
+ // Calling CreateIterator when the engine is not open will return
+ // ErrEngineClosed.
+ m := &influxql.Measurement{Name: "cpu"}
+ _, got := sh.CreateIterator(context.Background(), m, query.IteratorOptions{})
+ if exp := tsdb.ErrEngineClosed; got != exp {
+ t.Fatalf("got %v, expected %v", got, exp)
+ }
- if err := sh.Open(); err != nil {
- t.Fatal(err)
- }
+ if err := sh.Open(); err != nil {
+ t.Fatal(err)
+ }
- sh.MustWritePointsString(`
+ sh.MustWritePointsString(`
cpu,host=serverA,region=uswest value=100 0
cpu,host=serverA,region=uswest value=50,val2=5 10
cpu,host=serverB,region=uswest value=25 0
`)
- // Create iterator.
- var err error
- m = &influxql.Measurement{Name: "cpu"}
- itr, err = sh.CreateIterator(context.Background(), m, query.IteratorOptions{
- Expr: influxql.MustParseExpr(`value`),
- Aux: []influxql.VarRef{{Val: "val2"}},
- Dimensions: []string{"host"},
- Ascending: true,
- StartTime: influxql.MinTime,
- EndTime: influxql.MaxTime,
- })
- if err != nil {
- t.Fatal(err)
- }
- fitr := itr.(query.FloatIterator)
-
- // Read values from iterator.
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected error(0): %s", err)
- } else if !deep.Equal(p, &query.FloatPoint{
- Name: "cpu",
- Tags: query.NewTags(map[string]string{"host": "serverA"}),
- Time: time.Unix(0, 0).UnixNano(),
- Value: 100,
- Aux: []interface{}{(*float64)(nil)},
- }) {
- t.Fatalf("unexpected point(0): %s", spew.Sdump(p))
- }
-
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected error(1): %s", err)
- } else if !deep.Equal(p, &query.FloatPoint{
- Name: "cpu",
- Tags: query.NewTags(map[string]string{"host": "serverA"}),
- Time: time.Unix(10, 0).UnixNano(),
- Value: 50,
- Aux: []interface{}{float64(5)},
- }) {
- t.Fatalf("unexpected point(1): %s", spew.Sdump(p))
- }
+ // Create iterator.
+ var err error
+ m = &influxql.Measurement{Name: "cpu"}
+ itr, err := sh.CreateIterator(context.Background(), m, query.IteratorOptions{
+ Expr: influxql.MustParseExpr(`value`),
+ Aux: []influxql.VarRef{{Val: "val2"}},
+ Dimensions: []string{"host"},
+ Ascending: true,
+ StartTime: influxql.MinTime,
+ EndTime: influxql.MaxTime,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer itr.Close()
+ fitr := itr.(query.FloatIterator)
+
+ // Read values from iterator.
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected error(0): %s", err)
+ } else if !deep.Equal(p, &query.FloatPoint{
+ Name: "cpu",
+ Tags: query.NewTags(map[string]string{"host": "serverA"}),
+ Time: time.Unix(0, 0).UnixNano(),
+ Value: 100,
+ Aux: []interface{}{(*float64)(nil)},
+ }) {
+ t.Fatalf("unexpected point(0): %s", spew.Sdump(p))
+ }
- if p, err := fitr.Next(); err != nil {
- t.Fatalf("unexpected error(2): %s", err)
- } else if !deep.Equal(p, &query.FloatPoint{
- Name: "cpu",
- Tags: query.NewTags(map[string]string{"host": "serverB"}),
- Time: time.Unix(0, 0).UnixNano(),
- Value: 25,
- Aux: []interface{}{(*float64)(nil)},
- }) {
- t.Fatalf("unexpected point(2): %s", spew.Sdump(p))
- }
- }
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected error(1): %s", err)
+ } else if !deep.Equal(p, &query.FloatPoint{
+ Name: "cpu",
+ Tags: query.NewTags(map[string]string{"host": "serverA"}),
+ Time: time.Unix(10, 0).UnixNano(),
+ Value: 50,
+ Aux: []interface{}{float64(5)},
+ }) {
+ t.Fatalf("unexpected point(1): %s", spew.Sdump(p))
+ }
- for _, index := range tsdb.RegisteredIndexes() {
- t.Run(index, func(t *testing.T) { test(index) })
- sh.Close()
- itr.Close()
+ if p, err := fitr.Next(); err != nil {
+ t.Fatalf("unexpected error(2): %s", err)
+ } else if !deep.Equal(p, &query.FloatPoint{
+ Name: "cpu",
+ Tags: query.NewTags(map[string]string{"host": "serverB"}),
+ Time: time.Unix(0, 0).UnixNano(),
+ Value: 25,
+ Aux: []interface{}{(*float64)(nil)},
+ }) {
+ t.Fatalf("unexpected point(2): %s", spew.Sdump(p))
+ }
+ })
}
}
@@ -697,7 +722,10 @@ func TestShard_CreateIterator_Descending(t *testing.T) {
var itr query.Iterator
test := func(index string) {
- sh = NewShard(index)
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ sh = NewShard(index, sfile.SeriesFile)
// Calling CreateIterator when the engine is not open will return
// ErrEngineClosed.
@@ -779,9 +807,6 @@ cpu,host=serverB,region=uswest value=25 0
}
func TestShard_CreateIterator_Series_Auth(t *testing.T) {
- var sh *Shard
- var itr query.Iterator
-
type variant struct {
name string
m *influxql.Measurement
@@ -802,7 +827,11 @@ func TestShard_CreateIterator_Series_Auth(t *testing.T) {
}
test := func(index string, v variant) error {
- sh = MustNewOpenShard(index)
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ sh := MustNewOpenShard(index, sfile.SeriesFile)
+ defer sh.Close()
sh.MustWritePointsString(`
cpu,host=serverA,region=uswest value=100 0
cpu,host=serverA,region=uswest value=50,val2=5 10
@@ -822,8 +851,7 @@ cpu,secret=foo value=100 0
// Create iterator for case where we use cursors (e.g., where time
// included in a SHOW SERIES query).
- var err error
- itr, err = sh.CreateIterator(context.Background(), v.m, query.IteratorOptions{
+ itr, err := sh.CreateIterator(context.Background(), v.m, query.IteratorOptions{
Aux: v.aux,
Ascending: true,
StartTime: influxql.MinTime,
@@ -837,6 +865,7 @@ cpu,secret=foo value=100 0
if itr == nil {
return fmt.Errorf("iterator is nil")
}
+ defer itr.Close()
fitr := itr.(query.FloatIterator)
defer fitr.Close()
@@ -861,6 +890,58 @@ cpu,secret=foo value=100 0
if gotCount != expCount {
return fmt.Errorf("got %d series, expected %d", gotCount, expCount)
}
+
+ // Delete series cpu,host=serverA,region=uswest
+ idx, err := sh.Index()
+ if err != nil {
+ return err
+ }
+
+ if err := idx.DropSeries([]byte("cpu,host=serverA,region=uswest"), time.Now().UnixNano()); err != nil {
+ return err
+ }
+
+ if itr, err = sh.CreateIterator(context.Background(), v.m, query.IteratorOptions{
+ Aux: v.aux,
+ Ascending: true,
+ StartTime: influxql.MinTime,
+ EndTime: influxql.MaxTime,
+ Authorizer: seriesAuthorizer,
+ }); err != nil {
+ return err
+ }
+
+ if itr == nil {
+ return fmt.Errorf("iterator is nil")
+ }
+ defer itr.Close()
+
+ fitr = itr.(query.FloatIterator)
+ defer fitr.Close()
+ expCount = 1
+ gotCount = 0
+ for {
+ f, err := fitr.Next()
+ if err != nil {
+ return err
+ }
+
+ if f == nil {
+ break
+ }
+
+ if got := f.Aux[0].(string); strings.Contains(got, "secret") {
+ return fmt.Errorf("got a series %q that should be filtered", got)
+ } else if got := f.Aux[0].(string); strings.Contains(got, "serverA") {
+ return fmt.Errorf("got a series %q that should be filtered", got)
+ }
+ gotCount++
+ }
+
+ if gotCount != expCount {
+ return fmt.Errorf("got %d series, expected %d", gotCount, expCount)
+ }
+
return nil
}
@@ -872,8 +953,6 @@ cpu,secret=foo value=100 0
}
})
}
- sh.Close()
- itr.Close()
}
}
@@ -881,7 +960,10 @@ func TestShard_Disabled_WriteQuery(t *testing.T) {
var sh *Shard
test := func(index string) {
- sh = NewShard(index)
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ sh = NewShard(index, sfile.SeriesFile)
if err := sh.Open(); err != nil {
t.Fatal(err)
}
@@ -932,7 +1014,10 @@ func TestShard_Disabled_WriteQuery(t *testing.T) {
func TestShard_Closed_Functions(t *testing.T) {
var sh *Shard
test := func(index string) {
- sh = NewShard(index)
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ sh = NewShard(index, sfile.SeriesFile)
if err := sh.Open(); err != nil {
t.Fatal(err)
}
@@ -950,13 +1035,6 @@ func TestShard_Closed_Functions(t *testing.T) {
sh.Close()
- // Should not panic, but returns an error when shard is closed
- if err := sh.ForEachMeasurementTagKey([]byte("cpu"), func(k []byte) error {
- return nil
- }); err == nil {
- t.Fatal("expected error: got nil")
- }
-
// Should not panic.
if exp, got := 0, sh.TagKeyCardinality([]byte("cpu"), []byte("host")); exp != got {
t.Fatalf("got %d, expected %d", got, exp)
@@ -971,8 +1049,11 @@ func TestShard_Closed_Functions(t *testing.T) {
func TestShard_FieldDimensions(t *testing.T) {
var sh *Shard
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
setup := func(index string) {
- sh = NewShard(index)
+ sh = NewShard(index, sfile.SeriesFile)
if err := sh.Open(); err != nil {
t.Fatal(err)
@@ -1087,8 +1168,11 @@ _reserved,region=uswest value="foo" 0
func TestShards_FieldDimensions(t *testing.T) {
var shard1, shard2 *Shard
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
setup := func(index string) {
- shard1 = NewShard(index)
+ shard1 = NewShard(index, sfile.SeriesFile)
if err := shard1.Open(); err != nil {
t.Fatal(err)
}
@@ -1099,7 +1183,7 @@ cpu,host=serverA,region=uswest value=50,val2=5 10
cpu,host=serverB,region=uswest value=25 0
`)
- shard2 = NewShard(index)
+ shard2 = NewShard(index, sfile.SeriesFile)
if err := shard2.Open(); err != nil {
t.Fatal(err)
}
@@ -1213,7 +1297,10 @@ func TestShards_MapType(t *testing.T) {
var shard1, shard2 *Shard
setup := func(index string) {
- shard1 = NewShard(index)
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
+ shard1 = NewShard(index, sfile.SeriesFile)
if err := shard1.Open(); err != nil {
t.Fatal(err)
}
@@ -1224,7 +1311,7 @@ cpu,host=serverA,region=uswest value=50,val2=5 10
cpu,host=serverB,region=uswest value=25 0
`)
- shard2 = NewShard(index)
+ shard2 = NewShard(index, sfile.SeriesFile)
if err := shard2.Open(); err != nil {
t.Fatal(err)
}
@@ -1351,8 +1438,11 @@ _reserved,region=uswest value="foo" 0
func TestShards_MeasurementsByRegex(t *testing.T) {
var shard1, shard2 *Shard
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
setup := func(index string) {
- shard1 = NewShard(index)
+ shard1 = NewShard(index, sfile.SeriesFile)
if err := shard1.Open(); err != nil {
t.Fatal(err)
}
@@ -1363,7 +1453,7 @@ cpu,host=serverA,region=uswest value=50,val2=5 10
cpu,host=serverB,region=uswest value=25 0
`)
- shard2 = NewShard(index)
+ shard2 = NewShard(index, sfile.SeriesFile)
if err := shard2.Open(); err != nil {
t.Fatal(err)
}
@@ -1642,7 +1732,7 @@ func benchmarkWritePoints(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) {
points := []models.Point{}
for _, s := range series {
for val := 0.0; val < float64(pntCnt); val++ {
- p := models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{"value": val}, time.Now())
+ p := models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": val}, time.Now())
points = append(points, p)
}
}
@@ -1651,12 +1741,15 @@ func benchmarkWritePoints(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) {
b.StopTimer()
b.ResetTimer()
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
// Run the benchmark loop.
for n := 0; n < b.N; n++ {
tmpDir, _ := ioutil.TempDir("", "shard_test")
tmpShard := path.Join(tmpDir, "shard")
tmpWal := path.Join(tmpDir, "wal")
- shard := tsdb.NewShard(1, tmpShard, tmpWal, tsdb.NewEngineOptions())
+ shard := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, tsdb.NewEngineOptions())
shard.Open()
b.StartTimer()
@@ -1681,16 +1774,19 @@ func benchmarkWritePointsExistingSeries(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt
points := []models.Point{}
for _, s := range series {
for val := 0.0; val < float64(pntCnt); val++ {
- p := models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{"value": val}, time.Now())
+ p := models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": val}, time.Now())
points = append(points, p)
}
}
+ sfile := MustOpenSeriesFile()
+ defer sfile.Close()
+
tmpDir, _ := ioutil.TempDir("", "")
defer os.RemoveAll(tmpDir)
tmpShard := path.Join(tmpDir, "shard")
tmpWal := path.Join(tmpDir, "wal")
- shard := tsdb.NewShard(1, tmpShard, tmpWal, tsdb.NewEngineOptions())
+ shard := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, tsdb.NewEngineOptions())
shard.Open()
defer shard.Close()
chunkedWrite(shard, points)
@@ -1734,11 +1830,12 @@ func chunkedWrite(shard *tsdb.Shard, points []models.Point) {
// Shard represents a test wrapper for tsdb.Shard.
type Shard struct {
*tsdb.Shard
- path string
+ sfile *tsdb.SeriesFile
+ path string
}
// NewShard returns a new instance of Shard with temp paths.
-func NewShard(index string) *Shard {
+func NewShard(index string, sfile *tsdb.SeriesFile) *Shard {
// Create temporary path for data and WAL.
dir, err := ioutil.TempDir("", "influxdb-tsdb-")
if err != nil {
@@ -1750,22 +1847,24 @@ func NewShard(index string) *Shard {
opt.IndexVersion = index
opt.Config.WALDir = filepath.Join(dir, "wal")
if index == "inmem" {
- opt.InmemIndex = inmem.NewIndex(path.Base(dir))
+ opt.InmemIndex = inmem.NewIndex(path.Base(dir), sfile)
}
return &Shard{
Shard: tsdb.NewShard(0,
filepath.Join(dir, "data", "db0", "rp0", "1"),
filepath.Join(dir, "wal", "db0", "rp0", "1"),
+ sfile,
opt,
),
- path: dir,
+ sfile: sfile,
+ path: dir,
}
}
// MustNewOpenShard creates and opens a shard with the provided index.
-func MustNewOpenShard(index string) *Shard {
- sh := NewShard(index)
+func MustNewOpenShard(index string, sfile *tsdb.SeriesFile) *Shard {
+ sh := NewShard(index, sfile)
if err := sh.Open(); err != nil {
panic(err)
}
@@ -1798,3 +1897,30 @@ func MustTempDir() (string, func()) {
}
return dir, func() { os.RemoveAll(dir) }
}
+
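+// seriesIterator is a test helper that iterates over a fixed list of raw series keys.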
+type seriesIterator struct {
+ keys [][]byte
+}
+
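+// series is a minimal series element produced by seriesIterator; Expr always returns nil.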
+type series struct {
+ name []byte
+ tags models.Tags
+ deleted bool
+}
+
+func (s series) Name() []byte { return s.name }
+func (s series) Tags() models.Tags { return s.tags }
+func (s series) Deleted() bool { return s.deleted }
+func (s series) Expr() influxql.Expr { return nil }
+
+func (itr *seriesIterator) Close() error { return nil }
+
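+// Next parses the next series key into a series element, or returns nil once the keys are exhausted.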
+func (itr *seriesIterator) Next() (tsdb.SeriesElem, error) {
+ if len(itr.keys) == 0 {
+ return nil, nil
+ }
+ name, tags := models.ParseKeyBytes(itr.keys[0])
+ s := series{name: name, tags: tags}
+ itr.keys = itr.keys[1:]
+ return s, nil
+}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/store.go b/vendor/github.com/influxdata/influxdb/tsdb/store.go
index 1a61b05d19..1e106a72eb 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/store.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/store.go
@@ -16,7 +16,6 @@ import (
"time"
"github.com/influxdata/influxdb/models"
- "github.com/influxdata/influxdb/pkg/bytesutil"
"github.com/influxdata/influxdb/pkg/estimator"
"github.com/influxdata/influxdb/pkg/limiter"
"github.com/influxdata/influxdb/query"
@@ -37,20 +36,22 @@ const (
statDatabaseMeasurements = "numMeasurements" // number of measurements in a database
)
+// SeriesFileDirectory is the name of the directory containing series files for
+// a database.
+const SeriesFileDirectory = "_series"
+
// Store manages shards and indexes for databases.
type Store struct {
- mu sync.RWMutex
- // databases keeps track of the number of databases being managed by the store.
- databases map[string]struct{}
-
- path string
+ mu sync.RWMutex
+ shards map[uint64]*Shard
+ databases map[string]struct{}
+ sfiles map[string]*SeriesFile
+ SeriesFileMaxSize int64 // Determines size of series file mmap. Can be altered in tests.
+ path string
// shared per-database indexes, only if using "inmem".
indexes map[string]interface{}
- // shards is a map of shard IDs to the associated Shard.
- shards map[uint64]*Shard
-
EngineOptions EngineOptions
baseLogger *zap.Logger
@@ -68,6 +69,7 @@ func NewStore(path string) *Store {
return &Store{
databases: make(map[string]struct{}),
path: path,
+ sfiles: make(map[string]*SeriesFile),
indexes: make(map[string]interface{}),
EngineOptions: NewEngineOptions(),
Logger: logger,
@@ -210,6 +212,12 @@ func (s *Store) loadShards() error {
continue
}
+ // Load series file.
+ sfile, err := s.openSeriesFile(db.Name())
+ if err != nil {
+ return err
+ }
+
// Retrieve database index.
idx, err := s.createIndexIfNotExists(db.Name())
if err != nil {
@@ -228,6 +236,11 @@ func (s *Store) loadShards() error {
continue
}
+			// The "_series" directory is not a retention policy.
+ if rp.Name() == SeriesFileDirectory {
+ continue
+ }
+
shardDirs, err := ioutil.ReadDir(filepath.Join(s.path, db.Name(), rp.Name()))
if err != nil {
return err
@@ -260,7 +273,7 @@ func (s *Store) loadShards() error {
}
// Open engine.
- shard := NewShard(shardID, path, walPath, opt)
+ shard := NewShard(shardID, path, walPath, sfile, opt)
// Disable compactions, writes and queries until all shards are loaded
shard.EnableOnOpen = false
@@ -325,12 +338,42 @@ func (s *Store) Close() error {
}
s.mu.Lock()
+ for _, sfile := range s.sfiles {
+ // Close out the series files.
+ if err := sfile.Close(); err != nil {
+ return err
+ }
+ }
+
s.shards = nil
+ s.sfiles = map[string]*SeriesFile{}
s.opened = false // Store may now be opened again.
s.mu.Unlock()
return nil
}
+// openSeriesFile either returns or creates a series file for the provided
+// database. It must be called under a full lock.
+func (s *Store) openSeriesFile(database string) (*SeriesFile, error) {
+ if sfile := s.sfiles[database]; sfile != nil {
+ return sfile, nil
+ }
+
+ sfile := NewSeriesFile(filepath.Join(s.path, database, SeriesFileDirectory))
+ sfile.Logger = s.baseLogger
+ if err := sfile.Open(); err != nil {
+ return nil, err
+ }
+ s.sfiles[database] = sfile
+ return sfile, nil
+}
+
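+// seriesFile returns the open series file for database, or nil if none exists.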
+func (s *Store) seriesFile(database string) *SeriesFile {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.sfiles[database]
+}
+
// createIndexIfNotExists returns a shared index for a database, if the inmem
// index is being used. If the TSI index is being used, then this method is
// basically a no-op.
@@ -339,7 +382,12 @@ func (s *Store) createIndexIfNotExists(name string) (interface{}, error) {
return idx, nil
}
- idx, err := NewInmemIndex(name)
+ sfile, err := s.openSeriesFile(name)
+ if err != nil {
+ return nil, err
+ }
+
+ idx, err := NewInmemIndex(name, sfile)
if err != nil {
return nil, err
}
@@ -387,10 +435,10 @@ func (s *Store) ShardN() int {
}
// ShardDigest returns a digest of the shard with the specified ID.
-func (s *Store) ShardDigest(id uint64) (io.ReadCloser, error) {
+func (s *Store) ShardDigest(id uint64) (io.ReadCloser, int64, error) {
sh := s.Shard(id)
if sh == nil {
- return nil, ErrShardNotFound
+ return nil, 0, ErrShardNotFound
}
return sh.Digest()
@@ -423,6 +471,12 @@ func (s *Store) CreateShard(database, retentionPolicy string, shardID uint64, en
return err
}
+ // Retrieve database series file.
+ sfile, err := s.openSeriesFile(database)
+ if err != nil {
+ return err
+ }
+
// Retrieve shared index, if needed.
idx, err := s.createIndexIfNotExists(database)
if err != nil {
@@ -434,7 +488,7 @@ func (s *Store) CreateShard(database, retentionPolicy string, shardID uint64, en
opt.InmemIndex = idx
path := filepath.Join(s.path, database, retentionPolicy, strconv.FormatUint(shardID, 10))
- shard := NewShard(shardID, path, walPath, opt)
+ shard := NewShard(shardID, path, walPath, sfile, opt)
shard.WithLogger(s.baseLogger)
shard.EnableOnOpen = enabled
@@ -525,6 +579,19 @@ func (s *Store) DeleteDatabase(name string) error {
dbPath := filepath.Clean(filepath.Join(s.path, name))
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ sfile := s.sfiles[name]
+ delete(s.sfiles, name)
+
+ // Close series file.
+ if sfile != nil {
+ if err := sfile.Close(); err != nil {
+ return err
+ }
+ }
+
// extra sanity check to make sure that even if someone named their database "../.."
// that we don't delete everything because of it, they'll just have extra files forever
if filepath.Clean(s.path) != filepath.Dir(dbPath) {
@@ -538,7 +605,6 @@ func (s *Store) DeleteDatabase(name string) error {
return err
}
- s.mu.Lock()
for _, sh := range shards {
delete(s.shards, sh.id)
}
@@ -548,7 +614,6 @@ func (s *Store) DeleteDatabase(name string) error {
// Remove shared index for database if using inmem index.
delete(s.indexes, name)
- s.mu.Unlock()
return nil
}
@@ -778,12 +843,11 @@ func (s *Store) estimateCardinality(dbName string, getSketches func(*Shard) (est
// SeriesCardinality returns the series cardinality for the provided database.
func (s *Store) SeriesCardinality(database string) (int64, error) {
- return s.estimateCardinality(database, func(sh *Shard) (estimator.Sketch, estimator.Sketch, error) {
- if sh == nil {
- return nil, nil, errors.New("shard nil, can't get cardinality")
- }
- return sh.SeriesSketches()
- })
+ sfile := s.seriesFile(database)
+ if sfile == nil {
+ return 0, nil
+ }
+ return int64(sfile.SeriesCount()), nil
}
// MeasurementsCardinality returns the measurement cardinality for the provided
@@ -873,7 +937,7 @@ func (s *Store) ShardRelativePath(id uint64) (string, error) {
// DeleteSeries loops through the local shards and deletes the series data for
// the passed in series keys.
-func (s *Store) DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error {
+func (s *Store) DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr, removeIndex bool) error {
// Expand regex expressions in the FROM clause.
a, err := s.ExpandSources(sources)
if err != nil {
@@ -901,13 +965,15 @@ func (s *Store) DeleteSeries(database string, sources []influxql.Source, conditi
max = influxql.MaxTime
}
- s.mu.RLock()
- shards := s.filterShards(byDatabase(database))
- s.mu.RUnlock()
-
s.mu.RLock()
defer s.mu.RUnlock()
+ sfile := s.sfiles[database]
+ if sfile == nil {
+ return fmt.Errorf("unable to locate series file for database: %q", database)
+ }
+ shards := s.filterShards(byDatabase(database))
+
// Limit to 1 delete for each shard since expanding the measurement into the list
// of series keys can be very memory intensive if run concurrently.
limit := limiter.NewFixed(1)
@@ -933,17 +999,22 @@ func (s *Store) DeleteSeries(database string, sources []influxql.Source, conditi
limit.Take()
defer limit.Release()
+ index, err := sh.Index()
+ if err != nil {
+ return err
+ }
+
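+			// Combine the shard's index with the database series file so matching series keys can be resolved.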
+ indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: sfile}
// Find matching series keys for each measurement.
for _, name := range names {
-
- itr, err := sh.MeasurementSeriesKeysByExprIterator([]byte(name), condition)
+ itr, err := indexSet.MeasurementSeriesByExprIterator([]byte(name), condition)
if err != nil {
return err
} else if itr == nil {
continue
}
-
- if err := sh.DeleteSeriesRange(itr, min, max); err != nil {
+ defer itr.Close()
+ if err := sh.DeleteSeriesRange(NewSeriesIteratorAdapter(sfile, itr), min, max, removeIndex); err != nil {
return err
}
@@ -998,34 +1069,22 @@ func (s *Store) MeasurementNames(auth query.Authorizer, database string, cond in
shards := s.filterShards(byDatabase(database))
s.mu.RUnlock()
- // If we're using the inmem index then all shards contain a duplicate
- // version of the global index. We don't need to iterate over all shards
- // since we have everything we need from the first shard.
- if len(shards) > 0 && shards[0].IndexType() == "inmem" {
- shards = shards[:1]
+ sfile := s.seriesFile(database)
+ if sfile == nil {
+ return nil, nil
}
- // Map to deduplicate measurement names across all shards. This is kind of naive
- // and could be improved using a sorted merge of the already sorted measurements in
- // each shard.
- set := make(map[string]struct{})
- var names [][]byte
+ // Build indexset.
+ is := IndexSet{Indexes: make([]Index, 0, len(shards)), SeriesFile: sfile}
for _, sh := range shards {
- a, err := sh.MeasurementNamesByExpr(auth, cond)
+ index, err := sh.Index()
if err != nil {
return nil, err
}
-
- for _, m := range a {
- if _, ok := set[string(m)]; !ok {
- set[string(m)] = struct{}{}
- names = append(names, m)
- }
- }
+ is.Indexes = append(is.Indexes, index)
}
- bytesutil.Sort(names)
-
- return names, nil
+ is = is.DedupeInmemIndexes()
+ return is.MeasurementNamesByExpr(auth, cond)
}
// MeasurementSeriesCounts returns the number of measurements and series in all
@@ -1059,6 +1118,10 @@ func (a tagKeysSlice) Less(i, j int) bool { return bytes.Compare(a[i].name, a[j]
// TagKeys returns the tag keys in the given database, matching the condition.
func (s *Store) TagKeys(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]TagKeys, error) {
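+	// Nothing to do without any shards.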
+ if len(shardIDs) == 0 {
+ return nil, nil
+ }
+
measurementExpr := influxql.CloneExpr(cond)
measurementExpr = influxql.Reduce(influxql.RewriteExpr(measurementExpr, func(e influxql.Expr) influxql.Expr {
switch e := e.(type) {
@@ -1090,108 +1153,92 @@ func (s *Store) TagKeys(auth query.Authorizer, shardIDs []uint64, cond influxql.
}), nil)
// Get all the shards we're interested in.
- shards := make([]*Shard, 0, len(shardIDs))
+ is := IndexSet{Indexes: make([]Index, 0, len(shardIDs))}
s.mu.RLock()
for _, sid := range shardIDs {
shard, ok := s.shards[sid]
if !ok {
continue
}
- shards = append(shards, shard)
- }
- s.mu.RUnlock()
- // If we're using the inmem index then all shards contain a duplicate
- // version of the global index. We don't need to iterate over all shards
- // since we have everything we need from the first shard.
- if len(shards) > 0 && shards[0].IndexType() == "inmem" {
- shards = shards[:1]
- }
-
- // Determine list of measurements.
- nameSet := make(map[string]struct{})
- for _, sh := range shards {
- // Checking for authorisation can be done later on, when non-matching
- // series might have been filtered out based on other conditions.
- names, err := sh.MeasurementNamesByExpr(nil, measurementExpr)
- if err != nil {
- return nil, err
- }
- for _, name := range names {
- nameSet[string(name)] = struct{}{}
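+		// Take the series file from the first shard; the requested shards are expected to belong to one database.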
+ if is.SeriesFile == nil {
+ is.SeriesFile = shard.sfile
}
+
+ is.Indexes = append(is.Indexes, shard.index)
}
+ s.mu.RUnlock()
- // Sort names.
- names := make([]string, 0, len(nameSet))
- for name := range nameSet {
- names = append(names, name)
+ // Determine list of measurements.
+ is = is.DedupeInmemIndexes()
+ names, err := is.MeasurementNamesByExpr(nil, measurementExpr)
+ if err != nil {
+ return nil, err
}
- sort.Strings(names)
// Iterate over each measurement.
var results []TagKeys
for _, name := range names {
- // Build keyset over all shards for measurement.
- keySet := map[string]struct{}{}
- for _, sh := range shards {
- shardKeySet, err := sh.MeasurementTagKeysByExpr([]byte(name), nil)
- if err != nil {
- return nil, err
- } else if len(shardKeySet) == 0 {
- continue
- }
- // If no tag value filter is present then all the tag keys can be returned
- // If they have authorized series associated with them.
- if filterExpr == nil {
- for tagKey := range shardKeySet {
- if sh.TagKeyHasAuthorizedSeries(auth, []byte(name), tagKey) {
- keySet[tagKey] = struct{}{}
- }
+ // Build keyset over all indexes for measurement.
+ tagKeySet, err := is.MeasurementTagKeysByExpr(name, nil)
+ if err != nil {
+ return nil, err
+ } else if len(tagKeySet) == 0 {
+ continue
+ }
+
+ keys := make([]string, 0, len(tagKeySet))
+	// If no tag value filter is present then all the tag keys can be returned,
+	// provided they have authorized series associated with them.
+ if filterExpr == nil {
+ for tagKey := range tagKeySet {
+ ok, err := is.TagKeyHasAuthorizedSeries(auth, []byte(name), []byte(tagKey))
+ if err != nil {
+ return nil, err
+ } else if ok {
+ keys = append(keys, tagKey)
}
- continue
}
+ sort.Strings(keys)
- // A tag value condition has been supplied. For each tag key filter
- // the set of tag values by the condition. Only tag keys with remaining
- // tag values will be included in the result set.
+ // Add to resultset.
+ results = append(results, TagKeys{
+ Measurement: string(name),
+ Keys: keys,
+ })
- // Sort the tag keys.
- shardKeys := make([]string, 0, len(shardKeySet))
- for k := range shardKeySet {
- shardKeys = append(shardKeys, k)
- }
- sort.Strings(shardKeys)
+ continue
+ }
- // TODO(edd): This is very expensive. We're materialising all unfiltered
- // tag values for all required tag keys, only to see if we have any.
- // Then we're throwing them all away as we only care about the tag
- // keys in the result set.
- shardValues, err := sh.MeasurementTagKeyValuesByExpr(auth, []byte(name), shardKeys, filterExpr, true)
- if err != nil {
- return nil, err
- }
+	// Tag filter provided, so filter the keys first.
- for i := range shardKeys {
- if len(shardValues[i]) == 0 {
- continue
- }
- keySet[shardKeys[i]] = struct{}{}
- }
+ // Sort the tag keys.
+ for k := range tagKeySet {
+ keys = append(keys, k)
}
+ sort.Strings(keys)
- // Sort key set.
- keys := make([]string, 0, len(keySet))
- for key := range keySet {
- keys = append(keys, key)
+ // Filter against tag values, skip if no values exist.
+ values, err := is.MeasurementTagKeyValuesByExpr(auth, name, keys, filterExpr, true)
+ if err != nil {
+ return nil, err
+ }
+
+ // Filter final tag keys using the matching values. If a key has one or
+ // more matching values then it will be included in the final set.
+ finalKeys := keys[:0] // Use same backing array as keys to save allocation.
+ for i, k := range keys {
+ if len(values[i]) > 0 {
+ // Tag key k has one or more matching tag values.
+ finalKeys = append(finalKeys, k)
+ }
}
- sort.Strings(keys)
// Add to resultset.
results = append(results, TagKeys{
- Measurement: name,
- Keys: keys,
+ Measurement: string(name),
+ Keys: finalKeys,
})
}
return results, nil
@@ -1261,105 +1308,101 @@ func (s *Store) TagValues(auth query.Authorizer, shardIDs []uint64, cond influxq
return e
}), nil)
- // Get set of Shards to work on.
- shards := make([]*Shard, 0, len(shardIDs))
+ // Build index set to work on.
+ is := IndexSet{Indexes: make([]Index, 0, len(shardIDs))}
s.mu.RLock()
for _, sid := range shardIDs {
shard, ok := s.shards[sid]
if !ok {
continue
}
- shards = append(shards, shard)
- }
- s.mu.RUnlock()
- // If we're using the inmem index then all shards contain a duplicate
- // version of the global index. We don't need to iterate over all shards
- // since we have everything we need from the first shard.
- if len(shards) > 0 && shards[0].IndexType() == "inmem" {
- shards = shards[:1]
+ if is.SeriesFile == nil {
+ is.SeriesFile = shard.sfile
+ }
+ is.Indexes = append(is.Indexes, shard.index)
}
+ s.mu.RUnlock()
+ is = is.DedupeInmemIndexes()
// Stores each list of TagValues for each measurement.
var allResults []tagValues
var maxMeasurements int // Hint as to lower bound on number of measurements.
- for _, sh := range shards {
- // names will be sorted by MeasurementNamesByExpr.
- // Authorisation can be done later one, when series may have been filtered
- // out by other conditions.
- names, err := sh.MeasurementNamesByExpr(nil, measurementExpr)
+ // names will be sorted by MeasurementNamesByExpr.
+ // Authorisation can be done later on, when series may have been filtered
+ // out by other conditions.
+ names, err := is.MeasurementNamesByExpr(nil, measurementExpr)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(names) > maxMeasurements {
+ maxMeasurements = len(names)
+ }
+
+ if allResults == nil {
+ allResults = make([]tagValues, 0, len(is.Indexes)*len(names)) // Assuming all series in all shards.
+ }
+
+ // Iterate over each matching measurement in the shard. For each
+	// measurement we'll get the matching tag keys (e.g., when a WITH KEYS
+	// statement is used), and we'll then use those to fetch all the relevant
+ // values from matching series. Series may be filtered using a WHERE
+ // filter.
+ for _, name := range names {
+ // Determine a list of keys from condition.
+ keySet, err := is.MeasurementTagKeysByExpr(name, cond)
if err != nil {
return nil, err
}
- if len(names) > maxMeasurements {
- maxMeasurements = len(names)
+ if len(keySet) == 0 {
+ // No matching tag keys for this measurement
+ continue
}
- if allResults == nil {
- allResults = make([]tagValues, 0, len(shards)*len(names)) // Assuming all series in all shards.
+ result := tagValues{
+ name: name,
+ keys: make([]string, 0, len(keySet)),
}
- // Iterate over each matching measurement in the shard. For each
- // measurement we'll get the matching tag keys (e.g., when a WITH KEYS)
- // statement is used, and we'll then use those to fetch all the relevant
- // values from matching series. Series may be filtered using a WHERE
- // filter.
- for _, name := range names {
- // Determine a list of keys from condition.
- keySet, err := sh.MeasurementTagKeysByExpr(name, cond)
- if err != nil {
- return nil, err
- }
-
- if len(keySet) == 0 {
- // No matching tag keys for this measurement
- continue
- }
-
- result := tagValues{
- name: name,
- keys: make([]string, 0, len(keySet)),
- }
+ // Add the keys to the tagValues and sort them.
+ for k := range keySet {
+ result.keys = append(result.keys, k)
+ }
+ sort.Sort(sort.StringSlice(result.keys))
- // Add the keys to the tagValues and sort them.
- for k := range keySet {
- result.keys = append(result.keys, k)
- }
- sort.Sort(sort.StringSlice(result.keys))
+		// Get all the tag values for each key in the keyset.
+		// Each slice in the results contains the sorted values associated
+		// with each tag key for the measurement from the key set.
+ if result.values, err = is.MeasurementTagKeyValuesByExpr(auth, name, result.keys, filterExpr, true); err != nil {
+ return nil, err
+ }
- // get all the tag values for each key in the keyset.
- // Each slice in the results contains the sorted values associated
- // associated with each tag key for the measurement from the key set.
- if result.values, err = sh.MeasurementTagKeyValuesByExpr(auth, name, result.keys, filterExpr, true); err != nil {
- return nil, err
+ // remove any tag keys that didn't have any authorized values
+ j := 0
+ for i := range result.keys {
+ if len(result.values[i]) == 0 {
+ continue
}
- // remove any tag keys that didn't have any authorized values
- j := 0
- for i := range result.keys {
- if len(result.values[i]) == 0 {
- continue
- }
-
- result.keys[j] = result.keys[i]
- result.values[j] = result.values[i]
- j++
- }
- result.keys = result.keys[:j]
- result.values = result.values[:j]
+ result.keys[j] = result.keys[i]
+ result.values[j] = result.values[i]
+ j++
+ }
+ result.keys = result.keys[:j]
+ result.values = result.values[:j]
- // only include result if there are keys with values
- if len(result.keys) > 0 {
- allResults = append(allResults, result)
- }
+ // only include result if there are keys with values
+ if len(result.keys) > 0 {
+ allResults = append(allResults, result)
}
}
result := make([]TagValues, 0, maxMeasurements)
// We need to sort all results by measurement name.
- if len(shards) > 1 {
+ if len(is.Indexes) > 1 {
sort.Sort(tagValuesSlice(allResults))
}
@@ -1367,7 +1410,7 @@ func (s *Store) TagValues(auth query.Authorizer, shardIDs []uint64, cond influxq
var i, j int
// Used as a temporary buffer in mergeTagValues. There can be at most len(shards)
// instances of tagValues for a given measurement.
- idxBuf := make([][2]int, 0, len(shards))
+ idxBuf := make([][2]int, 0, len(is.Indexes))
for i < len(allResults) {
// Gather all occurrences of the same measurement for merging.
for j+1 < len(allResults) && bytes.Equal(allResults[j+1].name, allResults[i].name) {
@@ -1377,7 +1420,7 @@ func (s *Store) TagValues(auth query.Authorizer, shardIDs []uint64, cond influxq
// An invariant is that there can't be more than n instances of tag
// key value pairs for a given measurement, where n is the number of
// shards.
- if got, exp := j-i+1, len(shards); got > exp {
+ if got, exp := j-i+1, len(is.Indexes); got > exp {
return nil, fmt.Errorf("unexpected results returned engine. Got %d measurement sets for %d shards", got, exp)
}
@@ -1551,17 +1594,33 @@ func (s *Store) monitorShards() {
databases[db] = struct{}{}
dbLock.Unlock()
+ sfile := s.seriesFile(sh.database)
+ if sfile == nil {
+ return nil
+ }
+
+ firstShardIndex, err := sh.Index()
+ if err != nil {
+ return err
+ }
+
+ index, err := sh.Index()
+ if err != nil {
+ return err
+ }
+
// inmem shards share the same index instance so just use the first one to avoid
// allocating the same measurements repeatedly
- first := shards[0]
- names, err := first.MeasurementNamesByExpr(nil, nil)
+ indexSet := IndexSet{Indexes: []Index{firstShardIndex}, SeriesFile: sfile}
+ names, err := indexSet.MeasurementNamesByExpr(nil, nil)
if err != nil {
s.Logger.Warn("cannot retrieve measurement names", zap.Error(err))
return nil
}
+ indexSet.Indexes = []Index{index}
for _, name := range names {
- sh.ForEachMeasurementTagKey(name, func(k []byte) error {
+ indexSet.ForEachMeasurementTagKey(name, func(k []byte) error {
n := sh.TagKeyCardinality(name, k)
perc := int(float64(n) / float64(s.EngineOptions.Config.MaxValuesPerTag) * 100)
if perc > 100 {
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/store_test.go b/vendor/github.com/influxdata/influxdb/tsdb/store_test.go
index 929984475a..46194c00a4 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/store_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/store_test.go
@@ -539,7 +539,7 @@ func testStoreCardinalityTombstoning(t *testing.T, store *Store) {
points := make([]models.Point, 0, len(series))
for _, s := range series {
- points = append(points, models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{"value": 1.0}, time.Now()))
+ points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now()))
}
// Create requested number of shards in the store & write points across
@@ -561,7 +561,7 @@ func testStoreCardinalityTombstoning(t *testing.T, store *Store) {
}
for _, name := range mnames {
- if err := store.DeleteSeries("db", []influxql.Source{&influxql.Measurement{Name: string(name)}}, nil); err != nil {
+ if err := store.DeleteSeries("db", []influxql.Source{&influxql.Measurement{Name: string(name)}}, nil, true); err != nil {
t.Fatal(err)
}
}
@@ -592,6 +592,8 @@ func testStoreCardinalityTombstoning(t *testing.T, store *Store) {
}
func TestStore_Cardinality_Tombstoning(t *testing.T) {
+ t.Skip("TODO(benbjohnson): Fix once series file moved to DB")
+
t.Parallel()
if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" {
@@ -620,7 +622,7 @@ func testStoreCardinalityUnique(t *testing.T, store *Store) {
points := make([]models.Point, 0, len(series))
for _, s := range series {
- points = append(points, models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{"value": 1.0}, time.Now()))
+ points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now()))
}
// Create requested number of shards in the store & write points across
@@ -658,6 +660,8 @@ func testStoreCardinalityUnique(t *testing.T, store *Store) {
}
func TestStore_Cardinality_Unique(t *testing.T) {
+ t.Skip("TODO(benbjohnson): Merge series file to DB level")
+
t.Parallel()
if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" {
@@ -689,7 +693,7 @@ func testStoreCardinalityDuplicates(t *testing.T, store *Store) {
points := make([]models.Point, 0, len(series))
for _, s := range series {
- points = append(points, models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{"value": 1.0}, time.Now()))
+ points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now()))
}
// Create requested number of shards in the store & write points.
@@ -765,7 +769,7 @@ func TestStore_Cardinality_Duplicates(t *testing.T) {
// Creates a large number of series in multiple shards, which will force
// compactions to occur.
-func testStoreCardinalityCompactions(t *testing.T, store *Store) {
+func testStoreCardinalityCompactions(store *Store) error {
// Generate point data to write to the shards.
series := genTestSeries(300, 5, 5) // 937,500 series
@@ -773,51 +777,50 @@ func testStoreCardinalityCompactions(t *testing.T, store *Store) {
points := make([]models.Point, 0, len(series))
for _, s := range series {
- points = append(points, models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{"value": 1.0}, time.Now()))
+ points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now()))
}
// Create requested number of shards in the store & write points across
// shards such that we never write the same series to multiple shards.
for shardID := 0; shardID < 2; shardID++ {
if err := store.CreateShard("db", "rp", uint64(shardID), true); err != nil {
- t.Fatalf("create shard: %s", err)
+ return fmt.Errorf("create shard: %s", err)
}
if err := store.BatchWrite(shardID, points[shardID*468750:(shardID+1)*468750]); err != nil {
- t.Fatalf("batch write: %s", err)
+ return fmt.Errorf("batch write: %s", err)
}
}
// Estimate the series cardinality...
cardinality, err := store.Store.SeriesCardinality("db")
if err != nil {
- t.Fatal(err)
+ return err
}
// Estimated cardinality should be well within 1.5% of the actual cardinality.
if got, exp := math.Abs(float64(cardinality)-float64(expCardinality))/float64(expCardinality), 0.015; got > exp {
- t.Errorf("got epsilon of %v for series cardinality %v (expected %v), which is larger than expected %v", got, cardinality, expCardinality, exp)
+ return fmt.Errorf("got epsilon of %v for series cardinality %v (expected %v), which is larger than expected %v", got, cardinality, expCardinality, exp)
}
// Estimate the measurement cardinality...
if cardinality, err = store.Store.MeasurementsCardinality("db"); err != nil {
- t.Fatal(err)
+ return err
}
// Estimated cardinality should be well within 2 of the actual cardinality. (Arbitrary...)
expCardinality = 300
if got, exp := math.Abs(float64(cardinality)-float64(expCardinality)), 2.0; got > exp {
- t.Errorf("got measurement cardinality %v, expected upto %v; difference is larger than expected %v", cardinality, expCardinality, exp)
+		return fmt.Errorf("got measurement cardinality %v, expected up to %v; difference is larger than expected %v", cardinality, expCardinality, exp)
}
+ return nil
}
func TestStore_Cardinality_Compactions(t *testing.T) {
- t.Parallel()
-
if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" {
t.Skip("Skipping test in short, race and appveyor mode.")
}
- test := func(index string) {
+ test := func(index string) error {
store := NewStore()
store.EngineOptions.Config.Index = "inmem"
store.EngineOptions.Config.MaxSeriesPerDatabase = 0
@@ -825,11 +828,15 @@ func TestStore_Cardinality_Compactions(t *testing.T) {
panic(err)
}
defer store.Close()
- testStoreCardinalityCompactions(t, store)
+ return testStoreCardinalityCompactions(store)
}
for _, index := range tsdb.RegisteredIndexes() {
- t.Run(index, func(t *testing.T) { test(index) })
+ t.Run(index, func(t *testing.T) {
+ if err := test(index); err != nil {
+ t.Fatal(err)
+ }
+ })
}
}
@@ -1006,6 +1013,36 @@ func TestStore_Measurements_Auth(t *testing.T) {
if gotNames != expNames {
return fmt.Errorf("got %d measurements, but expected %d", gotNames, expNames)
}
+
+ // Now delete all of the cpu series.
+ cond, err := influxql.ParseExpr("host = 'serverA' OR region = 'west'")
+ if err != nil {
+ return err
+ }
+
+ if err := s.DeleteSeries("db0", nil, cond, true); err != nil {
+ return err
+ }
+
+ if names, err = s.MeasurementNames(authorizer, "db0", nil); err != nil {
+ return err
+ }
+
+ // names should not contain any measurements where none of the associated
+ // series are authorised for reads.
+ expNames = 1
+ gotNames = 0
+ for _, name := range names {
+ if string(name) == "mem" || string(name) == "cpu" {
+ return fmt.Errorf("after delete got measurement %q but it should be filtered.", name)
+ }
+ gotNames++
+ }
+
+ if gotNames != expNames {
+ return fmt.Errorf("after delete got %d measurements, but expected %d", gotNames, expNames)
+ }
+
return nil
}
@@ -1016,6 +1053,7 @@ func TestStore_Measurements_Auth(t *testing.T) {
}
})
}
+
}
func TestStore_TagKeys_Auth(t *testing.T) {
@@ -1068,6 +1106,41 @@ func TestStore_TagKeys_Auth(t *testing.T) {
if gotKeys != expKeys {
return fmt.Errorf("got %d keys, but expected %d", gotKeys, expKeys)
}
+
+ // Delete the series with region = west
+ cond, err := influxql.ParseExpr("region = 'west'")
+ if err != nil {
+ return err
+ }
+ if err := s.DeleteSeries("db0", nil, cond, true); err != nil {
+ return err
+ }
+
+ if keys, err = s.TagKeys(authorizer, []uint64{0}, nil); err != nil {
+ return err
+ }
+
+ // keys should not contain any tag keys associated with a series containing
+ // a secret tag or the deleted series
+ expKeys = 2
+ gotKeys = 0
+ for _, tk := range keys {
+ if got, exp := tk.Measurement, "cpu"; got != exp {
+ return fmt.Errorf("got measurement %q, expected %q", got, exp)
+ }
+
+ for _, key := range tk.Keys {
+ if key == "secret" || key == "machine" || key == "region" {
+ return fmt.Errorf("got tag key %q but it should be filtered.", key)
+ }
+ gotKeys++
+ }
+ }
+
+ if gotKeys != expKeys {
+ return fmt.Errorf("got %d keys, but expected %d", gotKeys, expKeys)
+ }
+
return nil
}
@@ -1078,6 +1151,7 @@ func TestStore_TagKeys_Auth(t *testing.T) {
}
})
}
+
}
func TestStore_TagValues_Auth(t *testing.T) {
@@ -1132,6 +1206,48 @@ func TestStore_TagValues_Auth(t *testing.T) {
}
}
+ if gotValues != expValues {
+ return fmt.Errorf("got %d tags, but expected %d", gotValues, expValues)
+ }
+
+	// Delete the series with host value serverA
+ cond, err := influxql.ParseExpr("host = 'serverA'")
+ if err != nil {
+ return err
+ }
+ if err := s.DeleteSeries("db0", nil, cond, true); err != nil {
+ return err
+ }
+
+ values, err = s.TagValues(authorizer, []uint64{0}, &influxql.BinaryExpr{
+ Op: influxql.EQ,
+ LHS: &influxql.VarRef{Val: "_tagKey"},
+ RHS: &influxql.StringLiteral{Val: "host"},
+ })
+
+ if err != nil {
+ return err
+ }
+
+	// values should not contain any tag values associated with a series containing
+	// a secret tag or with the deleted series.
+ expValues = 1
+ gotValues = 0
+ for _, tv := range values {
+ if got, exp := tv.Measurement, "cpu"; got != exp {
+ return fmt.Errorf("got measurement %q, expected %q", got, exp)
+ }
+
+ for _, v := range tv.Values {
+ if got, exp := v.Value, "serverD"; got == exp {
+ return fmt.Errorf("got tag value %q but it should be filtered.", got)
+ } else if got, exp := v.Value, "serverA"; got == exp {
+ return fmt.Errorf("got tag value %q but it should be filtered.", got)
+ }
+ gotValues++
+ }
+ }
+
if gotValues != expValues {
return fmt.Errorf("got %d tags, but expected %d", gotValues, expValues)
}
@@ -1214,7 +1330,7 @@ func benchmarkStoreOpen(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt, shardCnt int)
points := []models.Point{}
for _, s := range series {
for val := 0.0; val < float64(pntCnt); val++ {
- p := models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{"value": val}, time.Now())
+ p := models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": val}, time.Now())
points = append(points, p)
}
}
@@ -1396,6 +1512,7 @@ func NewStore() *Store {
if testing.Verbose() {
s.WithLogger(logger.New(os.Stdout))
}
+
return s
}
@@ -1404,6 +1521,7 @@ func NewStore() *Store {
func MustOpenStore(index string) *Store {
s := NewStore()
s.EngineOptions.IndexVersion = index
+
if err := s.Open(); err != nil {
panic(err)
}
@@ -1415,9 +1533,10 @@ func (s *Store) Reopen() error {
if err := s.Store.Close(); err != nil {
return err
}
+
s.Store = tsdb.NewStore(s.Path())
s.EngineOptions.Config.WALDir = filepath.Join(s.Path(), "wal")
- return s.Open()
+ return s.Store.Open()
}
// Close closes the store and removes the underlying data.
diff --git a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/README.md b/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/README.md
deleted file mode 100644
index 0b7332511e..0000000000
--- a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# kinako
-
-Kinako is small VM written in Go.
-
-![](https://raw.githubusercontent.com/mattn/kinako/master/kinako.png)
-
-(Picture licensed under CC BY-SA 3.0 by wikipedia)
-
-## Installation
-Requires Go.
-```
-$ go get -u github.com/mattn/kinako
-```
-
-## Usage
-
-Embedding the interpreter into your own program:
-
-```Go
-var env = vm.NewEnv()
-
-env.Define("foo", 1)
-val, err := env.Execute(`foo + 3`)
-if err != nil {
- panic(err)
-}
-
-fmt.Println(val)
-```
-
-# License
-
-MIT
-
-# Author
-
-Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/_example/main.go b/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/_example/main.go
deleted file mode 100644
index 71e2054c27..0000000000
--- a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/_example/main.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package main
-
-import (
- "fmt"
- "log"
-
- "github.com/mattn/kinako/vm"
-)
-
-func main() {
- env := vm.NewEnv()
- v, err := env.Execute(`foo=1; foo+3`)
- if err != nil {
- log.Fatal(err)
- }
-
- fmt.Println(v)
-}
diff --git a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/ast/expr.go b/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/ast/expr.go
deleted file mode 100644
index 758cc08961..0000000000
--- a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/ast/expr.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package ast
-
-type Token struct {
- Tok int
- Lit string
-}
-
-// Position provides interface to store code locations.
-type Position struct {
- Line int
- Column int
-}
-
-// Expr provides all of interfaces for expression.
-type Expr interface {
- expr()
-}
-
-// ExprImpl provide commonly implementations for Expr.
-type ExprImpl struct {
-}
-
-// expr provide restraint interface.
-func (x *ExprImpl) expr() {}
-
-// NumberExpr provide Number expression.
-type NumberExpr struct {
- ExprImpl
- Lit string
-}
-
-// UnaryExpr provide unary minus expression. ex: -1, ^1, ~1.
-type UnaryExpr struct {
- ExprImpl
- Operator string
- Expr Expr
-}
-
-// IdentExpr provide identity expression.
-type IdentExpr struct {
- ExprImpl
- Lit string
-}
-
-// Stmt provides all of interfaces for statement.
-type Stmt interface {
- stmt()
-}
-
-// StmtImpl provide commonly implementations for Stmt..
-type StmtImpl struct {
-}
-
-// stmt provide restraint interface.
-func (x *StmtImpl) stmt() {}
-
-// LetsStmt provide multiple statement of let.
-type LetsStmt struct {
- StmtImpl
- Lhss []Expr
- Operator string
- Rhss []Expr
-}
-
-// StringExpr provide String expression.
-type StringExpr struct {
- ExprImpl
- Lit string
-}
-
-type TernaryOpExpr struct {
- ExprImpl
- Expr Expr
- Lhs Expr
- Rhs Expr
-}
-
-// CallExpr provide calling expression.
-type CallExpr struct {
- ExprImpl
- Func interface{}
- Name string
- SubExprs []Expr
-}
-
-// ParenExpr provide parent block expression.
-type ParenExpr struct {
- ExprImpl
- SubExpr Expr
-}
-
-// BinOpExpr provide binary operator expression.
-type BinOpExpr struct {
- ExprImpl
- Lhs Expr
- Operator string
- Rhs Expr
-}
-
-// ExprStmt provide expression statement.
-type ExprStmt struct {
- StmtImpl
- Expr Expr
-}
-
-// LetStmt provide statement of let.
-type LetStmt struct {
- StmtImpl
- Lhs Expr
- Operator string
- Rhs Expr
-}
diff --git a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/kinako.png b/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/kinako.png
deleted file mode 100644
index a7130c6d2c..0000000000
Binary files a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/kinako.png and /dev/null differ
diff --git a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/parser/Makefile b/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/parser/Makefile
deleted file mode 100644
index 88d6ad6f48..0000000000
--- a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/parser/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-all : parser.go
-
-parser.go : parser.go.y
- goyacc -o $@ parser.go.y
diff --git a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/parser/lexer.go b/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/parser/lexer.go
deleted file mode 100644
index f2d8b0ec57..0000000000
--- a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/parser/lexer.go
+++ /dev/null
@@ -1,427 +0,0 @@
-package parser
-
-import (
- "errors"
- "fmt"
- "unicode"
-
- "github.com/mattn/kinako/ast"
-)
-
-const (
- EOF = -1 // End of file.
- EOL = '\n' // End of line.
-)
-
-// Error provides a convenient interface for handling runtime error.
-// It can be Error inteface with type cast which can call Pos().
-type Error struct {
- Message string
- Filename string
- Fatal bool
-}
-
-// Error returns the error message.
-func (e *Error) Error() string {
- return e.Message
-}
-
-// Scanner stores informations for lexer.
-type Scanner struct {
- src []rune
- offset int
- lineHead int
- line int
-}
-
-// Init resets code to scan.
-func (s *Scanner) Init(src string) {
- s.src = []rune(src)
-}
-
-// Scan analyses token, and decide identify or literals.
-func (s *Scanner) Scan() (tok int, lit string, pos ast.Position, err error) {
-retry:
- s.skipBlank()
- pos = s.pos()
- switch ch := s.peek(); {
- case isLetter(ch):
- tok = IDENT
- lit, err = s.scanIdentifier()
- if err != nil {
- return
- }
- case isDigit(ch):
- tok = NUMBER
- lit, err = s.scanNumber()
- if err != nil {
- return
- }
- case ch == '"':
- tok = STRING
- lit, err = s.scanString('"')
- if err != nil {
- return
- }
- case ch == '\'':
- tok = STRING
- lit, err = s.scanString('\'')
- if err != nil {
- return
- }
- case ch == '`':
- tok = STRING
- lit, err = s.scanRawString()
- if err != nil {
- return
- }
- default:
- switch ch {
- case EOF:
- tok = EOF
- case '#':
- for !isEOL(s.peek()) {
- s.next()
- }
- goto retry
- case '!':
- s.next()
- switch s.peek() {
- case '=':
- tok = NEQ
- lit = "!="
- default:
- s.back()
- tok = int(ch)
- lit = string(ch)
- }
- case '=':
- s.next()
- switch s.peek() {
- case '=':
- tok = EQEQ
- lit = "=="
- default:
- s.back()
- tok = int(ch)
- lit = string(ch)
- }
- case '+':
- tok = int(ch)
- lit = string(ch)
- case '-':
- tok = int(ch)
- lit = string(ch)
- case '*':
- tok = int(ch)
- lit = string(ch)
- case '/':
- tok = int(ch)
- lit = string(ch)
- case '>':
- s.next()
- switch s.peek() {
- case '=':
- tok = GE
- lit = ">="
- case '>':
- tok = SHIFTRIGHT
- lit = ">>"
- default:
- s.back()
- tok = int(ch)
- lit = string(ch)
- }
- case '<':
- s.next()
- switch s.peek() {
- case '=':
- tok = LE
- lit = "<="
- case '<':
- tok = SHIFTLEFT
- lit = "<<"
- default:
- s.back()
- tok = int(ch)
- lit = string(ch)
- }
- case '|':
- s.next()
- switch s.peek() {
- case '|':
- tok = OROR
- lit = "||"
- default:
- s.back()
- tok = int(ch)
- lit = string(ch)
- }
- case '&':
- s.next()
- switch s.peek() {
- case '&':
- tok = ANDAND
- lit = "&&"
- default:
- s.back()
- tok = int(ch)
- lit = string(ch)
- }
- case '.':
- tok = int(ch)
- lit = string(ch)
- case '\n':
- tok = int(ch)
- lit = string(ch)
- case '(', ')', ':', ';', '%', '?', '{', '}', ',', '[', ']', '^':
- tok = int(ch)
- lit = string(ch)
- default:
- err = fmt.Errorf(`syntax error "%s"`, string(ch))
- tok = int(ch)
- lit = string(ch)
- return
- }
- s.next()
- }
- return
-}
-
-// isLetter returns true if the rune is a letter for identity.
-func isLetter(ch rune) bool {
- return unicode.IsLetter(ch) || ch == '_'
-}
-
-// isDigit returns true if the rune is a number.
-func isDigit(ch rune) bool {
- return '0' <= ch && ch <= '9'
-}
-
-// isHex returns true if the rune is a hex digits.
-func isHex(ch rune) bool {
- return ('0' <= ch && ch <= '9') || ('a' <= ch && ch <= 'f') || ('A' <= ch && ch <= 'F')
-}
-
-// isEOL returns true if the rune is at end-of-line or end-of-file.
-func isEOL(ch rune) bool {
- return ch == '\n' || ch == -1
-}
-
-// isBlank returns true if the rune is empty character..
-func isBlank(ch rune) bool {
- return ch == ' ' || ch == '\t' || ch == '\r'
-}
-
-// peek returns current rune in the code.
-func (s *Scanner) peek() rune {
- if s.reachEOF() {
- return EOF
- }
- return s.src[s.offset]
-}
-
-// next moves offset to next.
-func (s *Scanner) next() {
- if !s.reachEOF() {
- if s.peek() == '\n' {
- s.lineHead = s.offset + 1
- s.line++
- }
- s.offset++
- }
-}
-
-// current returns the current offset.
-func (s *Scanner) current() int {
- return s.offset
-}
-
-// offset sets the offset value.
-func (s *Scanner) set(o int) {
- s.offset = o
-}
-
-// back moves back offset once to top.
-func (s *Scanner) back() {
- s.offset--
-}
-
-// reachEOF returns true if offset is at end-of-file.
-func (s *Scanner) reachEOF() bool {
- return len(s.src) <= s.offset
-}
-
-// pos returns the position of current.
-func (s *Scanner) pos() ast.Position {
- return ast.Position{Line: s.line + 1, Column: s.offset - s.lineHead + 1}
-}
-
-// skipBlank moves position into non-black character.
-func (s *Scanner) skipBlank() {
- for isBlank(s.peek()) {
- s.next()
- }
-}
-
-// scanIdentifier returns identifier begining at current position.
-func (s *Scanner) scanIdentifier() (string, error) {
- var ret []rune
- for {
- if !isLetter(s.peek()) && !isDigit(s.peek()) {
- break
- }
- ret = append(ret, s.peek())
- s.next()
- }
- return string(ret), nil
-}
-
-// scanNumber returns number begining at current position.
-func (s *Scanner) scanNumber() (string, error) {
- var ret []rune
- ch := s.peek()
- ret = append(ret, ch)
- s.next()
- if ch == '0' && s.peek() == 'x' {
- ret = append(ret, s.peek())
- s.next()
- for isHex(s.peek()) {
- ret = append(ret, s.peek())
- s.next()
- }
- } else {
- for isDigit(s.peek()) || s.peek() == '.' {
- ret = append(ret, s.peek())
- s.next()
- }
- if s.peek() == 'e' {
- ret = append(ret, s.peek())
- s.next()
- if isDigit(s.peek()) || s.peek() == '+' || s.peek() == '-' {
- ret = append(ret, s.peek())
- s.next()
- for isDigit(s.peek()) || s.peek() == '.' {
- ret = append(ret, s.peek())
- s.next()
- }
- }
- for isDigit(s.peek()) || s.peek() == '.' {
- ret = append(ret, s.peek())
- s.next()
- }
- }
- if isLetter(s.peek()) {
- return "", errors.New("identifier starts immediately after numeric literal")
- }
- }
- return string(ret), nil
-}
-
-// scanRawString returns raw-string starting at current position.
-func (s *Scanner) scanRawString() (string, error) {
- var ret []rune
- for {
- s.next()
- if s.peek() == EOF {
- return "", errors.New("unexpected EOF")
- break
- }
- if s.peek() == '`' {
- s.next()
- break
- }
- ret = append(ret, s.peek())
- }
- return string(ret), nil
-}
-
-// scanString returns string starting at current position.
-// This handles backslash escaping.
-func (s *Scanner) scanString(l rune) (string, error) {
- var ret []rune
-eos:
- for {
- s.next()
- switch s.peek() {
- case EOL:
- return "", errors.New("unexpected EOL")
- case EOF:
- return "", errors.New("unexpected EOF")
- case l:
- s.next()
- break eos
- case '\\':
- s.next()
- switch s.peek() {
- case 'b':
- ret = append(ret, '\b')
- continue
- case 'f':
- ret = append(ret, '\f')
- continue
- case 'r':
- ret = append(ret, '\r')
- continue
- case 'n':
- ret = append(ret, '\n')
- continue
- case 't':
- ret = append(ret, '\t')
- continue
- }
- ret = append(ret, s.peek())
- continue
- default:
- ret = append(ret, s.peek())
- }
- }
- return string(ret), nil
-}
-
-// Lexer provides inteface to parse codes.
-type Lexer struct {
- s *Scanner
- lit string
- pos ast.Position
- e error
- stmts []ast.Stmt
-}
-
-// Lex scans the token and literals.
-func (l *Lexer) Lex(lval *yySymType) int {
- tok, lit, pos, err := l.s.Scan()
- if err != nil {
- l.e = &Error{Message: fmt.Sprintf("%s", err.Error()), Fatal: true}
- }
- lval.tok = ast.Token{Tok: tok, Lit: lit}
- l.lit = lit
- l.pos = pos
- return tok
-}
-
-// Error sets parse error.
-func (l *Lexer) Error(msg string) {
- l.e = &Error{Message: msg, Fatal: false}
-}
-
-// Parser provides way to parse the code using Scanner.
-func Parse(s *Scanner) ([]ast.Stmt, error) {
- l := Lexer{s: s}
- if yyParse(&l) != 0 {
- return nil, l.e
- }
- return l.stmts, l.e
-}
-
-func EnableErrorVerbose() {
- yyErrorVerbose = true
-}
-
-// ParserSrc provides way to parse the code from source.
-func ParseSrc(src string) ([]ast.Stmt, error) {
- scanner := &Scanner{
- src: []rune(src),
- }
- return Parse(scanner)
-}
diff --git a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/parser/parser.go b/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/parser/parser.go
deleted file mode 100644
index 661cd19c3b..0000000000
--- a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/parser/parser.go
+++ /dev/null
@@ -1,778 +0,0 @@
-//line parser.go.y:2
-package parser
-
-import __yyfmt__ "fmt"
-
-//line parser.go.y:2
-import (
- "github.com/mattn/kinako/ast"
-)
-
-//line parser.go.y:16
-type yySymType struct {
- yys int
- compstmt []ast.Stmt
- stmts []ast.Stmt
- stmt ast.Stmt
- expr ast.Expr
- exprs []ast.Expr
- tok ast.Token
- term ast.Token
- terms ast.Token
- opt_terms ast.Token
-}
-
-const IDENT = 57346
-const NUMBER = 57347
-const STRING = 57348
-const EQEQ = 57349
-const NEQ = 57350
-const GE = 57351
-const LE = 57352
-const OROR = 57353
-const ANDAND = 57354
-const POW = 57355
-const SHIFTLEFT = 57356
-const SHIFTRIGHT = 57357
-const PLUSPLUS = 57358
-const MINUSMINUS = 57359
-const UNARY = 57360
-
-var yyToknames = [...]string{
- "$end",
- "error",
- "$unk",
- "IDENT",
- "NUMBER",
- "STRING",
- "EQEQ",
- "NEQ",
- "GE",
- "LE",
- "OROR",
- "ANDAND",
- "POW",
- "'='",
- "'?'",
- "':'",
- "','",
- "'>'",
- "'<'",
- "SHIFTLEFT",
- "SHIFTRIGHT",
- "'+'",
- "'-'",
- "PLUSPLUS",
- "MINUSMINUS",
- "'*'",
- "'/'",
- "'%'",
- "UNARY",
- "'!'",
- "'^'",
- "'('",
- "')'",
- "'|'",
- "'&'",
- "';'",
- "'\\n'",
-}
-var yyStatenames = [...]string{}
-
-const yyEofCode = 1
-const yyErrCode = 2
-const yyInitialStackSize = 16
-
-//line parser.go.y:213
-
-//line yacctab:1
-var yyExca = [...]int{
- -1, 1,
- 1, -1,
- -2, 0,
- -1, 50,
- 7, 0,
- 8, 0,
- -2, 20,
- -1, 51,
- 7, 0,
- 8, 0,
- -2, 21,
-}
-
-const yyNprod = 40
-const yyPrivate = 57344
-
-var yyTokenNames []string
-var yyStates []string
-
-const yyLast = 251
-
-var yyAct = [...]int{
-
- 9, 6, 7, 33, 35, 37, 22, 23, 60, 3,
- 24, 25, 26, 38, 39, 40, 1, 41, 33, 35,
- 8, 43, 44, 45, 46, 47, 48, 49, 50, 51,
- 52, 53, 54, 55, 56, 57, 58, 59, 61, 42,
- 27, 28, 30, 32, 34, 36, 65, 0, 21, 63,
- 4, 29, 31, 2, 18, 22, 23, 17, 0, 24,
- 25, 26, 64, 0, 66, 0, 67, 33, 35, 27,
- 28, 30, 32, 34, 36, 0, 0, 21, 0, 0,
- 29, 31, 0, 0, 22, 23, 0, 0, 24, 25,
- 26, 0, 0, 0, 0, 62, 33, 35, 27, 28,
- 30, 32, 34, 36, 0, 20, 21, 0, 0, 29,
- 31, 0, 0, 22, 23, 5, 0, 24, 25, 26,
- 19, 0, 0, 0, 0, 33, 35, 27, 28, 30,
- 32, 34, 36, 0, 19, 21, 0, 0, 29, 31,
- 0, 0, 22, 23, 0, 0, 24, 25, 26, 0,
- 0, 0, 0, 0, 33, 35, 27, 28, 30, 32,
- 0, 36, 0, 0, 0, 0, 0, 29, 31, 0,
- 0, 22, 23, 0, 0, 24, 25, 26, 27, 28,
- 30, 32, 0, 33, 35, 0, 0, 0, 0, 29,
- 31, 0, 0, 22, 23, 0, 0, 24, 25, 26,
- 30, 32, 10, 11, 15, 33, 35, 0, 0, 29,
- 31, 0, 0, 22, 23, 0, 0, 24, 25, 26,
- 0, 12, 10, 11, 15, 33, 35, 0, 13, 14,
- 16, 24, 25, 26, 6, 7, 0, 0, 0, 33,
- 35, 12, 0, 0, 0, 0, 0, 0, 13, 14,
- 16,
-}
-var yyPact = [...]int{
-
- -35, -1000, 218, -35, -35, -1000, -1000, -1000, -1000, 91,
- -27, -1000, 218, 218, 218, -1000, 218, -1000, 198, -1000,
- 218, 218, 218, 218, 218, 218, 218, 218, 218, 218,
- 218, 218, 218, 218, 218, 218, 218, 218, -31, -31,
- -31, 62, -1000, 120, 33, 205, 205, -31, -31, -31,
- 191, 191, -16, -16, -16, -16, 120, 149, 120, 171,
- 29, 120, -1000, 218, -1000, 218, 120, 120,
-}
-var yyPgo = [...]int{
-
- 0, 16, 9, 20, 0, 8, 53, 50, 115,
-}
-var yyR1 = [...]int{
-
- 0, 1, 1, 2, 2, 3, 3, 4, 4, 4,
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
- 4, 5, 5, 5, 6, 6, 7, 7, 8, 8,
-}
-var yyR2 = [...]int{
-
- 0, 1, 2, 2, 3, 3, 1, 1, 1, 2,
- 2, 2, 1, 5, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 4, 0, 1, 3, 0, 1, 1, 2, 1, 1,
-}
-var yyChk = [...]int{
-
- -1000, -1, -6, -2, -7, -8, 36, 37, -3, -4,
- 4, 5, 23, 30, 31, 6, 32, -6, -7, -8,
- 14, 15, 22, 23, 26, 27, 28, 7, 8, 18,
- 9, 19, 10, 34, 11, 35, 12, 32, -4, -4,
- -4, -4, -3, -4, -4, -4, -4, -4, -4, -4,
- -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
- -5, -4, 33, 16, 33, 17, -4, -4,
-}
-var yyDef = [...]int{
-
- 34, -2, 1, 34, 35, 36, 38, 39, 3, 6,
- 7, 8, 0, 0, 0, 12, 0, 2, 35, 37,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 31, 9, 10,
- 11, 0, 4, 5, 0, 15, 16, 17, 18, 19,
- -2, -2, 22, 23, 24, 25, 26, 27, 28, 29,
- 0, 32, 14, 0, 30, 0, 13, 33,
-}
-var yyTok1 = [...]int{
-
- 1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 37, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 30, 3, 3, 3, 28, 35, 3,
- 32, 33, 26, 22, 17, 23, 3, 27, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 16, 36,
- 19, 14, 18, 15, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 31, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 34,
-}
-var yyTok2 = [...]int{
-
- 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
- 12, 13, 20, 21, 24, 25, 29,
-}
-var yyTok3 = [...]int{
- 0,
-}
-
-var yyErrorMessages = [...]struct {
- state int
- token int
- msg string
-}{}
-
-//line yaccpar:1
-
-/* parser for yacc output */
-
-var (
- yyDebug = 0
- yyErrorVerbose = false
-)
-
-type yyLexer interface {
- Lex(lval *yySymType) int
- Error(s string)
-}
-
-type yyParser interface {
- Parse(yyLexer) int
- Lookahead() int
-}
-
-type yyParserImpl struct {
- lval yySymType
- stack [yyInitialStackSize]yySymType
- char int
-}
-
-func (p *yyParserImpl) Lookahead() int {
- return p.char
-}
-
-func yyNewParser() yyParser {
- return &yyParserImpl{}
-}
-
-const yyFlag = -1000
-
-func yyTokname(c int) string {
- if c >= 1 && c-1 < len(yyToknames) {
- if yyToknames[c-1] != "" {
- return yyToknames[c-1]
- }
- }
- return __yyfmt__.Sprintf("tok-%v", c)
-}
-
-func yyStatname(s int) string {
- if s >= 0 && s < len(yyStatenames) {
- if yyStatenames[s] != "" {
- return yyStatenames[s]
- }
- }
- return __yyfmt__.Sprintf("state-%v", s)
-}
-
-func yyErrorMessage(state, lookAhead int) string {
- const TOKSTART = 4
-
- if !yyErrorVerbose {
- return "syntax error"
- }
-
- for _, e := range yyErrorMessages {
- if e.state == state && e.token == lookAhead {
- return "syntax error: " + e.msg
- }
- }
-
- res := "syntax error: unexpected " + yyTokname(lookAhead)
-
- // To match Bison, suggest at most four expected tokens.
- expected := make([]int, 0, 4)
-
- // Look for shiftable tokens.
- base := yyPact[state]
- for tok := TOKSTART; tok-1 < len(yyToknames); tok++ {
- if n := base + tok; n >= 0 && n < yyLast && yyChk[yyAct[n]] == tok {
- if len(expected) == cap(expected) {
- return res
- }
- expected = append(expected, tok)
- }
- }
-
- if yyDef[state] == -2 {
- i := 0
- for yyExca[i] != -1 || yyExca[i+1] != state {
- i += 2
- }
-
- // Look for tokens that we accept or reduce.
- for i += 2; yyExca[i] >= 0; i += 2 {
- tok := yyExca[i]
- if tok < TOKSTART || yyExca[i+1] == 0 {
- continue
- }
- if len(expected) == cap(expected) {
- return res
- }
- expected = append(expected, tok)
- }
-
- // If the default action is to accept or reduce, give up.
- if yyExca[i+1] != 0 {
- return res
- }
- }
-
- for i, tok := range expected {
- if i == 0 {
- res += ", expecting "
- } else {
- res += " or "
- }
- res += yyTokname(tok)
- }
- return res
-}
-
-func yylex1(lex yyLexer, lval *yySymType) (char, token int) {
- token = 0
- char = lex.Lex(lval)
- if char <= 0 {
- token = yyTok1[0]
- goto out
- }
- if char < len(yyTok1) {
- token = yyTok1[char]
- goto out
- }
- if char >= yyPrivate {
- if char < yyPrivate+len(yyTok2) {
- token = yyTok2[char-yyPrivate]
- goto out
- }
- }
- for i := 0; i < len(yyTok3); i += 2 {
- token = yyTok3[i+0]
- if token == char {
- token = yyTok3[i+1]
- goto out
- }
- }
-
-out:
- if token == 0 {
- token = yyTok2[1] /* unknown char */
- }
- if yyDebug >= 3 {
- __yyfmt__.Printf("lex %s(%d)\n", yyTokname(token), uint(char))
- }
- return char, token
-}
-
-func yyParse(yylex yyLexer) int {
- return yyNewParser().Parse(yylex)
-}
-
-func (yyrcvr *yyParserImpl) Parse(yylex yyLexer) int {
- var yyn int
- var yyVAL yySymType
- var yyDollar []yySymType
- _ = yyDollar // silence set and not used
- yyS := yyrcvr.stack[:]
-
- Nerrs := 0 /* number of errors */
- Errflag := 0 /* error recovery flag */
- yystate := 0
- yyrcvr.char = -1
- yytoken := -1 // yyrcvr.char translated into internal numbering
- defer func() {
- // Make sure we report no lookahead when not parsing.
- yystate = -1
- yyrcvr.char = -1
- yytoken = -1
- }()
- yyp := -1
- goto yystack
-
-ret0:
- return 0
-
-ret1:
- return 1
-
-yystack:
- /* put a state and value onto the stack */
- if yyDebug >= 4 {
- __yyfmt__.Printf("char %v in %v\n", yyTokname(yytoken), yyStatname(yystate))
- }
-
- yyp++
- if yyp >= len(yyS) {
- nyys := make([]yySymType, len(yyS)*2)
- copy(nyys, yyS)
- yyS = nyys
- }
- yyS[yyp] = yyVAL
- yyS[yyp].yys = yystate
-
-yynewstate:
- yyn = yyPact[yystate]
- if yyn <= yyFlag {
- goto yydefault /* simple state */
- }
- if yyrcvr.char < 0 {
- yyrcvr.char, yytoken = yylex1(yylex, &yyrcvr.lval)
- }
- yyn += yytoken
- if yyn < 0 || yyn >= yyLast {
- goto yydefault
- }
- yyn = yyAct[yyn]
- if yyChk[yyn] == yytoken { /* valid shift */
- yyrcvr.char = -1
- yytoken = -1
- yyVAL = yyrcvr.lval
- yystate = yyn
- if Errflag > 0 {
- Errflag--
- }
- goto yystack
- }
-
-yydefault:
- /* default state action */
- yyn = yyDef[yystate]
- if yyn == -2 {
- if yyrcvr.char < 0 {
- yyrcvr.char, yytoken = yylex1(yylex, &yyrcvr.lval)
- }
-
- /* look through exception table */
- xi := 0
- for {
- if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate {
- break
- }
- xi += 2
- }
- for xi += 2; ; xi += 2 {
- yyn = yyExca[xi+0]
- if yyn < 0 || yyn == yytoken {
- break
- }
- }
- yyn = yyExca[xi+1]
- if yyn < 0 {
- goto ret0
- }
- }
- if yyn == 0 {
- /* error ... attempt to resume parsing */
- switch Errflag {
- case 0: /* brand new error */
- yylex.Error(yyErrorMessage(yystate, yytoken))
- Nerrs++
- if yyDebug >= 1 {
- __yyfmt__.Printf("%s", yyStatname(yystate))
- __yyfmt__.Printf(" saw %s\n", yyTokname(yytoken))
- }
- fallthrough
-
- case 1, 2: /* incompletely recovered error ... try again */
- Errflag = 3
-
- /* find a state where "error" is a legal shift action */
- for yyp >= 0 {
- yyn = yyPact[yyS[yyp].yys] + yyErrCode
- if yyn >= 0 && yyn < yyLast {
- yystate = yyAct[yyn] /* simulate a shift of "error" */
- if yyChk[yystate] == yyErrCode {
- goto yystack
- }
- }
-
- /* the current p has no shift on "error", pop stack */
- if yyDebug >= 2 {
- __yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys)
- }
- yyp--
- }
- /* there is no state on the stack with an error shift ... abort */
- goto ret1
-
- case 3: /* no shift yet; clobber input char */
- if yyDebug >= 2 {
- __yyfmt__.Printf("error recovery discards %s\n", yyTokname(yytoken))
- }
- if yytoken == yyEofCode {
- goto ret1
- }
- yyrcvr.char = -1
- yytoken = -1
- goto yynewstate /* try again in the same state */
- }
- }
-
- /* reduction by production yyn */
- if yyDebug >= 2 {
- __yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate))
- }
-
- yynt := yyn
- yypt := yyp
- _ = yypt // guard against "declared and not used"
-
- yyp -= yyR2[yyn]
- // yyp is now the index of $0. Perform the default action. Iff the
- // reduced production is ε, $1 is possibly out of range.
- if yyp+1 >= len(yyS) {
- nyys := make([]yySymType, len(yyS)*2)
- copy(nyys, yyS)
- yyS = nyys
- }
- yyVAL = yyS[yyp+1]
-
- /* consult goto table to find next state */
- yyn = yyR1[yyn]
- yyg := yyPgo[yyn]
- yyj := yyg + yyS[yyp].yys + 1
-
- if yyj >= yyLast {
- yystate = yyAct[yyg]
- } else {
- yystate = yyAct[yyj]
- if yyChk[yystate] != -yyn {
- yystate = yyAct[yyg]
- }
- }
- // dummy call; replaced with literal code
- switch yynt {
-
- case 1:
- yyDollar = yyS[yypt-1 : yypt+1]
- //line parser.go.y:45
- {
- yyVAL.compstmt = nil
- }
- case 2:
- yyDollar = yyS[yypt-2 : yypt+1]
- //line parser.go.y:49
- {
- yyVAL.compstmt = yyDollar[1].stmts
- }
- case 3:
- yyDollar = yyS[yypt-2 : yypt+1]
- //line parser.go.y:55
- {
- yyVAL.stmts = []ast.Stmt{yyDollar[2].stmt}
- if l, ok := yylex.(*Lexer); ok {
- l.stmts = yyVAL.stmts
- }
- }
- case 4:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:62
- {
- if yyDollar[3].stmt != nil {
- yyVAL.stmts = append(yyDollar[1].stmts, yyDollar[3].stmt)
- if l, ok := yylex.(*Lexer); ok {
- l.stmts = yyVAL.stmts
- }
- }
- }
- case 5:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:73
- {
- yyVAL.stmt = &ast.LetStmt{Lhs: yyDollar[1].expr, Operator: "=", Rhs: yyDollar[3].expr}
- }
- case 6:
- yyDollar = yyS[yypt-1 : yypt+1]
- //line parser.go.y:77
- {
- yyVAL.stmt = &ast.ExprStmt{Expr: yyDollar[1].expr}
- }
- case 7:
- yyDollar = yyS[yypt-1 : yypt+1]
- //line parser.go.y:83
- {
- yyVAL.expr = &ast.IdentExpr{Lit: yyDollar[1].tok.Lit}
- }
- case 8:
- yyDollar = yyS[yypt-1 : yypt+1]
- //line parser.go.y:87
- {
- yyVAL.expr = &ast.NumberExpr{Lit: yyDollar[1].tok.Lit}
- }
- case 9:
- yyDollar = yyS[yypt-2 : yypt+1]
- //line parser.go.y:91
- {
- yyVAL.expr = &ast.UnaryExpr{Operator: "-", Expr: yyDollar[2].expr}
- }
- case 10:
- yyDollar = yyS[yypt-2 : yypt+1]
- //line parser.go.y:95
- {
- yyVAL.expr = &ast.UnaryExpr{Operator: "!", Expr: yyDollar[2].expr}
- }
- case 11:
- yyDollar = yyS[yypt-2 : yypt+1]
- //line parser.go.y:99
- {
- yyVAL.expr = &ast.UnaryExpr{Operator: "^", Expr: yyDollar[2].expr}
- }
- case 12:
- yyDollar = yyS[yypt-1 : yypt+1]
- //line parser.go.y:103
- {
- yyVAL.expr = &ast.StringExpr{Lit: yyDollar[1].tok.Lit}
- }
- case 13:
- yyDollar = yyS[yypt-5 : yypt+1]
- //line parser.go.y:107
- {
- yyVAL.expr = &ast.TernaryOpExpr{Expr: yyDollar[1].expr, Lhs: yyDollar[3].expr, Rhs: yyDollar[5].expr}
- }
- case 14:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:111
- {
- yyVAL.expr = &ast.ParenExpr{SubExpr: yyDollar[2].expr}
- }
- case 15:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:115
- {
- yyVAL.expr = &ast.BinOpExpr{Lhs: yyDollar[1].expr, Operator: "+", Rhs: yyDollar[3].expr}
- }
- case 16:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:119
- {
- yyVAL.expr = &ast.BinOpExpr{Lhs: yyDollar[1].expr, Operator: "-", Rhs: yyDollar[3].expr}
- }
- case 17:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:123
- {
- yyVAL.expr = &ast.BinOpExpr{Lhs: yyDollar[1].expr, Operator: "*", Rhs: yyDollar[3].expr}
- }
- case 18:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:127
- {
- yyVAL.expr = &ast.BinOpExpr{Lhs: yyDollar[1].expr, Operator: "/", Rhs: yyDollar[3].expr}
- }
- case 19:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:131
- {
- yyVAL.expr = &ast.BinOpExpr{Lhs: yyDollar[1].expr, Operator: "%", Rhs: yyDollar[3].expr}
- }
- case 20:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:135
- {
- yyVAL.expr = &ast.BinOpExpr{Lhs: yyDollar[1].expr, Operator: "==", Rhs: yyDollar[3].expr}
- }
- case 21:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:139
- {
- yyVAL.expr = &ast.BinOpExpr{Lhs: yyDollar[1].expr, Operator: "!=", Rhs: yyDollar[3].expr}
- }
- case 22:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:143
- {
- yyVAL.expr = &ast.BinOpExpr{Lhs: yyDollar[1].expr, Operator: ">", Rhs: yyDollar[3].expr}
- }
- case 23:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:147
- {
- yyVAL.expr = &ast.BinOpExpr{Lhs: yyDollar[1].expr, Operator: ">=", Rhs: yyDollar[3].expr}
- }
- case 24:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:151
- {
- yyVAL.expr = &ast.BinOpExpr{Lhs: yyDollar[1].expr, Operator: "<", Rhs: yyDollar[3].expr}
- }
- case 25:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:155
- {
- yyVAL.expr = &ast.BinOpExpr{Lhs: yyDollar[1].expr, Operator: "<=", Rhs: yyDollar[3].expr}
- }
- case 26:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:159
- {
- yyVAL.expr = &ast.BinOpExpr{Lhs: yyDollar[1].expr, Operator: "|", Rhs: yyDollar[3].expr}
- }
- case 27:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:163
- {
- yyVAL.expr = &ast.BinOpExpr{Lhs: yyDollar[1].expr, Operator: "||", Rhs: yyDollar[3].expr}
- }
- case 28:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:167
- {
- yyVAL.expr = &ast.BinOpExpr{Lhs: yyDollar[1].expr, Operator: "&", Rhs: yyDollar[3].expr}
- }
- case 29:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:171
- {
- yyVAL.expr = &ast.BinOpExpr{Lhs: yyDollar[1].expr, Operator: "&&", Rhs: yyDollar[3].expr}
- }
- case 30:
- yyDollar = yyS[yypt-4 : yypt+1]
- //line parser.go.y:175
- {
- yyVAL.expr = &ast.CallExpr{Name: yyDollar[1].tok.Lit, SubExprs: yyDollar[3].exprs}
- }
- case 31:
- yyDollar = yyS[yypt-0 : yypt+1]
- //line parser.go.y:180
- {
- yyVAL.exprs = nil
- }
- case 32:
- yyDollar = yyS[yypt-1 : yypt+1]
- //line parser.go.y:184
- {
- yyVAL.exprs = []ast.Expr{yyDollar[1].expr}
- }
- case 33:
- yyDollar = yyS[yypt-3 : yypt+1]
- //line parser.go.y:188
- {
- yyVAL.exprs = append(yyDollar[1].exprs, yyDollar[3].expr)
- }
- case 36:
- yyDollar = yyS[yypt-1 : yypt+1]
- //line parser.go.y:198
- {
- }
- case 37:
- yyDollar = yyS[yypt-2 : yypt+1]
- //line parser.go.y:201
- {
- }
- case 38:
- yyDollar = yyS[yypt-1 : yypt+1]
- //line parser.go.y:206
- {
- }
- case 39:
- yyDollar = yyS[yypt-1 : yypt+1]
- //line parser.go.y:209
- {
- }
- }
- goto yystack /* stack new state and value */
-}
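Note on the generated parser removed above: it is driven entirely through the yyLexer interface (Lex and Error), and the grammar actions additionally type-assert the lexer to *Lexer so the parsed statements can be recorded on it. A minimal sketch of how such a parser is invoked, written as if inside the same parser package and using only identifiers that appear in the generated file (yyLexer, yyDebug, yyParse); the sketch is illustrative and not part of the original source:

    // driveParser runs the generated parser against any yyLexer implementation.
    // yyParse returns 0 on success and 1 after unrecoverable error recovery.
    func driveParser(l yyLexer) int {
        yyDebug = 0 // raise to 1-4 to get the tracing printed from yystack/yydefault
        return yyParse(l)
    }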
diff --git a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/parser/parser.go.y b/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/parser/parser.go.y
deleted file mode 100644
index 84086ac9a6..0000000000
--- a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/parser/parser.go.y
+++ /dev/null
@@ -1,214 +0,0 @@
-%{
-package parser
-
-import (
- "github.com/mattn/kinako/ast"
-)
-
-%}
-
-%type compstmt
-%type stmts
-%type stmt
-%type expr
-%type exprs
-
-%union{
- compstmt []ast.Stmt
- stmts []ast.Stmt
- stmt ast.Stmt
- expr ast.Expr
- exprs []ast.Expr
- tok ast.Token
- term ast.Token
- terms ast.Token
- opt_terms ast.Token
-}
-
-%token IDENT NUMBER STRING EQEQ NEQ GE LE OROR ANDAND POW
-
-%right '='
-%right '?' ':'
-%left OROR
-%left ANDAND
-%left IDENT
-%nonassoc EQEQ NEQ ','
-%left '>' GE '<' LE SHIFTLEFT SHIFTRIGHT
-
-%left '+' '-' PLUSPLUS MINUSMINUS
-%left '*' '/' '%'
-%right UNARY
-
-%%
-
-compstmt : opt_terms
- {
- $$ = nil
- }
- | stmts opt_terms
- {
- $$ = $1
- }
-
-stmts :
- opt_terms stmt
- {
- $$ = []ast.Stmt{$2}
- if l, ok := yylex.(*Lexer); ok {
- l.stmts = $$
- }
- }
- | stmts terms stmt
- {
- if $3 != nil {
- $$ = append($1, $3)
- if l, ok := yylex.(*Lexer); ok {
- l.stmts = $$
- }
- }
- }
-
-stmt :
- expr '=' expr
- {
- $$ = &ast.LetStmt{Lhs: $1, Operator: "=", Rhs: $3}
- }
- | expr
- {
- $$ = &ast.ExprStmt{Expr: $1}
- }
-
-expr :
- IDENT
- {
- $$ = &ast.IdentExpr{Lit: $1.Lit}
- }
- | NUMBER
- {
- $$ = &ast.NumberExpr{Lit: $1.Lit}
- }
- | '-' expr %prec UNARY
- {
- $$ = &ast.UnaryExpr{Operator: "-", Expr: $2}
- }
- | '!' expr %prec UNARY
- {
- $$ = &ast.UnaryExpr{Operator: "!", Expr: $2}
- }
- | '^' expr %prec UNARY
- {
- $$ = &ast.UnaryExpr{Operator: "^", Expr: $2}
- }
- | STRING
- {
- $$ = &ast.StringExpr{Lit: $1.Lit}
- }
- | expr '?' expr ':' expr
- {
- $$ = &ast.TernaryOpExpr{Expr: $1, Lhs: $3, Rhs: $5}
- }
- | '(' expr ')'
- {
- $$ = &ast.ParenExpr{SubExpr: $2}
- }
- | expr '+' expr
- {
- $$ = &ast.BinOpExpr{Lhs: $1, Operator: "+", Rhs: $3}
- }
- | expr '-' expr
- {
- $$ = &ast.BinOpExpr{Lhs: $1, Operator: "-", Rhs: $3}
- }
- | expr '*' expr
- {
- $$ = &ast.BinOpExpr{Lhs: $1, Operator: "*", Rhs: $3}
- }
- | expr '/' expr
- {
- $$ = &ast.BinOpExpr{Lhs: $1, Operator: "/", Rhs: $3}
- }
- | expr '%' expr
- {
- $$ = &ast.BinOpExpr{Lhs: $1, Operator: "%", Rhs: $3}
- }
- | expr EQEQ expr
- {
- $$ = &ast.BinOpExpr{Lhs: $1, Operator: "==", Rhs: $3}
- }
- | expr NEQ expr
- {
- $$ = &ast.BinOpExpr{Lhs: $1, Operator: "!=", Rhs: $3}
- }
- | expr '>' expr
- {
- $$ = &ast.BinOpExpr{Lhs: $1, Operator: ">", Rhs: $3}
- }
- | expr GE expr
- {
- $$ = &ast.BinOpExpr{Lhs: $1, Operator: ">=", Rhs: $3}
- }
- | expr '<' expr
- {
- $$ = &ast.BinOpExpr{Lhs: $1, Operator: "<", Rhs: $3}
- }
- | expr LE expr
- {
- $$ = &ast.BinOpExpr{Lhs: $1, Operator: "<=", Rhs: $3}
- }
- | expr '|' expr
- {
- $$ = &ast.BinOpExpr{Lhs: $1, Operator: "|", Rhs: $3}
- }
- | expr OROR expr
- {
- $$ = &ast.BinOpExpr{Lhs: $1, Operator: "||", Rhs: $3}
- }
- | expr '&' expr
- {
- $$ = &ast.BinOpExpr{Lhs: $1, Operator: "&", Rhs: $3}
- }
- | expr ANDAND expr
- {
- $$ = &ast.BinOpExpr{Lhs: $1, Operator: "&&", Rhs: $3}
- }
- | IDENT '(' exprs ')'
- {
- $$ = &ast.CallExpr{Name: $1.Lit, SubExprs: $3}
- }
-
-exprs :
- {
- $$ = nil
- }
- | expr
- {
- $$ = []ast.Expr{$1}
- }
- | exprs ',' expr
- {
- $$ = append($1, $3)
- }
-
-opt_terms : /* none */
- | terms
- ;
-
-
-terms : term
- {
- }
- | terms term
- {
- }
- ;
-
-term : ';'
- {
- }
- | '\n'
- {
- }
- ;
-
-%%
-
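The %left/%right declarations in the grammar above give yacc the operator precedence it uses to resolve most ambiguities; '|' and '&' are never declared, which is why the y.output report below is full of shift/reduce conflicts involving those two tokens. As an illustration of what the declared precedence means, the expression 1+2*3 is parsed as 1+(2*3); the equivalent explicit AST, built from the node types used in the actions above (sketch only, assumes the parser package's ast import, not part of the original source):

    // '*' is declared after '+', so it binds tighter and becomes the right-hand child.
    sum := &ast.BinOpExpr{
        Lhs:      &ast.NumberExpr{Lit: "1"},
        Operator: "+",
        Rhs: &ast.BinOpExpr{
            Lhs:      &ast.NumberExpr{Lit: "2"},
            Operator: "*",
            Rhs:      &ast.NumberExpr{Lit: "3"},
        },
    }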
diff --git a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/parser/y.output b/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/parser/y.output
deleted file mode 100644
index 1a0f45edf3..0000000000
--- a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/parser/y.output
+++ /dev/null
@@ -1,1381 +0,0 @@
-
-state 0
- $accept: .compstmt $end
- opt_terms: . (34)
-
- ';' shift 6
- '\n' shift 7
- . reduce 34 (src line 192)
-
- compstmt goto 1
- stmts goto 3
- opt_terms goto 2
- terms goto 4
- term goto 5
-
-state 1
- $accept: compstmt.$end
-
- $end accept
- . error
-
-
-state 2
- compstmt: opt_terms. (1)
- stmts: opt_terms.stmt
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . reduce 1 (src line 44)
-
- stmt goto 8
- expr goto 9
-
-state 3
- compstmt: stmts.opt_terms
- stmts: stmts.terms stmt
- opt_terms: . (34)
-
- ';' shift 6
- '\n' shift 7
- . reduce 34 (src line 192)
-
- opt_terms goto 17
- terms goto 18
- term goto 5
-
-state 4
- opt_terms: terms. (35)
- terms: terms.term
-
- ';' shift 6
- '\n' shift 7
- . reduce 35 (src line 193)
-
- term goto 19
-
-state 5
- terms: term. (36)
-
- . reduce 36 (src line 197)
-
-
-state 6
- term: ';'. (38)
-
- . reduce 38 (src line 205)
-
-
-state 7
- term: '\n'. (39)
-
- . reduce 39 (src line 208)
-
-
-state 8
- stmts: opt_terms stmt. (3)
-
- . reduce 3 (src line 53)
-
-
-state 9
- stmt: expr.'=' expr
- stmt: expr. (6)
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- EQEQ shift 27
- NEQ shift 28
- GE shift 30
- LE shift 32
- OROR shift 34
- ANDAND shift 36
- '=' shift 20
- '?' shift 21
- '>' shift 29
- '<' shift 31
- '+' shift 22
- '-' shift 23
- '*' shift 24
- '/' shift 25
- '%' shift 26
- '|' shift 33
- '&' shift 35
- . reduce 6 (src line 76)
-
-
-state 10
- expr: IDENT. (7)
- expr: IDENT.'(' exprs ')'
-
- '(' shift 37
- . reduce 7 (src line 81)
-
-
-state 11
- expr: NUMBER. (8)
-
- . reduce 8 (src line 86)
-
-
-state 12
- expr: '-'.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 38
-
-state 13
- expr: '!'.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 39
-
-state 14
- expr: '^'.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 40
-
-state 15
- expr: STRING. (12)
-
- . reduce 12 (src line 102)
-
-
-state 16
- expr: '('.expr ')'
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 41
-
-state 17
- compstmt: stmts opt_terms. (2)
-
- . reduce 2 (src line 48)
-
-
-state 18
- stmts: stmts terms.stmt
- opt_terms: terms. (35)
- terms: terms.term
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- ';' shift 6
- '\n' shift 7
- . reduce 35 (src line 193)
-
- stmt goto 42
- expr goto 9
- term goto 19
-
-state 19
- terms: terms term. (37)
-
- . reduce 37 (src line 200)
-
-
-state 20
- stmt: expr '='.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 43
-
-state 21
- expr: expr '?'.expr ':' expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 44
-
-state 22
- expr: expr '+'.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 45
-
-state 23
- expr: expr '-'.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 46
-
-state 24
- expr: expr '*'.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 47
-
-state 25
- expr: expr '/'.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 48
-
-state 26
- expr: expr '%'.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 49
-
-state 27
- expr: expr EQEQ.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 50
-
-state 28
- expr: expr NEQ.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 51
-
-state 29
- expr: expr '>'.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 52
-
-state 30
- expr: expr GE.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 53
-
-state 31
- expr: expr '<'.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 54
-
-state 32
- expr: expr LE.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 55
-
-state 33
- expr: expr '|'.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 56
-
-state 34
- expr: expr OROR.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 57
-
-state 35
- expr: expr '&'.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 58
-
-state 36
- expr: expr ANDAND.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 59
-
-state 37
- expr: IDENT '('.exprs ')'
- exprs: . (31)
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . reduce 31 (src line 179)
-
- expr goto 61
- exprs goto 60
-
-38: shift/reduce conflict (shift 33(0), red'n 9(10)) on '|'
-38: shift/reduce conflict (shift 35(0), red'n 9(10)) on '&'
-state 38
- expr: '-' expr. (9)
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- '|' shift 33
- '&' shift 35
- . reduce 9 (src line 90)
-
-
-39: shift/reduce conflict (shift 33(0), red'n 10(10)) on '|'
-39: shift/reduce conflict (shift 35(0), red'n 10(10)) on '&'
-state 39
- expr: '!' expr. (10)
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- '|' shift 33
- '&' shift 35
- . reduce 10 (src line 94)
-
-
-40: shift/reduce conflict (shift 33(0), red'n 11(10)) on '|'
-40: shift/reduce conflict (shift 35(0), red'n 11(10)) on '&'
-state 40
- expr: '^' expr. (11)
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- '|' shift 33
- '&' shift 35
- . reduce 11 (src line 98)
-
-
-state 41
- expr: expr.'?' expr ':' expr
- expr: '(' expr.')'
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- EQEQ shift 27
- NEQ shift 28
- GE shift 30
- LE shift 32
- OROR shift 34
- ANDAND shift 36
- '?' shift 21
- '>' shift 29
- '<' shift 31
- '+' shift 22
- '-' shift 23
- '*' shift 24
- '/' shift 25
- '%' shift 26
- ')' shift 62
- '|' shift 33
- '&' shift 35
- . error
-
-
-state 42
- stmts: stmts terms stmt. (4)
-
- . reduce 4 (src line 61)
-
-
-state 43
- stmt: expr '=' expr. (5)
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- EQEQ shift 27
- NEQ shift 28
- GE shift 30
- LE shift 32
- OROR shift 34
- ANDAND shift 36
- '?' shift 21
- '>' shift 29
- '<' shift 31
- '+' shift 22
- '-' shift 23
- '*' shift 24
- '/' shift 25
- '%' shift 26
- '|' shift 33
- '&' shift 35
- . reduce 5 (src line 71)
-
-
-state 44
- expr: expr.'?' expr ':' expr
- expr: expr '?' expr.':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- EQEQ shift 27
- NEQ shift 28
- GE shift 30
- LE shift 32
- OROR shift 34
- ANDAND shift 36
- '?' shift 21
- ':' shift 63
- '>' shift 29
- '<' shift 31
- '+' shift 22
- '-' shift 23
- '*' shift 24
- '/' shift 25
- '%' shift 26
- '|' shift 33
- '&' shift 35
- . error
-
-
-45: shift/reduce conflict (shift 33(0), red'n 15(8)) on '|'
-45: shift/reduce conflict (shift 35(0), red'n 15(8)) on '&'
-state 45
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr '+' expr. (15)
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- '*' shift 24
- '/' shift 25
- '%' shift 26
- '|' shift 33
- '&' shift 35
- . reduce 15 (src line 114)
-
-
-46: shift/reduce conflict (shift 33(0), red'n 16(8)) on '|'
-46: shift/reduce conflict (shift 35(0), red'n 16(8)) on '&'
-state 46
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr '-' expr. (16)
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- '*' shift 24
- '/' shift 25
- '%' shift 26
- '|' shift 33
- '&' shift 35
- . reduce 16 (src line 118)
-
-
-47: shift/reduce conflict (shift 33(0), red'n 17(9)) on '|'
-47: shift/reduce conflict (shift 35(0), red'n 17(9)) on '&'
-state 47
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr '*' expr. (17)
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- '|' shift 33
- '&' shift 35
- . reduce 17 (src line 122)
-
-
-48: shift/reduce conflict (shift 33(0), red'n 18(9)) on '|'
-48: shift/reduce conflict (shift 35(0), red'n 18(9)) on '&'
-state 48
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr '/' expr. (18)
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- '|' shift 33
- '&' shift 35
- . reduce 18 (src line 126)
-
-
-49: shift/reduce conflict (shift 33(0), red'n 19(9)) on '|'
-49: shift/reduce conflict (shift 35(0), red'n 19(9)) on '&'
-state 49
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr '%' expr. (19)
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- '|' shift 33
- '&' shift 35
- . reduce 19 (src line 130)
-
-
-50: shift/reduce conflict (shift 33(0), red'n 20(6)) on '|'
-50: shift/reduce conflict (shift 35(0), red'n 20(6)) on '&'
-state 50
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr EQEQ expr. (20)
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- EQEQ error
- NEQ error
- GE shift 30
- LE shift 32
- '>' shift 29
- '<' shift 31
- '+' shift 22
- '-' shift 23
- '*' shift 24
- '/' shift 25
- '%' shift 26
- '|' shift 33
- '&' shift 35
- . reduce 20 (src line 134)
-
-
-51: shift/reduce conflict (shift 33(0), red'n 21(6)) on '|'
-51: shift/reduce conflict (shift 35(0), red'n 21(6)) on '&'
-state 51
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr NEQ expr. (21)
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- EQEQ error
- NEQ error
- GE shift 30
- LE shift 32
- '>' shift 29
- '<' shift 31
- '+' shift 22
- '-' shift 23
- '*' shift 24
- '/' shift 25
- '%' shift 26
- '|' shift 33
- '&' shift 35
- . reduce 21 (src line 138)
-
-
-52: shift/reduce conflict (shift 33(0), red'n 22(7)) on '|'
-52: shift/reduce conflict (shift 35(0), red'n 22(7)) on '&'
-state 52
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr '>' expr. (22)
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- '+' shift 22
- '-' shift 23
- '*' shift 24
- '/' shift 25
- '%' shift 26
- '|' shift 33
- '&' shift 35
- . reduce 22 (src line 142)
-
-
-53: shift/reduce conflict (shift 33(0), red'n 23(7)) on '|'
-53: shift/reduce conflict (shift 35(0), red'n 23(7)) on '&'
-state 53
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr GE expr. (23)
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- '+' shift 22
- '-' shift 23
- '*' shift 24
- '/' shift 25
- '%' shift 26
- '|' shift 33
- '&' shift 35
- . reduce 23 (src line 146)
-
-
-54: shift/reduce conflict (shift 33(0), red'n 24(7)) on '|'
-54: shift/reduce conflict (shift 35(0), red'n 24(7)) on '&'
-state 54
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr '<' expr. (24)
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- '+' shift 22
- '-' shift 23
- '*' shift 24
- '/' shift 25
- '%' shift 26
- '|' shift 33
- '&' shift 35
- . reduce 24 (src line 150)
-
-
-55: shift/reduce conflict (shift 33(0), red'n 25(7)) on '|'
-55: shift/reduce conflict (shift 35(0), red'n 25(7)) on '&'
-state 55
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr LE expr. (25)
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- '+' shift 22
- '-' shift 23
- '*' shift 24
- '/' shift 25
- '%' shift 26
- '|' shift 33
- '&' shift 35
- . reduce 25 (src line 154)
-
-
-56: shift/reduce conflict (shift 27(6), red'n 26(0)) on EQEQ
-56: shift/reduce conflict (shift 28(6), red'n 26(0)) on NEQ
-56: shift/reduce conflict (shift 30(7), red'n 26(0)) on GE
-56: shift/reduce conflict (shift 32(7), red'n 26(0)) on LE
-56: shift/reduce conflict (shift 34(3), red'n 26(0)) on OROR
-56: shift/reduce conflict (shift 36(4), red'n 26(0)) on ANDAND
-56: shift/reduce conflict (shift 21(2), red'n 26(0)) on '?'
-56: shift/reduce conflict (shift 29(7), red'n 26(0)) on '>'
-56: shift/reduce conflict (shift 31(7), red'n 26(0)) on '<'
-56: shift/reduce conflict (shift 22(8), red'n 26(0)) on '+'
-56: shift/reduce conflict (shift 23(8), red'n 26(0)) on '-'
-56: shift/reduce conflict (shift 24(9), red'n 26(0)) on '*'
-56: shift/reduce conflict (shift 25(9), red'n 26(0)) on '/'
-56: shift/reduce conflict (shift 26(9), red'n 26(0)) on '%'
-56: shift/reduce conflict (shift 33(0), red'n 26(0)) on '|'
-56: shift/reduce conflict (shift 35(0), red'n 26(0)) on '&'
-state 56
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr '|' expr. (26)
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- EQEQ shift 27
- NEQ shift 28
- GE shift 30
- LE shift 32
- OROR shift 34
- ANDAND shift 36
- '?' shift 21
- '>' shift 29
- '<' shift 31
- '+' shift 22
- '-' shift 23
- '*' shift 24
- '/' shift 25
- '%' shift 26
- '|' shift 33
- '&' shift 35
- . reduce 26 (src line 158)
-
-
-57: shift/reduce conflict (shift 33(0), red'n 27(3)) on '|'
-57: shift/reduce conflict (shift 35(0), red'n 27(3)) on '&'
-state 57
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr OROR expr. (27)
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- EQEQ shift 27
- NEQ shift 28
- GE shift 30
- LE shift 32
- ANDAND shift 36
- '>' shift 29
- '<' shift 31
- '+' shift 22
- '-' shift 23
- '*' shift 24
- '/' shift 25
- '%' shift 26
- '|' shift 33
- '&' shift 35
- . reduce 27 (src line 162)
-
-
-58: shift/reduce conflict (shift 27(6), red'n 28(0)) on EQEQ
-58: shift/reduce conflict (shift 28(6), red'n 28(0)) on NEQ
-58: shift/reduce conflict (shift 30(7), red'n 28(0)) on GE
-58: shift/reduce conflict (shift 32(7), red'n 28(0)) on LE
-58: shift/reduce conflict (shift 34(3), red'n 28(0)) on OROR
-58: shift/reduce conflict (shift 36(4), red'n 28(0)) on ANDAND
-58: shift/reduce conflict (shift 21(2), red'n 28(0)) on '?'
-58: shift/reduce conflict (shift 29(7), red'n 28(0)) on '>'
-58: shift/reduce conflict (shift 31(7), red'n 28(0)) on '<'
-58: shift/reduce conflict (shift 22(8), red'n 28(0)) on '+'
-58: shift/reduce conflict (shift 23(8), red'n 28(0)) on '-'
-58: shift/reduce conflict (shift 24(9), red'n 28(0)) on '*'
-58: shift/reduce conflict (shift 25(9), red'n 28(0)) on '/'
-58: shift/reduce conflict (shift 26(9), red'n 28(0)) on '%'
-58: shift/reduce conflict (shift 33(0), red'n 28(0)) on '|'
-58: shift/reduce conflict (shift 35(0), red'n 28(0)) on '&'
-state 58
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr '&' expr. (28)
- expr: expr.ANDAND expr
-
- EQEQ shift 27
- NEQ shift 28
- GE shift 30
- LE shift 32
- OROR shift 34
- ANDAND shift 36
- '?' shift 21
- '>' shift 29
- '<' shift 31
- '+' shift 22
- '-' shift 23
- '*' shift 24
- '/' shift 25
- '%' shift 26
- '|' shift 33
- '&' shift 35
- . reduce 28 (src line 166)
-
-
-59: shift/reduce conflict (shift 33(0), red'n 29(4)) on '|'
-59: shift/reduce conflict (shift 35(0), red'n 29(4)) on '&'
-state 59
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
- expr: expr ANDAND expr. (29)
-
- EQEQ shift 27
- NEQ shift 28
- GE shift 30
- LE shift 32
- '>' shift 29
- '<' shift 31
- '+' shift 22
- '-' shift 23
- '*' shift 24
- '/' shift 25
- '%' shift 26
- '|' shift 33
- '&' shift 35
- . reduce 29 (src line 170)
-
-
-state 60
- expr: IDENT '(' exprs.')'
- exprs: exprs.',' expr
-
- ',' shift 65
- ')' shift 64
- . error
-
-
-state 61
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
- exprs: expr. (32)
-
- EQEQ shift 27
- NEQ shift 28
- GE shift 30
- LE shift 32
- OROR shift 34
- ANDAND shift 36
- '?' shift 21
- '>' shift 29
- '<' shift 31
- '+' shift 22
- '-' shift 23
- '*' shift 24
- '/' shift 25
- '%' shift 26
- '|' shift 33
- '&' shift 35
- . reduce 32 (src line 183)
-
-
-state 62
- expr: '(' expr ')'. (14)
-
- . reduce 14 (src line 110)
-
-
-state 63
- expr: expr '?' expr ':'.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 66
-
-state 64
- expr: IDENT '(' exprs ')'. (30)
-
- . reduce 30 (src line 174)
-
-
-state 65
- exprs: exprs ','.expr
-
- IDENT shift 10
- NUMBER shift 11
- STRING shift 15
- '-' shift 12
- '!' shift 13
- '^' shift 14
- '(' shift 16
- . error
-
- expr goto 67
-
-66: shift/reduce conflict (shift 33(0), red'n 13(2)) on '|'
-66: shift/reduce conflict (shift 35(0), red'n 13(2)) on '&'
-state 66
- expr: expr.'?' expr ':' expr
- expr: expr '?' expr ':' expr. (13)
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
-
- EQEQ shift 27
- NEQ shift 28
- GE shift 30
- LE shift 32
- OROR shift 34
- ANDAND shift 36
- '?' shift 21
- '>' shift 29
- '<' shift 31
- '+' shift 22
- '-' shift 23
- '*' shift 24
- '/' shift 25
- '%' shift 26
- '|' shift 33
- '&' shift 35
- . reduce 13 (src line 106)
-
-
-state 67
- expr: expr.'?' expr ':' expr
- expr: expr.'+' expr
- expr: expr.'-' expr
- expr: expr.'*' expr
- expr: expr.'/' expr
- expr: expr.'%' expr
- expr: expr.EQEQ expr
- expr: expr.NEQ expr
- expr: expr.'>' expr
- expr: expr.GE expr
- expr: expr.'<' expr
- expr: expr.LE expr
- expr: expr.'|' expr
- expr: expr.OROR expr
- expr: expr.'&' expr
- expr: expr.ANDAND expr
- exprs: exprs ',' expr. (33)
-
- EQEQ shift 27
- NEQ shift 28
- GE shift 30
- LE shift 32
- OROR shift 34
- ANDAND shift 36
- '?' shift 21
- '>' shift 29
- '<' shift 31
- '+' shift 22
- '-' shift 23
- '*' shift 24
- '/' shift 25
- '%' shift 26
- '|' shift 33
- '&' shift 35
- . reduce 33 (src line 187)
-
-
-37 terminals, 9 nonterminals
-40 grammar rules, 68/2000 states
-66 shift/reduce, 0 reduce/reduce conflicts reported
-58 working sets used
-memory: parser 38/30000
-49 extra closures
-439 shift entries, 5 exceptions
-37 goto entries
-2 entries saved by goto default
-Optimizer space used: output 251/30000
-251 table entries, 74 zero
-maximum spread: 37, maximum offset: 65
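For reference, y.output above is the human-readable parser description that goyacc writes next to the generated parser. A hedged sketch of how both files are typically regenerated from parser.go.y; the -o and -v flags are assumed to match the goyacc defaults and are not taken from this repository:

    //go:generate goyacc -o parser.go -v y.output parser.go.y
    package parser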
diff --git a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/vm/env.go b/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/vm/env.go
deleted file mode 100644
index 08b68673eb..0000000000
--- a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/vm/env.go
+++ /dev/null
@@ -1,258 +0,0 @@
-package vm
-
-import (
- "fmt"
- "reflect"
- "strings"
- "sync"
-
- "github.com/mattn/kinako/parser"
-)
-
-// Env provides the interface used to run the VM. It represents either a function
-// scope or a block scope; entering a block scope creates a new Env.
-type Env struct {
- name string
- env map[string]reflect.Value
- typ map[string]reflect.Type
- parent *Env
- interrupt *bool
- sync.RWMutex
-}
-
-// NewEnv creates new global scope.
-func NewEnv() *Env {
- b := false
-
- return &Env{
- env: make(map[string]reflect.Value),
- typ: make(map[string]reflect.Type),
- parent: nil,
- interrupt: &b,
- }
-}
-
-// NewEnv creates new child scope.
-func (e *Env) NewEnv() *Env {
- return &Env{
- env: make(map[string]reflect.Value),
- typ: make(map[string]reflect.Type),
- parent: e,
- name: e.name,
- interrupt: e.interrupt,
- }
-}
-
-func NewPackage(n string) *Env {
- b := false
-
- return &Env{
- env: make(map[string]reflect.Value),
- typ: make(map[string]reflect.Type),
- parent: nil,
- name: n,
- interrupt: &b,
- }
-}
-
-func (e *Env) NewPackage(n string) *Env {
- return &Env{
- env: make(map[string]reflect.Value),
- typ: make(map[string]reflect.Type),
- parent: e,
- name: n,
- interrupt: e.interrupt,
- }
-}
-
-// Destroy deletes current scope.
-func (e *Env) Destroy() {
- e.Lock()
- defer e.Unlock()
-
- if e.parent == nil {
- return
- }
- for k, v := range e.parent.env {
- if v.IsValid() && v.Interface() == e {
- delete(e.parent.env, k)
- }
- }
- e.parent = nil
- e.env = nil
-}
-
-// NewModule creates new module scope as global.
-func (e *Env) NewModule(n string) *Env {
- m := &Env{
- env: make(map[string]reflect.Value),
- parent: e,
- name: n,
- }
- e.Define(n, m)
- return m
-}
-
-// SetName sets the name of the scope, which marks the scope as a module.
-func (e *Env) SetName(n string) {
- e.Lock()
- e.name = n
- e.Unlock()
-}
-
-// GetName returns module name.
-func (e *Env) GetName() string {
- e.RLock()
- defer e.RUnlock()
-
- return e.name
-}
-
-// Addr returns the address of the value bound to the given symbol. It walks up
-// parent scopes until the symbol is found, or returns an error.
-func (e *Env) Addr(k string) (reflect.Value, error) {
- e.RLock()
- defer e.RUnlock()
-
- if v, ok := e.env[k]; ok {
- return v.Addr(), nil
- }
- if e.parent == nil {
- return NilValue, fmt.Errorf("Undefined symbol '%s'", k)
- }
- return e.parent.Addr(k)
-}
-
-// Type returns the type bound to the given symbol. It walks up parent scopes
-// until the symbol is found, or returns an error.
-func (e *Env) Type(k string) (reflect.Type, error) {
- e.RLock()
- defer e.RUnlock()
-
- if v, ok := e.typ[k]; ok {
- return v, nil
- }
- if e.parent == nil {
- return NilType, fmt.Errorf("Undefined type '%s'", k)
- }
- return e.parent.Type(k)
-}
-
-// Get returns the value bound to the given symbol. It walks up parent scopes
-// until the symbol is found, or returns an error.
-func (e *Env) Get(k string) (reflect.Value, error) {
- e.RLock()
- defer e.RUnlock()
-
- if v, ok := e.env[k]; ok {
- return v, nil
- }
- if e.parent == nil {
- return NilValue, fmt.Errorf("Undefined symbol '%s'", k)
- }
- return e.parent.Get(k)
-}
-
-// Set updates the value bound to the given symbol. It walks up parent scopes
-// until the symbol is found, or returns an error.
-func (e *Env) Set(k string, v interface{}) error {
- e.Lock()
- defer e.Unlock()
-
- if _, ok := e.env[k]; ok {
- val, ok := v.(reflect.Value)
- if !ok {
- val = reflect.ValueOf(v)
- }
- e.env[k] = val
- return nil
- }
- if e.parent == nil {
- return fmt.Errorf("Unknown symbol '%s'", k)
- }
- return e.parent.Set(k, v)
-}
-
-// DefineGlobal defines symbol in global scope.
-func (e *Env) DefineGlobal(k string, v interface{}) error {
- if e.parent == nil {
- return e.Define(k, v)
- }
- return e.parent.DefineGlobal(k, v)
-}
-
-// DefineType defines the type for the given symbol in the global scope.
-func (e *Env) DefineType(k string, t interface{}) error {
- if strings.Contains(k, ".") {
- return fmt.Errorf("Unknown symbol '%s'", k)
- }
- global := e
- keys := []string{k}
-
- e.RLock()
- for global.parent != nil {
- if global.name != "" {
- keys = append(keys, global.name)
- }
- global = global.parent
- }
- e.RUnlock()
-
- for i, j := 0, len(keys)-1; i < j; i, j = i+1, j-1 {
- keys[i], keys[j] = keys[j], keys[i]
- }
-
- typ, ok := t.(reflect.Type)
- if !ok {
- typ = reflect.TypeOf(t)
- }
-
- global.Lock()
- global.typ[strings.Join(keys, ".")] = typ
- global.Unlock()
-
- return nil
-}
-
-// Define defines symbol in current scope.
-func (e *Env) Define(k string, v interface{}) error {
- if strings.Contains(k, ".") {
- return fmt.Errorf("Unknown symbol '%s'", k)
- }
- val, ok := v.(reflect.Value)
- if !ok {
- val = reflect.ValueOf(v)
- }
-
- e.Lock()
- e.env[k] = val
- e.Unlock()
-
- return nil
-}
-
-// String returns the name of the current scope.
-func (e *Env) String() string {
- e.RLock()
- defer e.RUnlock()
-
- return e.name
-}
-
-// Dump prints the symbol values in the scope.
-func (e *Env) Dump() {
- e.RLock()
- for k, v := range e.env {
- fmt.Printf("%v = %#v\n", k, v)
- }
- e.RUnlock()
-}
-
-// Execute parses and runs source in current scope.
-func (e *Env) Execute(src string) (reflect.Value, error) {
- stmts, err := parser.ParseSrc(src)
- if err != nil {
- return NilValue, err
- }
- return Run(stmts, e)
-}
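The Env type above forms a scope chain: Define binds a symbol in the current scope, while Get and Set walk parent scopes until the symbol is found. A short usage sketch built only from the methods defined above (illustrative, not part of the original source):

    package main

    import (
        "fmt"

        "github.com/mattn/kinako/vm"
    )

    func main() {
        global := vm.NewEnv()
        global.Define("x", int64(1)) // binds x in the global scope

        child := global.NewEnv() // child (block) scope
        if v, err := child.Get("x"); err == nil {
            fmt.Println(v.Int()) // 1: resolved by walking up to the parent
        }
        child.Set("x", int64(2)) // rebinds x in the scope that defines it
    }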
diff --git a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/vm/vm.go b/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/vm/vm.go
deleted file mode 100644
index efbdb8cadc..0000000000
--- a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/vm/vm.go
+++ /dev/null
@@ -1,476 +0,0 @@
-package vm
-
-import (
- "errors"
- "fmt"
- "math"
- "os"
- "reflect"
- "strconv"
- "strings"
-
- "github.com/mattn/kinako/ast"
-)
-
-var (
- NilValue = reflect.ValueOf((*interface{})(nil))
- NilType = reflect.TypeOf((*interface{})(nil))
- TrueValue = reflect.ValueOf(true)
- FalseValue = reflect.ValueOf(false)
-)
-
-// Error provides a convenient interface for handling runtime errors.
-// It implements the standard error interface and can be inspected with a type assertion.
-type Error struct {
- Message string
-}
-
-var (
- BreakError = errors.New("Unexpected break statement")
- ContinueError = errors.New("Unexpected continue statement")
- ReturnError = errors.New("Unexpected return statement")
- InterruptError = errors.New("Execution interrupted")
-)
-
-// Error returns the error message.
-func (e *Error) Error() string {
- return e.Message
-}
-
-// Func is the function type used to handle reflected functions internally.
-type Func func(args ...reflect.Value) (reflect.Value, error)
-
-// Run executes statements in the specified environment.
-func Run(stmts []ast.Stmt, env *Env) (reflect.Value, error) {
- rv := NilValue
- var err error
- for _, stmt := range stmts {
- rv, err = RunSingleStmt(stmt, env)
- if err != nil {
- return rv, err
- }
- }
- return rv, nil
-}
-
-// Interrupts the execution of any running statements in the specified environment.
-//
-// Note that the execution is not instantly aborted: after a call to Interrupt,
-// the current running statement will finish, but the next statement will not run,
-// and instead will return a NilValue and an InterruptError.
-func Interrupt(env *Env) {
- env.Lock()
- *(env.interrupt) = true
- env.Unlock()
-}
-
-// RunSingleStmt executes one statement in the specified environment.
-func RunSingleStmt(stmt ast.Stmt, env *Env) (reflect.Value, error) {
- env.Lock()
- if *(env.interrupt) {
- *(env.interrupt) = false
- env.Unlock()
-
- return NilValue, InterruptError
- }
- env.Unlock()
-
- switch stmt := stmt.(type) {
- case *ast.ExprStmt:
- rv, err := invokeExpr(stmt.Expr, env)
- if err != nil {
- return rv, err
- }
- return rv, nil
- case *ast.LetStmt:
- rv := NilValue
- var err error
- rv, err = invokeExpr(stmt.Rhs, env)
- if err != nil {
- return rv, err
- }
- _, err = invokeLetExpr(stmt.Lhs, rv, env)
- if err != nil {
- return rv, err
- }
- return rv, nil
- default:
- return NilValue, errors.New("unknown statement")
- }
-}
-
-// toString converts any reflect.Value into a string.
-func toString(v reflect.Value) string {
- if v.Kind() == reflect.Interface {
- v = v.Elem()
- }
- if v.Kind() == reflect.String {
- return v.String()
- }
- if !v.IsValid() {
- return "nil"
- }
- return fmt.Sprint(v.Interface())
-}
-
-// toBool converts any reflect.Value into a bool.
-func toBool(v reflect.Value) bool {
- if v.Kind() == reflect.Interface {
- v = v.Elem()
- }
-
- switch v.Kind() {
- case reflect.Float32, reflect.Float64:
- return v.Float() != 0.0
- case reflect.Int, reflect.Int32, reflect.Int64:
- return v.Int() != 0
- case reflect.Bool:
- return v.Bool()
- case reflect.String:
- if v.String() == "true" {
- return true
- }
- if toInt64(v) != 0 {
- return true
- }
- }
- return false
-}
-
-// toFloat64 converts any reflect.Value into a float64.
-func toFloat64(v reflect.Value) float64 {
- if v.Kind() == reflect.Interface {
- v = v.Elem()
- }
- switch v.Kind() {
- case reflect.Float32, reflect.Float64:
- return v.Float()
- case reflect.Int, reflect.Int32, reflect.Int64:
- return float64(v.Int())
- }
- return 0.0
-}
-
-func isNil(v reflect.Value) bool {
- if !v.IsValid() || v.Kind().String() == "unsafe.Pointer" {
- return true
- }
- if (v.Kind() == reflect.Interface || v.Kind() == reflect.Ptr) && v.IsNil() {
- return true
- }
- return false
-}
-
-func isNum(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:
- return true
- }
- return false
-}
-
-// equal returns true when lhsV and rhsV are the same value.
-func equal(lhsV, rhsV reflect.Value) bool {
- lhsIsNil, rhsIsNil := isNil(lhsV), isNil(rhsV)
- if lhsIsNil && rhsIsNil {
- return true
- }
- if (!lhsIsNil && rhsIsNil) || (lhsIsNil && !rhsIsNil) {
- return false
- }
- if lhsV.Kind() == reflect.Interface || lhsV.Kind() == reflect.Ptr {
- lhsV = lhsV.Elem()
- }
- if rhsV.Kind() == reflect.Interface || rhsV.Kind() == reflect.Ptr {
- rhsV = rhsV.Elem()
- }
- if !lhsV.IsValid() || !rhsV.IsValid() {
- return true
- }
- if isNum(lhsV) && isNum(rhsV) {
- if rhsV.Type().ConvertibleTo(lhsV.Type()) {
- rhsV = rhsV.Convert(lhsV.Type())
- }
- }
- if lhsV.CanInterface() && rhsV.CanInterface() {
- return reflect.DeepEqual(lhsV.Interface(), rhsV.Interface())
- }
- return reflect.DeepEqual(lhsV, rhsV)
-}
-
-// toInt64 converts any reflect.Value into an int64.
-func toInt64(v reflect.Value) int64 {
- if v.Kind() == reflect.Interface {
- v = v.Elem()
- }
- switch v.Kind() {
- case reflect.Float32, reflect.Float64:
- return int64(v.Float())
- case reflect.Int, reflect.Int32, reflect.Int64:
- return v.Int()
- case reflect.String:
- s := v.String()
- var i int64
- var err error
- if strings.HasPrefix(s, "0x") {
- i, err = strconv.ParseInt(s, 16, 64)
- } else {
- i, err = strconv.ParseInt(s, 10, 64)
- }
- if err == nil {
- return int64(i)
- }
- }
- return 0
-}
-
-func invokeLetExpr(expr ast.Expr, rv reflect.Value, env *Env) (reflect.Value, error) {
- switch lhs := expr.(type) {
- case *ast.IdentExpr:
- if env.Set(lhs.Lit, rv) != nil {
- if strings.Contains(lhs.Lit, ".") {
- return NilValue, fmt.Errorf("Undefined symbol '%s'", lhs.Lit)
- }
- env.Define(lhs.Lit, rv)
- }
- return rv, nil
- }
- return NilValue, errors.New("Invalid operation")
-}
-
-// invokeExpr evaluates one expression.
-func invokeExpr(expr ast.Expr, env *Env) (reflect.Value, error) {
- switch e := expr.(type) {
- case *ast.NumberExpr:
- if strings.Contains(e.Lit, ".") || strings.Contains(e.Lit, "e") {
- v, err := strconv.ParseFloat(e.Lit, 64)
- if err != nil {
- return NilValue, err
- }
- return reflect.ValueOf(float64(v)), nil
- }
- var i int64
- var err error
- if strings.HasPrefix(e.Lit, "0x") {
- i, err = strconv.ParseInt(e.Lit[2:], 16, 64)
- } else {
- i, err = strconv.ParseInt(e.Lit, 10, 64)
- }
- if err != nil {
- return NilValue, err
- }
- return reflect.ValueOf(i), nil
- case *ast.IdentExpr:
- return env.Get(e.Lit)
- case *ast.StringExpr:
- return reflect.ValueOf(e.Lit), nil
- case *ast.UnaryExpr:
- v, err := invokeExpr(e.Expr, env)
- if err != nil {
- return v, err
- }
- switch e.Operator {
- case "-":
- if v.Kind() == reflect.Float64 {
- return reflect.ValueOf(-v.Float()), nil
- }
- return reflect.ValueOf(-v.Int()), nil
- case "^":
- return reflect.ValueOf(^toInt64(v)), nil
- case "!":
- return reflect.ValueOf(!toBool(v)), nil
- default:
- return NilValue, fmt.Errorf("Unknown operator '%s'", e.Operator)
- }
- case *ast.ParenExpr:
- v, err := invokeExpr(e.SubExpr, env)
- if err != nil {
- return v, err
- }
- return v, nil
- case *ast.BinOpExpr:
- lhsV := NilValue
- rhsV := NilValue
- var err error
-
- lhsV, err = invokeExpr(e.Lhs, env)
- if err != nil {
- return lhsV, err
- }
- if lhsV.Kind() == reflect.Interface {
- lhsV = lhsV.Elem()
- }
- if e.Rhs != nil {
- rhsV, err = invokeExpr(e.Rhs, env)
- if err != nil {
- return rhsV, err
- }
- if rhsV.Kind() == reflect.Interface {
- rhsV = rhsV.Elem()
- }
- }
- switch e.Operator {
- case "+":
- if lhsV.Kind() == reflect.String || rhsV.Kind() == reflect.String {
- return reflect.ValueOf(toString(lhsV) + toString(rhsV)), nil
- }
- if (lhsV.Kind() == reflect.Array || lhsV.Kind() == reflect.Slice) && (rhsV.Kind() != reflect.Array && rhsV.Kind() != reflect.Slice) {
- return reflect.Append(lhsV, rhsV), nil
- }
- if (lhsV.Kind() == reflect.Array || lhsV.Kind() == reflect.Slice) && (rhsV.Kind() == reflect.Array || rhsV.Kind() == reflect.Slice) {
- return reflect.AppendSlice(lhsV, rhsV), nil
- }
- if lhsV.Kind() == reflect.Float64 || rhsV.Kind() == reflect.Float64 {
- return reflect.ValueOf(toFloat64(lhsV) + toFloat64(rhsV)), nil
- }
- return reflect.ValueOf(toInt64(lhsV) + toInt64(rhsV)), nil
- case "-":
- if lhsV.Kind() == reflect.Float64 || rhsV.Kind() == reflect.Float64 {
- return reflect.ValueOf(toFloat64(lhsV) - toFloat64(rhsV)), nil
- }
- return reflect.ValueOf(toInt64(lhsV) - toInt64(rhsV)), nil
- case "*":
- if lhsV.Kind() == reflect.String && (rhsV.Kind() == reflect.Int || rhsV.Kind() == reflect.Int32 || rhsV.Kind() == reflect.Int64) {
- return reflect.ValueOf(strings.Repeat(toString(lhsV), int(toInt64(rhsV)))), nil
- }
- if lhsV.Kind() == reflect.Float64 || rhsV.Kind() == reflect.Float64 {
- return reflect.ValueOf(toFloat64(lhsV) * toFloat64(rhsV)), nil
- }
- return reflect.ValueOf(toInt64(lhsV) * toInt64(rhsV)), nil
- case "/":
- return reflect.ValueOf(toFloat64(lhsV) / toFloat64(rhsV)), nil
- case "%":
- return reflect.ValueOf(toInt64(lhsV) % toInt64(rhsV)), nil
- case "==":
- return reflect.ValueOf(equal(lhsV, rhsV)), nil
- case "!=":
- return reflect.ValueOf(!equal(lhsV, rhsV)), nil
- case ">":
- return reflect.ValueOf(toFloat64(lhsV) > toFloat64(rhsV)), nil
- case ">=":
- return reflect.ValueOf(toFloat64(lhsV) >= toFloat64(rhsV)), nil
- case "<":
- return reflect.ValueOf(toFloat64(lhsV) < toFloat64(rhsV)), nil
- case "<=":
- return reflect.ValueOf(toFloat64(lhsV) <= toFloat64(rhsV)), nil
- case "|":
- return reflect.ValueOf(toInt64(lhsV) | toInt64(rhsV)), nil
- case "||":
- if toBool(lhsV) {
- return lhsV, nil
- }
- return rhsV, nil
- case "&":
- return reflect.ValueOf(toInt64(lhsV) & toInt64(rhsV)), nil
- case "&&":
- if toBool(lhsV) {
- return rhsV, nil
- }
- return lhsV, nil
- case "**":
- if lhsV.Kind() == reflect.Float64 {
- return reflect.ValueOf(math.Pow(toFloat64(lhsV), toFloat64(rhsV))), nil
- }
- return reflect.ValueOf(int64(math.Pow(toFloat64(lhsV), toFloat64(rhsV)))), nil
- case ">>":
- return reflect.ValueOf(toInt64(lhsV) >> uint64(toInt64(rhsV))), nil
- case "<<":
- return reflect.ValueOf(toInt64(lhsV) << uint64(toInt64(rhsV))), nil
- default:
- return NilValue, errors.New("Unknown operator")
- }
- case *ast.CallExpr:
- f, err := env.Get(e.Name)
- if err != nil {
- return f, err
- }
-
- args := []reflect.Value{}
- for i, expr := range e.SubExprs {
- arg, err := invokeExpr(expr, env)
- if err != nil {
- return arg, err
- }
-
- if i < f.Type().NumIn() {
- if !f.Type().IsVariadic() {
- it := f.Type().In(i)
- if arg.Kind().String() == "unsafe.Pointer" {
- arg = reflect.New(it).Elem()
- }
- if arg.Kind() != it.Kind() && arg.IsValid() && arg.Type().ConvertibleTo(it) {
- arg = arg.Convert(it)
- } else if arg.Kind() == reflect.Func {
- if _, isFunc := arg.Interface().(Func); isFunc {
- rfunc := arg
- arg = reflect.MakeFunc(it, func(args []reflect.Value) []reflect.Value {
- for i := range args {
- args[i] = reflect.ValueOf(args[i])
- }
- return rfunc.Call(args)[:it.NumOut()]
- })
- }
- } else if !arg.IsValid() {
- arg = reflect.Zero(it)
- }
- }
- }
- if !arg.IsValid() {
- arg = NilValue
- }
-
- args = append(args, arg)
- }
- ret := NilValue
- fnc := func() {
- defer func() {
- if os.Getenv("KINAKO_DEBUG") == "" {
- if ex := recover(); ex != nil {
- if e, ok := ex.(error); ok {
- err = e
- } else {
- err = errors.New(fmt.Sprint(ex))
- }
- }
- }
- }()
- if f.Kind() == reflect.Interface {
- f = f.Elem()
- }
- rets := f.Call(args)
- if f.Type().NumOut() == 1 {
- ret = rets[0]
- } else {
- var result []interface{}
- for _, r := range rets {
- result = append(result, r.Interface())
- }
- ret = reflect.ValueOf(result)
- }
- }
- fnc()
- if err != nil {
- return ret, err
- }
- return ret, nil
- case *ast.TernaryOpExpr:
- rv, err := invokeExpr(e.Expr, env)
- if err != nil {
- return rv, err
- }
- if toBool(rv) {
- lhsV, err := invokeExpr(e.Lhs, env)
- if err != nil {
- return lhsV, err
- }
- return lhsV, nil
- }
- rhsV, err := invokeExpr(e.Rhs, env)
- if err != nil {
- return rhsV, err
- }
- return rhsV, nil
- default:
- return NilValue, errors.New("Unknown expression")
- }
-}
diff --git a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/vm/vm_test.go b/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/vm/vm_test.go
deleted file mode 100644
index 929d23f9fc..0000000000
--- a/vendor/github.com/leonelquinteros/gotext/vendor/github.com/mattn/kinako/vm/vm_test.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package vm
-
-import (
- "reflect"
- "testing"
-)
-
-func TestExecute(t *testing.T) {
- e := NewEnv()
- e.Define("foo", int64(1))
- e.Define("bar", int64(2))
- e.Define("baz", int64(3))
-
- tests := []struct {
- input string
- want interface{}
- }{
- {
- input: "foo+bar",
- want: int64(3),
- },
- {
- input: "foo-bar",
- want: int64(-1),
- },
- {
- input: "foo*bar",
- want: int64(2),
- },
- {
- input: "foo/bar",
- want: float64(0.5),
- },
- {
- input: "baz*(foo+bar)",
- want: int64(9),
- },
- {
- input: "baz > 2 ? foo : bar",
- want: int64(1),
- },
- }
-
- for _, tt := range tests {
- r, err := e.Execute(tt.input)
- if err != nil {
- t.Fatal(err)
- }
- got := r.Interface()
- if !reflect.DeepEqual(got, tt.want) {
- t.Fatalf("want %v, but %v:", tt.want, got)
- }
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml
index f50fc02310..5e7113ce98 100644
--- a/vendor/github.com/onsi/ginkgo/.travis.yml
+++ b/vendor/github.com/onsi/ginkgo/.travis.yml
@@ -1,9 +1,9 @@
language: go
go:
- - 1.5.x
- 1.6.x
- 1.7.x
- 1.8.x
+ - 1.9.x
install:
- go get -v -t ./...
@@ -12,4 +12,4 @@ install:
- go install github.com/onsi/ginkgo/ginkgo
- export PATH=$PATH:$HOME/gopath/bin
-script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace
+script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet
diff --git a/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md
index bc0c54fe2b..8559e01363 100644
--- a/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md
+++ b/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md
@@ -7,6 +7,7 @@ Your contributions to Ginkgo are essential for its long-term maintenance and imp
- If you're adding functionality to the Ginkgo library, make sure to add appropriate unit and/or integration tests (under the `integration` folder).
- If you're adding functionality to the Ginkgo CLI note that there are very few unit tests. Please add an integration test.
- Please run all tests locally (`ginkgo -r -p`) and make sure they go green before submitting the PR
+ - Please run following linter locally `go vet ./...` and make sure output does not contain any warnings
- Update the documentation. In addition to standard `godoc` comments Ginkgo has extensive documentation on the `gh-pages` branch. If relevant, please submit a docs PR to that branch alongside your code PR.
-Thanks for supporting Ginkgo!
\ No newline at end of file
+Thanks for supporting Ginkgo!
diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md
index 97e9cdc426..7a3c618ae2 100644
--- a/vendor/github.com/onsi/ginkgo/README.md
+++ b/vendor/github.com/onsi/ginkgo/README.md
@@ -1,4 +1,4 @@
-![Ginkgo: A Golang BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
+![Ginkgo: A Go BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
[![Build Status](https://travis-ci.org/onsi/ginkgo.svg)](https://travis-ci.org/onsi/ginkgo)
@@ -25,7 +25,7 @@ If you have a question, comment, bug report, feature request, etc. please open a
- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
- `ginkgo -nodes=N` runs your tests in `N` parallel processes and print out coherent output in realtime
- - `ginkgo -cover` runs your tests using Golang's code coverage tool
+ - `ginkgo -cover` runs your tests using Go's code coverage tool
- `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
- `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
- `ginkgo -r` runs all tests suites under the current directory
@@ -55,18 +55,18 @@ If you have a question, comment, bug report, feature request, etc. please open a
Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/)
-## [Agouti](http://github.com/sclevine/agouti): A Golang Acceptance Testing Framework
+## [Agouti](http://github.com/sclevine/agouti): A Go Acceptance Testing Framework
Agouti allows you run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org)
## Set Me Up!
-You'll need Golang v1.3+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
+You'll need the Go command-line tools. Ginkgo is tested with Go 1.6+, but preferably you should get the latest. Follow the [installation instructions](https://golang.org/doc/install) if you don't have it installed.
```bash
-go get github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI
-go get github.com/onsi/gomega # fetches the matcher library
+go get -u github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI
+go get -u github.com/onsi/gomega/... # fetches the matcher library
cd path/to/package/you/want/to/test
@@ -85,11 +85,11 @@ Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Go
With that said, it's great to know what your options are :)
-### What Golang gives you out of the box
+### What Go gives you out of the box
-Testing is a first class citizen in Golang, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
+Testing is a first class citizen in Go, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
-### Matcher libraries for Golang's XUnit style tests
+### Matcher libraries for Go's XUnit style tests
A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:
@@ -100,7 +100,7 @@ You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomeg
### BDD style testing frameworks
-There are a handful of BDD-style testing frameworks written for Golang. Here are a few:
+There are a handful of BDD-style testing frameworks written for Go. Here are a few:
- [Ginkgo](https://github.com/onsi/ginkgo) ;)
- [GoConvey](https://github.com/smartystreets/goconvey)
@@ -108,7 +108,7 @@ There are a handful of BDD-style testing frameworks written for Golang. Here ar
- [Mao](https://github.com/azer/mao)
- [Zen](https://github.com/pranavraja/zen)
-Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of golang testing libraries.
+Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of Go testing libraries.
Go explore!
diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go
index 60d5ea22e8..b6e1248bd9 100644
--- a/vendor/github.com/onsi/ginkgo/config/config.go
+++ b/vendor/github.com/onsi/ginkgo/config/config.go
@@ -47,6 +47,7 @@ type DefaultReporterConfigType struct {
NoColor bool
SlowSpecThreshold float64
NoisyPendings bool
+ NoisySkippings bool
Succinct bool
Verbose bool
FullTrace bool
@@ -90,6 +91,7 @@ func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.")
flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
+ flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.")
flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.")
flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
@@ -171,6 +173,10 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor
result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
}
+ if !reporter.NoisySkippings {
+ result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix))
+ }
+
if reporter.Verbose {
result = append(result, fmt.Sprintf("--%sv", prefix))
}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go
index 1e209e4f52..fea4d4d4e6 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go
@@ -43,10 +43,10 @@ func BuildBootstrapCommand() *Command {
var bootstrapText = `package {{.Package}}
import (
+ "testing"
+
{{.GinkgoImport}}
{{.GomegaImport}}
-
- "testing"
)
func Test{{.FormattedName}}(t *testing.T) {
@@ -58,11 +58,11 @@ func Test{{.FormattedName}}(t *testing.T) {
var agoutiBootstrapText = `package {{.Package}}
import (
+ "testing"
+
{{.GinkgoImport}}
{{.GomegaImport}}
"github.com/sclevine/agouti"
-
- "testing"
)
func Test{{.FormattedName}}(t *testing.T) {
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert_command.go
index 89e60d3930..5944ed85cc 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/convert_command.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/convert_command.go
@@ -3,8 +3,9 @@ package main
import (
"flag"
"fmt"
- "github.com/onsi/ginkgo/ginkgo/convert"
"os"
+
+ "github.com/onsi/ginkgo/ginkgo/convert"
)
func BuildConvertCommand() *Command {
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go
index 3b9405aa07..019fd23373 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go
@@ -34,10 +34,10 @@ func BuildGenerateCommand() *Command {
var specText = `package {{.Package}}
import (
- {{if .DotImportPackage}}. "{{.PackageImportPath}}"{{end}}
-
{{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
{{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
+
+ {{if .DotImportPackage}}. "{{.PackageImportPath}}"{{end}}
)
var _ = Describe("{{.Subject}}", func() {
@@ -45,15 +45,15 @@ var _ = Describe("{{.Subject}}", func() {
})
`
-var agoutiSpecText = `package {{.Package}}_test
+var agoutiSpecText = `package {{.Package}}
import (
- {{if .DotImportPackage}}. "{{.PackageImportPath}}"{{end}}
-
{{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
{{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
- . "github.com/sclevine/agouti/matchers"
"github.com/sclevine/agouti"
+ . "github.com/sclevine/agouti/matchers"
+
+ {{if .DotImportPackage}}. "{{.PackageImportPath}}"{{end}}
)
var _ = Describe("{{.Subject}}", func() {
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot_test.go b/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot_test.go
index 37260a89f2..1470b7478c 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot_test.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot_test.go
@@ -1,8 +1,9 @@
package nodot_test
import (
- . "github.com/onsi/ginkgo/ginkgo/nodot"
"strings"
+
+ . "github.com/onsi/ginkgo/ginkgo/nodot"
)
var _ = Describe("ApplyNoDot", func() {
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go
index 212235bae0..39b88b5d1b 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go
@@ -3,11 +3,12 @@ package main
import (
"bufio"
"flag"
- "github.com/onsi/ginkgo/ginkgo/nodot"
"io/ioutil"
"os"
"path/filepath"
"regexp"
+
+ "github.com/onsi/ginkgo/ginkgo/nodot"
)
func BuildNodotCommand() *Command {
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/version_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/version_command.go
index cdca3a348b..f586908e87 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/version_command.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo/version_command.go
@@ -3,6 +3,7 @@ package main
import (
"flag"
"fmt"
+
"github.com/onsi/ginkgo/config"
)
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
index 8befd35ad9..9ab1c9a943 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
@@ -233,7 +233,7 @@ func buildDefaultReporter() Reporter {
}
}
-//Skip notifies Ginkgo that the current spec should be skipped.
+//Skip notifies Ginkgo that the current spec was skipped.
func Skip(message string, callerSkip ...int) {
skip := 0
if len(callerSkip) > 0 {
@@ -362,22 +362,26 @@ func XIt(text string, _ ...interface{}) bool {
//which "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks
//which apply to It blocks.
func Specify(text string, body interface{}, timeout ...float64) bool {
- return It(text, body, timeout...)
+ globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
+ return true
}
//You can focus individual Specifys using FSpecify
func FSpecify(text string, body interface{}, timeout ...float64) bool {
- return FIt(text, body, timeout...)
+ globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
+ return true
}
//You can mark Specifys as pending using PSpecify
func PSpecify(text string, is ...interface{}) bool {
- return PIt(text, is...)
+ globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
+ return true
}
//You can mark Specifys as pending using XSpecify
func XSpecify(text string, is ...interface{}) bool {
- return XIt(text, is...)
+ globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
+ return true
}
//By allows you to better document large Its.
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_suite_test.go
index d3d35e817a..721d0f2c37 100644
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_suite_test.go
+++ b/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_suite_test.go
@@ -1,10 +1,10 @@
package nested_test
import (
+ "testing"
+
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
-
- "testing"
)
func TestNested(t *testing.T) {
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/suite_test.go
index fd69d3336d..9ea2291351 100644
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/suite_test.go
+++ b/vendor/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/suite_test.go
@@ -1,10 +1,10 @@
package tmp_test
import (
+ "testing"
+
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
-
- "testing"
)
func TestConvertFixtures(t *testing.T) {
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/fail_fixture/fail_fixture_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/fail_fixture/fail_fixture_test.go
index 801acf135f..ea6f71ca9e 100644
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/fail_fixture/fail_fixture_test.go
+++ b/vendor/github.com/onsi/ginkgo/integration/_fixtures/fail_fixture/fail_fixture_test.go
@@ -97,3 +97,7 @@ var _ = Describe("Excercising different failure modes", func() {
println("NEVER SEE THIS")
}, 1)
})
+
+var _ = Specify("a top level specify", func() {
+ Fail("fail the test")
+})
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags_test.go
index 5cd9b72f7a..8f65878752 100644
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags_test.go
+++ b/vendor/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags_test.go
@@ -3,11 +3,12 @@ package flags_test
import (
"flag"
"fmt"
+ remapped "math"
+ "time"
+
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/integration/_fixtures/flags_tests"
. "github.com/onsi/gomega"
- "time"
- remapped "math"
)
var customFlag string
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/hanging_suite/hanging_suite_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/hanging_suite/hanging_suite_test.go
index 1728dcbe88..6a5a070e12 100644
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/hanging_suite/hanging_suite_test.go
+++ b/vendor/github.com/onsi/ginkgo/integration/_fixtures/hanging_suite/hanging_suite_test.go
@@ -3,6 +3,7 @@ package hanging_suite_test
import (
"fmt"
"time"
+
. "github.com/onsi/ginkgo"
)
diff --git a/vendor/github.com/onsi/ginkgo/integration/_fixtures/skip_fixture/skip_fixture_test.go b/vendor/github.com/onsi/ginkgo/integration/_fixtures/skip_fixture/skip_fixture_test.go
index 72aeb8f475..e406aeb46d 100644
--- a/vendor/github.com/onsi/ginkgo/integration/_fixtures/skip_fixture/skip_fixture_test.go
+++ b/vendor/github.com/onsi/ginkgo/integration/_fixtures/skip_fixture/skip_fixture_test.go
@@ -48,7 +48,6 @@ var _ = Describe("Excercising different skip modes", func() {
}, 1)
})
-
var _ = Describe("SKIP in a BeforeEach", func() {
BeforeEach(func() {
Skip("a BeforeEach SKIP")
@@ -69,4 +68,4 @@ var _ = Describe("SKIP in an AfterEach", func() {
It("a SKIP AfterEach", func() {
Expect(true).To(BeTrue())
})
-})
\ No newline at end of file
+})
diff --git a/vendor/github.com/onsi/ginkgo/integration/coverage_test.go b/vendor/github.com/onsi/ginkgo/integration/coverage_test.go
index 76385282ce..09318a9c8a 100644
--- a/vendor/github.com/onsi/ginkgo/integration/coverage_test.go
+++ b/vendor/github.com/onsi/ginkgo/integration/coverage_test.go
@@ -5,6 +5,7 @@ import (
"os/exec"
"fmt"
+
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
@@ -135,13 +136,13 @@ var _ = Describe("Coverage Specs", func() {
coverFile := fmt.Sprintf("./_fixtures/combined_coverage_fixture/%s.coverprofile", p)
// Cleanup
- defer func (f string) {
+ defer func(f string) {
os.RemoveAll(f)
- } (coverFile)
+ }(coverFile)
- defer func (f string) {
+ defer func(f string) {
os.RemoveAll(fmt.Sprintf("./_fixtures/combined_coverage_fixture/%s/coverage.txt", f))
- } (p)
+ }(p)
_, err := os.Stat(coverFile)
diff --git a/vendor/github.com/onsi/ginkgo/integration/fail_test.go b/vendor/github.com/onsi/ginkgo/integration/fail_test.go
index 8dcf5e4ad9..7b1641229d 100644
--- a/vendor/github.com/onsi/ginkgo/integration/fail_test.go
+++ b/vendor/github.com/onsi/ginkgo/integration/fail_test.go
@@ -43,6 +43,13 @@ var _ = Describe("Failing Specs", func() {
Ω(output).Should(ContainSubstring("a measure FAIL failure"))
Ω(output).Should(MatchRegexp(`Test Panicked\n\s+a measure panic`))
- Ω(output).Should(ContainSubstring("0 Passed | 16 Failed"))
+ Ω(output).Should(ContainSubstring("a top level specify"))
+ Ω(output).ShouldNot(ContainSubstring("ginkgo_dsl.go"))
+ // depending on the go version this could be the first line of the Specify
+ // block (>= go1.9) or the last line of the Specify block (< go1.9)
+ Ω(output).Should(Or(ContainSubstring("fail_fixture_test.go:101"), ContainSubstring("fail_fixture_test.go:103")))
+ Ω(output).Should(ContainSubstring("fail_fixture_test.go:102"))
+
+ Ω(output).Should(ContainSubstring("0 Passed | 17 Failed"))
})
})
diff --git a/vendor/github.com/onsi/ginkgo/integration/flags_test.go b/vendor/github.com/onsi/ginkgo/integration/flags_test.go
index e34102d9f5..67703c7e41 100644
--- a/vendor/github.com/onsi/ginkgo/integration/flags_test.go
+++ b/vendor/github.com/onsi/ginkgo/integration/flags_test.go
@@ -164,7 +164,7 @@ var _ = Describe("Flags Specs", func() {
output := string(session.Out.Contents())
Ω(output).Should(ContainSubstring("1 Failed"))
- Ω(output).Should(ContainSubstring("15 Skipped"))
+ Ω(output).Should(ContainSubstring("16 Skipped"))
})
Context("with a flaky test", func() {
@@ -187,7 +187,7 @@ var _ = Describe("Flags Specs", func() {
output := string(session.Out.Contents())
Ω(output).Should(ContainSubstring("synchronous failures"))
- Ω(output).Should(ContainSubstring("16 Specs"))
+ Ω(output).Should(ContainSubstring("17 Specs"))
Ω(output).Should(ContainSubstring("0 Passed"))
Ω(output).Should(ContainSubstring("0 Failed"))
})
diff --git a/vendor/github.com/onsi/ginkgo/integration/watch_test.go b/vendor/github.com/onsi/ginkgo/integration/watch_test.go
index c18e8f8718..8f3c0910f1 100644
--- a/vendor/github.com/onsi/ginkgo/integration/watch_test.go
+++ b/vendor/github.com/onsi/ginkgo/integration/watch_test.go
@@ -41,7 +41,7 @@ var _ = Describe("Watch", func() {
startGinkgoWithGopath := func(args ...string) *gexec.Session {
cmd := ginkgoCommand(rootPath, args...)
- cmd.Env = append([]string{"GOPATH=" + rootPath + ":" + os.Getenv("GOPATH")}, os.Environ()...)
+ os.Setenv("GOPATH", rootPath+":"+os.Getenv("GOPATH"))
session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
Ω(err).ShouldNot(HaveOccurred())
return session
diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go
index 55a9e9d036..cca75a4499 100644
--- a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go
+++ b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go
@@ -1,11 +1,12 @@
package codelocation_test
import (
+ "runtime"
+
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/internal/codelocation"
"github.com/onsi/ginkgo/types"
. "github.com/onsi/gomega"
- "runtime"
)
var _ = Describe("CodeLocation", func() {
diff --git a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_test.go b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_test.go
index b8d61b13fc..11ac9b70b1 100644
--- a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_test.go
+++ b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node_test.go
@@ -1,9 +1,10 @@
package containernode_test
import (
- "github.com/onsi/ginkgo/internal/leafnodes"
"math/rand"
+ "github.com/onsi/ginkgo/internal/leafnodes"
+
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
index 9c3eed2b6f..d6d54234c2 100644
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
@@ -35,15 +35,15 @@ func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elaps
}
func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
- measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...)
b.mu.Lock()
+ measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...)
defer b.mu.Unlock()
measurement.Results = append(measurement.Results, value)
}
func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) {
- measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...)
b.mu.Lock()
+ measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...)
defer b.mu.Unlock()
measurement.Results = append(measurement.Results, value)
}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
index c76fe3a451..6eded7b763 100644
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
@@ -1,9 +1,10 @@
package leafnodes
import (
+ "time"
+
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
- "time"
)
type ItNode struct {
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
index efc3348c1b..3ab9a6d552 100644
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
@@ -1,9 +1,10 @@
package leafnodes
import (
+ "reflect"
+
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
- "reflect"
)
type MeasureNode struct {
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go
index 252065b871..c32c096514 100644
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go
@@ -5,10 +5,11 @@ import (
. "github.com/onsi/ginkgo/internal/leafnodes"
. "github.com/onsi/gomega"
+ "time"
+
"github.com/onsi/ginkgo/internal/codelocation"
Failer "github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
- "time"
)
var _ = Describe("Measure Nodes", func() {
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
index 870ad826da..8b6518b5c8 100644
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
@@ -2,11 +2,12 @@ package leafnodes
import (
"fmt"
+ "reflect"
+ "time"
+
"github.com/onsi/ginkgo/internal/codelocation"
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
- "reflect"
- "time"
)
type runner struct {
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
index 6b725a6315..b4654cd299 100644
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
@@ -1,9 +1,10 @@
package leafnodes
import (
+ "time"
+
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
- "time"
)
type SetupNode struct {
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go
index fcccef488b..6f750ff61f 100644
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go
@@ -179,8 +179,8 @@ func AsynchronousSharedRunnerBehaviors(build func(body interface{}, timeout time
didRun = true
failer.Fail("bam", innerCodeLocation)
time.Sleep(20 * time.Millisecond)
+ defer close(done)
panic("doesn't matter")
- close(done)
}, 10*time.Millisecond, failer, componentCodeLocation).Run()
})
@@ -208,8 +208,8 @@ func AsynchronousSharedRunnerBehaviors(build func(body interface{}, timeout time
didRun = true
time.Sleep(20 * time.Millisecond)
close(guard)
+ defer close(done)
panic("doesn't matter")
- close(done)
}, 10*time.Millisecond, failer, componentCodeLocation).Run()
})
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
index 2ccc7dc0fb..80f16ed786 100644
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
@@ -1,9 +1,10 @@
package leafnodes
import (
+ "time"
+
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
- "time"
)
type SuiteNode interface {
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
index e7030d9149..a721d0cf7f 100644
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
@@ -2,11 +2,12 @@ package leafnodes
import (
"encoding/json"
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
"io/ioutil"
"net/http"
"time"
+
+ "github.com/onsi/ginkgo/internal/failer"
+ "github.com/onsi/ginkgo/types"
)
type synchronizedAfterSuiteNode struct {
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go
index 4266a4bce6..edbdf6ae59 100644
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go
@@ -1,18 +1,21 @@
package leafnodes_test
import (
+ "sync"
+
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/internal/leafnodes"
"github.com/onsi/ginkgo/types"
. "github.com/onsi/gomega"
- "sync"
- "github.com/onsi/gomega/ghttp"
"net/http"
+ "github.com/onsi/gomega/ghttp"
+
+ "time"
+
"github.com/onsi/ginkgo/internal/codelocation"
Failer "github.com/onsi/ginkgo/internal/failer"
- "time"
)
var _ = Describe("SynchronizedAfterSuiteNode", func() {
@@ -132,19 +135,19 @@ var _ = Describe("SynchronizedAfterSuiteNode", func() {
func(writer http.ResponseWriter, request *http.Request) {
ranThing("Request1")
},
- ghttp.RespondWithJSONEncoded(200, types.RemoteAfterSuiteData{false}),
+ ghttp.RespondWithJSONEncoded(200, types.RemoteAfterSuiteData{CanRun: false}),
), ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/RemoteAfterSuiteData"),
func(writer http.ResponseWriter, request *http.Request) {
ranThing("Request2")
},
- ghttp.RespondWithJSONEncoded(200, types.RemoteAfterSuiteData{false}),
+ ghttp.RespondWithJSONEncoded(200, types.RemoteAfterSuiteData{CanRun: false}),
), ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/RemoteAfterSuiteData"),
func(writer http.ResponseWriter, request *http.Request) {
ranThing("Request3")
},
- ghttp.RespondWithJSONEncoded(200, types.RemoteAfterSuiteData{true}),
+ ghttp.RespondWithJSONEncoded(200, types.RemoteAfterSuiteData{CanRun: true}),
))
node = newNode(func() {
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
index 76a9679813..d5c8893194 100644
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
@@ -3,12 +3,13 @@ package leafnodes
import (
"bytes"
"encoding/json"
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
"io/ioutil"
"net/http"
"reflect"
"time"
+
+ "github.com/onsi/ginkgo/internal/failer"
+ "github.com/onsi/ginkgo/types"
)
type synchronizedBeforeSuiteNode struct {
@@ -109,8 +110,6 @@ func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecSt
time.Sleep(50 * time.Millisecond)
}
-
- return types.SpecStateFailed, failure("Shouldn't get here!")
}
func (node *synchronizedBeforeSuiteNode) Passed() bool {
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go
index dbf2426748..46c3e276b6 100644
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go
+++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go
@@ -5,13 +5,15 @@ import (
. "github.com/onsi/ginkgo/internal/leafnodes"
. "github.com/onsi/gomega"
- "github.com/onsi/gomega/ghttp"
"net/http"
+ "github.com/onsi/gomega/ghttp"
+
+ "time"
+
"github.com/onsi/ginkgo/internal/codelocation"
Failer "github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
- "time"
)
var _ = Describe("SynchronizedBeforeSuiteNode", func() {
@@ -176,7 +178,7 @@ var _ = Describe("SynchronizedBeforeSuiteNode", func() {
Context("when A succeeds", func() {
BeforeEach(func() {
- expectedState = types.RemoteBeforeSuiteData{[]byte("my data"), types.RemoteBeforeSuiteStatePassed}
+ expectedState = types.RemoteBeforeSuiteData{Data: []byte("my data"), State: types.RemoteBeforeSuiteStatePassed}
node = newNode(func() []byte {
return []byte("my data")
@@ -200,11 +202,10 @@ var _ = Describe("SynchronizedBeforeSuiteNode", func() {
Context("when A fails", func() {
BeforeEach(func() {
- expectedState = types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStateFailed}
+ expectedState = types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStateFailed}
node = newNode(func() []byte {
panic("BAM")
- return []byte("my data")
}, func([]byte) {
ranB = true
})
@@ -238,10 +239,10 @@ var _ = Describe("SynchronizedBeforeSuiteNode", func() {
server.AppendHandlers(ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/BeforeSuiteState"),
- ghttp.RespondWith(http.StatusOK, string((types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending}).ToJSON())),
+ ghttp.RespondWith(http.StatusOK, string((types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}).ToJSON())),
), ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/BeforeSuiteState"),
- ghttp.RespondWith(http.StatusOK, string((types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending}).ToJSON())),
+ ghttp.RespondWith(http.StatusOK, string((types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}).ToJSON())),
), ghttp.CombineHandlers(
ghttp.VerifyRequest("GET", "/BeforeSuiteState"),
ghttp.RespondWithJSONEncodedPtr(&statusCode, &response),
@@ -259,7 +260,7 @@ var _ = Describe("SynchronizedBeforeSuiteNode", func() {
Context("when A on node1 succeeds", func() {
BeforeEach(func() {
- response = types.RemoteBeforeSuiteData{[]byte("my data"), types.RemoteBeforeSuiteStatePassed}
+ response = types.RemoteBeforeSuiteData{Data: []byte("my data"), State: types.RemoteBeforeSuiteStatePassed}
outcome = node.Run(parallelNode, parallelTotal, server.URL())
})
@@ -283,7 +284,7 @@ var _ = Describe("SynchronizedBeforeSuiteNode", func() {
Context("when A on node1 fails", func() {
BeforeEach(func() {
- response = types.RemoteBeforeSuiteData{[]byte("my data"), types.RemoteBeforeSuiteStateFailed}
+ response = types.RemoteBeforeSuiteData{Data: []byte("my data"), State: types.RemoteBeforeSuiteStateFailed}
outcome = node.Run(parallelNode, parallelTotal, server.URL())
})
@@ -315,7 +316,7 @@ var _ = Describe("SynchronizedBeforeSuiteNode", func() {
Context("when node1 disappears", func() {
BeforeEach(func() {
- response = types.RemoteBeforeSuiteData{[]byte("my data"), types.RemoteBeforeSuiteStateDisappeared}
+ response = types.RemoteBeforeSuiteData{Data: []byte("my data"), State: types.RemoteBeforeSuiteStateDisappeared}
outcome = node.Run(parallelNode, parallelTotal, server.URL())
})
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
index 522d44e357..6b54afe014 100644
--- a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
@@ -207,7 +207,7 @@ func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
case types.SpecStatePending:
aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
case types.SpecStateSkipped:
- aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
+ aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace)
case types.SpecStateTimedOut:
aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
case types.SpecStatePanicked:
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go
index e5f3b1e307..be7768e70e 100644
--- a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go
@@ -2,6 +2,7 @@ package remote_test
import (
"encoding/json"
+
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
. "github.com/onsi/ginkgo/internal/remote"
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server.go b/vendor/github.com/onsi/ginkgo/internal/remote/server.go
index 297af2ebff..367c54daff 100644
--- a/vendor/github.com/onsi/ginkgo/internal/remote/server.go
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/server.go
@@ -45,7 +45,7 @@ func NewServer(parallelTotal int) (*Server, error) {
listener: listener,
lock: &sync.Mutex{},
alives: make([]func() bool, parallelTotal),
- beforeSuiteData: types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending},
+ beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending},
parallelTotal: parallelTotal,
}, nil
}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server_test.go b/vendor/github.com/onsi/ginkgo/internal/remote/server_test.go
index eb2eefebe0..569fafae09 100644
--- a/vendor/github.com/onsi/ginkgo/internal/remote/server_test.go
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/server_test.go
@@ -146,10 +146,10 @@ var _ = Describe("Server", func() {
Context("when the first node's Alive has not been registered yet", func() {
It("should return pending", func() {
state := getBeforeSuite()
- Ω(state).Should(Equal(types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending}))
+ Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}))
state = getBeforeSuite()
- Ω(state).Should(Equal(types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending}))
+ Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}))
})
})
@@ -162,10 +162,10 @@ var _ = Describe("Server", func() {
It("should return pending", func() {
state := getBeforeSuite()
- Ω(state).Should(Equal(types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending}))
+ Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}))
state = getBeforeSuite()
- Ω(state).Should(Equal(types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending}))
+ Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}))
})
})
@@ -198,10 +198,10 @@ var _ = Describe("Server", func() {
It("should return disappeared", func() {
state := getBeforeSuite()
- Ω(state).Should(Equal(types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStateDisappeared}))
+ Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStateDisappeared}))
state = getBeforeSuite()
- Ω(state).Should(Equal(types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStateDisappeared}))
+ Ω(state).Should(Equal(types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStateDisappeared}))
})
})
})
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
index 5c59728ea9..9550d37b36 100644
--- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
@@ -8,4 +8,4 @@ import "syscall"
// use the nearly identical syscall.Dup3 instead
func syscallDup(oldfd int, newfd int) (err error) {
return syscall.Dup3(oldfd, newfd, 0)
-}
\ No newline at end of file
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
index ecf9cafb66..75ef7fb78e 100644
--- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
@@ -6,4 +6,4 @@ import "golang.org/x/sys/unix"
func syscallDup(oldfd int, newfd int) (err error) {
return unix.Dup2(oldfd, newfd)
-}
\ No newline at end of file
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
index cacdd0e649..ef62559600 100644
--- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
+++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
@@ -8,4 +8,4 @@ import "syscall"
func syscallDup(oldfd int, newfd int) (err error) {
return syscall.Dup2(oldfd, newfd)
-}
\ No newline at end of file
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
index 54e61ecb46..99f548bca4 100644
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
+++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
@@ -2,7 +2,6 @@ package spec_iterator
import (
"encoding/json"
- "errors"
"fmt"
"net/http"
@@ -31,7 +30,7 @@ func (s *ParallelIterator) Next() (*spec.Spec, error) {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
- return nil, errors.New(fmt.Sprintf("unexpected status code %d", resp.StatusCode))
+ return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode)
}
var counter Counter
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go
index d8e05a2d60..c5a762fd5d 100644
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go
+++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator_test.go
@@ -64,10 +64,10 @@ var _ = Describe("ParallelSpecIterator", func() {
Describe("when the server returns well-formed responses", func() {
BeforeEach(func() {
server.AppendHandlers(
- ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{0}),
- ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{1}),
- ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{3}),
- ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{4}),
+ ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{Index: 0}),
+ ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{Index: 1}),
+ ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{Index: 3}),
+ ghttp.RespondWithJSONEncoded(http.StatusOK, Counter{Index: 4}),
)
})
diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go
index 77272e8b86..b16a152e3b 100644
--- a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go
+++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go
@@ -18,7 +18,6 @@ import (
)
var noneFlag = types.FlagTypeNone
-var focusedFlag = types.FlagTypeFocused
var pendingFlag = types.FlagTypePending
var _ = Describe("Spec Runner", func() {
diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
index fb82f70a6d..ac58dd5f7a 100644
--- a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
+++ b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
@@ -66,7 +66,7 @@ func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary)
case types.SpecStatePending:
reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
case types.SpecStateSkipped:
- reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
+ reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct || !reporter.config.NoisySkippings, reporter.config.FullTrace)
case types.SpecStateTimedOut:
reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
case types.SpecStatePanicked:
diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter_test.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter_test.go
index 01528448ea..2dcf276d34 100644
--- a/vendor/github.com/onsi/ginkgo/reporters/default_reporter_test.go
+++ b/vendor/github.com/onsi/ginkgo/reporters/default_reporter_test.go
@@ -28,6 +28,7 @@ var _ = Describe("DefaultReporter", func() {
NoColor: false,
SlowSpecThreshold: 0.1,
NoisyPendings: false,
+ NoisySkippings: false,
Verbose: true,
FullTrace: true,
}
@@ -258,8 +259,8 @@ var _ = Describe("DefaultReporter", func() {
spec.State = types.SpecStateSkipped
})
- It("should announce the skipped spec", func() {
- Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSkippedSpec", spec, false, true)))
+ It("should announce the skipped spec, succinctly", func() {
+ Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSkippedSpec", spec, true, true)))
})
})
@@ -311,6 +312,24 @@ var _ = Describe("DefaultReporter", func() {
})
})
+ Context("in noisy skippings mode", func() {
+ BeforeEach(func() {
+ reporterConfig.Succinct = false
+ reporterConfig.NoisySkippings = true
+ reporter = reporters.NewDefaultReporter(reporterConfig, stenographer)
+ })
+
+ Context("When the spec is skipped", func() {
+ BeforeEach(func() {
+ spec.State = types.SpecStateSkipped
+ })
+
+ It("should announce the skipped spec, noisily", func() {
+ Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSkippedSpec", spec, false, true)))
+ })
+ })
+ })
+
Context("in succinct mode", func() {
BeforeEach(func() {
reporterConfig.Succinct = true
diff --git a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
index 657dfe726e..36ee2a6005 100644
--- a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
+++ b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
@@ -10,10 +10,11 @@ package reporters
import (
"fmt"
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/types"
"io"
"strings"
+
+ "github.com/onsi/ginkgo/config"
+ "github.com/onsi/ginkgo/types"
)
const (
diff --git a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter_test.go b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter_test.go
index de87732113..b45d5db01a 100644
--- a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter_test.go
+++ b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter_test.go
@@ -3,13 +3,14 @@ package reporters_test
import (
"bytes"
"fmt"
+ "time"
+
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/internal/codelocation"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/ginkgo/types"
. "github.com/onsi/gomega"
- "time"
)
var _ = Describe("TeamCity Reporter", func() {
diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml
index 61d0f41faf..6b31f614d7 100644
--- a/vendor/github.com/onsi/gomega/.travis.yml
+++ b/vendor/github.com/onsi/gomega/.travis.yml
@@ -3,10 +3,11 @@ go:
- 1.6
- 1.7
- 1.8
+ - 1.9
install:
- go get -v ./...
- go get github.com/onsi/ginkgo
- go install github.com/onsi/ginkgo/ginkgo
-script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --failOnPending --randomizeSuites --race
+script: $HOME/gopath/bin/ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race && go vet
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index a3e8ee4447..06fe7ad08d 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,5 +1,19 @@
## HEAD
+## 1.3.0
+
+Improvements:
+
+- The `Equal` matcher matches byte slices more performantly.
+- Improved how `MatchError` matches error strings.
+- `MatchXML` ignores the order of xml node attributes.
+- Improve support for XUnit style golang tests. ([#254](https://github.com/onsi/gomega/issues/254))
+
+Bug Fixes:
+
+- Diff generation now handles multi-byte sequences correctly.
+- Multiple goroutines can now call `gexec.Build` concurrently.
+
## 1.2.0
Improvements:
diff --git a/vendor/github.com/onsi/gomega/CONTRIBUTING.md b/vendor/github.com/onsi/gomega/CONTRIBUTING.md
index 73d4020e6b..0d7a099289 100644
--- a/vendor/github.com/onsi/gomega/CONTRIBUTING.md
+++ b/vendor/github.com/onsi/gomega/CONTRIBUTING.md
@@ -6,6 +6,9 @@ Your contributions to Gomega are essential for its long-term maintenance and imp
- Ensure adequate test coverage:
- Make sure to add appropriate unit tests
- Please run all tests locally (`ginkgo -r -p`) and make sure they go green before submitting the PR
+ - Please run following linter locally `go vet ./...` and make sure output does not contain any warnings
- Update the documentation. In addition to standard `godoc` comments Gomega has extensive documentation on the `gh-pages` branch. If relevant, please submit a docs PR to that branch alongside your code PR.
-Thanks for supporting Gomega!
\ No newline at end of file
+If you're a committer, check out RELEASING.md to learn how to cut a release.
+
+Thanks for supporting Gomega!
diff --git a/vendor/github.com/onsi/gomega/RELEASING.md b/vendor/github.com/onsi/gomega/RELEASING.md
new file mode 100644
index 0000000000..537050a9ac
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/RELEASING.md
@@ -0,0 +1,6 @@
+A Gomega release is a tagged sha and a GitHub release. To cut a release:
+
+1. Ensure CHANGELOG.md is uptodate.
+2. Update GOMEGA_VERSION in `gomega_dsl.go`
+3. Push a commit with the version number as the commit message (e.g. `v1.3.0`)
+4. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes.
\ No newline at end of file
diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go
index e206ee59a4..22c8751a3b 100644
--- a/vendor/github.com/onsi/gomega/format/format.go
+++ b/vendor/github.com/onsi/gomega/format/format.go
@@ -123,7 +123,7 @@ func findFirstMismatch(a, b string) int {
bSlice := strings.Split(b, "")
for index, str := range aSlice {
- if index > len(b) - 1 {
+ if index > len(bSlice)-1 {
return index
}
if str != bSlice[index] {
diff --git a/vendor/github.com/onsi/gomega/format/format_test.go b/vendor/github.com/onsi/gomega/format/format_test.go
index a1a9031640..b23fcf795f 100644
--- a/vendor/github.com/onsi/gomega/format/format_test.go
+++ b/vendor/github.com/onsi/gomega/format/format_test.go
@@ -159,6 +159,13 @@ var _ = Describe("Format", func() {
Ω(MessageWithDiff(stringWithB, "to equal", stringWithZ)).Should(Equal(expectedTruncatedEndStringFailureMessage))
})
+
+ It("handles multi-byte sequences correctly", func() {
+ stringA := "• abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz1"
+ stringB := "• abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"
+
+ Ω(MessageWithDiff(stringA, "to equal", stringB)).Should(Equal(expectedTruncatedMultiByteFailureMessage))
+ })
})
Describe("IndentString", func() {
@@ -588,3 +595,9 @@ Expected
to equal |
: "...aaaaa"
`)
+var expectedTruncatedMultiByteFailureMessage = strings.TrimSpace(`
+Expected
+ : "...tuvwxyz1"
+to equal |
+ : "...tuvwxyz"
+`)
diff --git a/vendor/github.com/onsi/gomega/gbytes/say_matcher_test.go b/vendor/github.com/onsi/gomega/gbytes/say_matcher_test.go
index 63fb3b3b8f..1361720b26 100644
--- a/vendor/github.com/onsi/gomega/gbytes/say_matcher_test.go
+++ b/vendor/github.com/onsi/gomega/gbytes/say_matcher_test.go
@@ -1,9 +1,10 @@
package gbytes_test
import (
- . "github.com/onsi/gomega/gbytes"
"time"
+ . "github.com/onsi/gomega/gbytes"
+
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
diff --git a/vendor/github.com/onsi/gomega/gexec/build.go b/vendor/github.com/onsi/gomega/gexec/build.go
index d11b2fd8a3..869c1ead86 100644
--- a/vendor/github.com/onsi/gomega/gexec/build.go
+++ b/vendor/github.com/onsi/gomega/gexec/build.go
@@ -3,12 +3,14 @@ package gexec
import (
"errors"
"fmt"
+ "go/build"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
+ "strings"
"sync"
)
@@ -21,17 +23,18 @@ var (
Build uses go build to compile the package at packagePath. The resulting binary is saved off in a temporary directory.
A path pointing to this binary is returned.
-Build uses the $GOPATH set in your environment. It passes the variadic args on to `go build`.
+Build uses the $GOPATH set in your environment. If $GOPATH is not set and you are using Go 1.8+,
+it will use the default GOPATH instead. It passes the variadic args on to `go build`.
*/
func Build(packagePath string, args ...string) (compiledPath string, err error) {
- return doBuild(os.Getenv("GOPATH"), packagePath, nil, args...)
+ return doBuild(build.Default.GOPATH, packagePath, nil, args...)
}
/*
BuildWithEnvironment is identical to Build but allows you to specify env vars to be set at build time.
*/
func BuildWithEnvironment(packagePath string, env []string, args ...string) (compiledPath string, err error) {
- return doBuild(os.Getenv("GOPATH"), packagePath, env, args...)
+ return doBuild(build.Default.GOPATH, packagePath, env, args...)
}
/*
@@ -41,6 +44,16 @@ func BuildIn(gopath string, packagePath string, args ...string) (compiledPath st
return doBuild(gopath, packagePath, nil, args...)
}
+func replaceGoPath(environ []string, newGoPath string) []string {
+ newEnviron := []string{}
+ for _, v := range environ {
+ if !strings.HasPrefix(v, "GOPATH=") {
+ newEnviron = append(newEnviron, v)
+ }
+ }
+ return append(newEnviron, "GOPATH="+newGoPath)
+}
+
func doBuild(gopath, packagePath string, env []string, args ...string) (compiledPath string, err error) {
tmpDir, err := temporaryDirectory()
if err != nil {
@@ -60,7 +73,7 @@ func doBuild(gopath, packagePath string, env []string, args ...string) (compiled
cmdArgs = append(cmdArgs, "-o", executable, packagePath)
build := exec.Command("go", cmdArgs...)
- build.Env = append([]string{"GOPATH=" + gopath}, os.Environ()...)
+ build.Env = replaceGoPath(os.Environ(), gopath)
build.Env = append(build.Env, env...)
output, err := build.CombinedOutput()
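Note: the build.go change above swaps os.Getenv("GOPATH") for build.Default.GOPATH (so the Go 1.8+ default GOPATH is honoured) and replaces, rather than duplicates, any GOPATH already present in the child environment. A minimal sketch of how a test might drive the updated gexec API; the import path "github.com/example/mycmd" is a placeholder, not part of the patch:

    package mycmd_test

    import (
        "os/exec"
        "testing"

        "github.com/onsi/gomega"
        "github.com/onsi/gomega/gexec"
    )

    func TestCompiledBinary(t *testing.T) {
        g := gomega.NewGomegaWithT(t)

        // With this patch, Build falls back to build.Default.GOPATH when $GOPATH is unset.
        path, err := gexec.Build("github.com/example/mycmd") // placeholder import path
        g.Expect(err).NotTo(gomega.HaveOccurred())
        defer gexec.CleanupBuildArtifacts()

        // Run the compiled binary; nil out/err writers are accepted directly.
        session, err := gexec.Start(exec.Command(path), nil, nil)
        g.Expect(err).NotTo(gomega.HaveOccurred())
        g.Eventually(session).Should(gexec.Exit(0))
    }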
diff --git a/vendor/github.com/onsi/gomega/gexec/build_test.go b/vendor/github.com/onsi/gomega/gexec/build_test.go
index 8df0790cd5..673155ca69 100644
--- a/vendor/github.com/onsi/gomega/gexec/build_test.go
+++ b/vendor/github.com/onsi/gomega/gexec/build_test.go
@@ -57,3 +57,31 @@ var _ = Describe(".BuildWithEnvironment", func() {
Ω(os.Environ()).ShouldNot(ContainElement("GOOS=linux"))
})
})
+
+var _ = Describe(".BuildIn", func() {
+ var (
+ gopath string
+ )
+
+ BeforeEach(func() {
+ gopath = os.Getenv("GOPATH")
+ Expect(gopath).NotTo(BeEmpty())
+ Expect(os.Setenv("GOPATH", "/tmp")).To(Succeed())
+ Expect(os.Environ()).To(ContainElement("GOPATH=/tmp"))
+ })
+
+ AfterEach(func() {
+ Expect(os.Setenv("GOPATH", gopath)).To(Succeed())
+ })
+
+ It("appends the gopath env var", func() {
+ _, err := gexec.BuildIn(gopath, "github.com/onsi/gomega/gexec/_fixture/firefly/")
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("resets GOPATH to its original value", func() {
+ _, err := gexec.BuildIn(gopath, "github.com/onsi/gomega/gexec/_fixture/firefly/")
+ Expect(err).NotTo(HaveOccurred())
+ Expect(os.Getenv("GOPATH")).To(Equal("/tmp"))
+ })
+})
diff --git a/vendor/github.com/onsi/gomega/gexec/exit_matcher_test.go b/vendor/github.com/onsi/gomega/gexec/exit_matcher_test.go
index 79615ddf81..9605ab70ca 100644
--- a/vendor/github.com/onsi/gomega/gexec/exit_matcher_test.go
+++ b/vendor/github.com/onsi/gomega/gexec/exit_matcher_test.go
@@ -1,10 +1,11 @@
package gexec_test
import (
- . "github.com/onsi/gomega/gexec"
"os/exec"
"time"
+ . "github.com/onsi/gomega/gexec"
+
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
diff --git a/vendor/github.com/onsi/gomega/gexec/session.go b/vendor/github.com/onsi/gomega/gexec/session.go
index 387a72cde6..b1ae2f4031 100644
--- a/vendor/github.com/onsi/gomega/gexec/session.go
+++ b/vendor/github.com/onsi/gomega/gexec/session.go
@@ -7,7 +7,6 @@ import (
"io"
"os"
"os/exec"
- "reflect"
"sync"
"syscall"
@@ -78,11 +77,11 @@ func Start(command *exec.Cmd, outWriter io.Writer, errWriter io.Writer) (*Sessio
commandOut, commandErr = session.Out, session.Err
- if outWriter != nil && !reflect.ValueOf(outWriter).IsNil() {
+ if outWriter != nil {
commandOut = io.MultiWriter(commandOut, outWriter)
}
- if errWriter != nil && !reflect.ValueOf(errWriter).IsNil() {
+ if errWriter != nil {
commandErr = io.MultiWriter(commandErr, errWriter)
}
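Note: the removed reflect.ValueOf(...).IsNil() guard existed because an io.Writer interface holding a nil concrete pointer (for example a nil *gbytes.Buffer) is not equal to nil, so the old code unwrapped it before deciding whether to wrap the writer. After this change only a true nil interface is skipped, which is why the tests below now declare the writers as io.Writer. A small illustrative sketch of that typed-nil distinction (not part of the patch):

    package main

    import (
        "fmt"
        "io"

        "github.com/onsi/gomega/gbytes"
    )

    func main() {
        // An io.Writer holding a nil *gbytes.Buffer is a non-nil interface value.
        var buf *gbytes.Buffer
        var typedNil io.Writer = buf
        fmt.Println(typedNil == nil) // false: the interface still carries a concrete type

        // Only a genuinely nil interface is skipped by the simplified check in Start.
        var untyped io.Writer
        fmt.Println(untyped == nil) // true
    }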
diff --git a/vendor/github.com/onsi/gomega/gexec/session_test.go b/vendor/github.com/onsi/gomega/gexec/session_test.go
index b7841a090f..dabca9e496 100644
--- a/vendor/github.com/onsi/gomega/gexec/session_test.go
+++ b/vendor/github.com/onsi/gomega/gexec/session_test.go
@@ -1,6 +1,8 @@
package gexec_test
import (
+ "io"
+ "io/ioutil"
"os/exec"
"syscall"
"time"
@@ -16,7 +18,7 @@ var _ = Describe("Session", func() {
var command *exec.Cmd
var session *Session
- var outWriter, errWriter *Buffer
+ var outWriter, errWriter io.Writer
BeforeEach(func() {
outWriter = nil
@@ -323,22 +325,39 @@ var _ = Describe("Session", func() {
})
Context("when wrapping out and err", func() {
+ var (
+ outWriterBuffer, errWriterBuffer *Buffer
+ )
+
BeforeEach(func() {
- outWriter = NewBuffer()
- errWriter = NewBuffer()
+ outWriterBuffer = NewBuffer()
+ outWriter = outWriterBuffer
+ errWriterBuffer = NewBuffer()
+ errWriter = errWriterBuffer
})
It("should route to both the provided writers and the gbytes buffers", func() {
Eventually(session.Out).Should(Say("We've done the impossible, and that makes us mighty"))
Eventually(session.Err).Should(Say("Ah, curse your sudden but inevitable betrayal!"))
- Ω(outWriter.Contents()).Should(ContainSubstring("We've done the impossible, and that makes us mighty"))
- Ω(errWriter.Contents()).Should(ContainSubstring("Ah, curse your sudden but inevitable betrayal!"))
+ Ω(outWriterBuffer.Contents()).Should(ContainSubstring("We've done the impossible, and that makes us mighty"))
+ Ω(errWriterBuffer.Contents()).Should(ContainSubstring("Ah, curse your sudden but inevitable betrayal!"))
Eventually(session).Should(Exit())
- Ω(outWriter.Contents()).Should(Equal(session.Out.Contents()))
- Ω(errWriter.Contents()).Should(Equal(session.Err.Contents()))
+ Ω(outWriterBuffer.Contents()).Should(Equal(session.Out.Contents()))
+ Ω(errWriterBuffer.Contents()).Should(Equal(session.Err.Contents()))
+ })
+
+ Context("when discarding the output of the command", func() {
+ BeforeEach(func() {
+ outWriter = ioutil.Discard
+ errWriter = ioutil.Discard
+ })
+
+ It("executes succesfuly", func() {
+ Eventually(session).Should(Exit())
+ })
})
})
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index 0d0f563a14..175bef5e58 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -24,11 +24,12 @@ import (
"github.com/onsi/gomega/types"
)
-const GOMEGA_VERSION = "1.2.0"
+const GOMEGA_VERSION = "1.3.0"
const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
Alternatively, you may have forgotten to register a fail handler with RegisterFailHandler() or RegisterTestingT().
+Depending on your vendoring solution you may be inadvertently importing gomega and subpackages (e.g. ghttp, gexec, ...) from different locations.
`
var globalFailHandler types.GomegaFailHandler
@@ -45,7 +46,11 @@ func RegisterFailHandler(handler types.GomegaFailHandler) {
}
//RegisterTestingT connects Gomega to Golang's XUnit style
-//Testing.T tests. You'll need to call this at the top of each XUnit style test:
+//Testing.T tests. It is now deprecated and you should use NewGomegaWithT() instead.
+//
+//Legacy Documentation:
+//
+//You'll need to call this at the top of each XUnit style test:
//
// func TestFarmHasCow(t *testing.T) {
// RegisterTestingT(t)
@@ -58,6 +63,8 @@ func RegisterFailHandler(handler types.GomegaFailHandler) {
// pass `t` down to the matcher itself). This means that you cannot run the XUnit style tests
// in parallel as the global fail handler cannot point to more than one testing.T at a time.
//
+// NewGomegaWithT() does not have this limitation
+//
// (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*).
func RegisterTestingT(t types.GomegaTestingT) {
RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailHandler(t))
@@ -308,6 +315,60 @@ type GomegaAssertion interface {
//OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it
type OmegaMatcher types.GomegaMatcher
+//GomegaWithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage
+//Gomega's rich ecosystem of matchers in standard `testing` test suites.
+//
+//Use `NewGomegaWithT` to instantiate a `GomegaWithT`
+type GomegaWithT struct {
+ t types.GomegaTestingT
+}
+
+//NewGomegaWithT takes a *testing.T and returns a `GomegaWithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with
+//Gomega's rich ecosystem of matchers in standard `testing` test suites.
+//
+// func TestFarmHasCow(t *testing.T) {
+// g := NewGomegaWithT(t)
+//
+// f := farm.New([]string{"Cow", "Horse"})
+// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
+// }
+func NewGomegaWithT(t types.GomegaTestingT) *GomegaWithT {
+ return &GomegaWithT{
+ t: t,
+ }
+}
+
+//See documentation for Expect
+func (g *GomegaWithT) Expect(actual interface{}, extra ...interface{}) GomegaAssertion {
+ return assertion.New(actual, testingtsupport.BuildTestingTGomegaFailHandler(g.t), 0, extra...)
+}
+
+//See documentation for Eventually
+func (g *GomegaWithT) Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+ timeoutInterval := defaultEventuallyTimeout
+ pollingInterval := defaultEventuallyPollingInterval
+ if len(intervals) > 0 {
+ timeoutInterval = toDuration(intervals[0])
+ }
+ if len(intervals) > 1 {
+ pollingInterval = toDuration(intervals[1])
+ }
+ return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, testingtsupport.BuildTestingTGomegaFailHandler(g.t), timeoutInterval, pollingInterval, 0)
+}
+
+//See documentation for Consistently
+func (g *GomegaWithT) Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
+ timeoutInterval := defaultConsistentlyDuration
+ pollingInterval := defaultConsistentlyPollingInterval
+ if len(intervals) > 0 {
+ timeoutInterval = toDuration(intervals[0])
+ }
+ if len(intervals) > 1 {
+ pollingInterval = toDuration(intervals[1])
+ }
+ return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, testingtsupport.BuildTestingTGomegaFailHandler(g.t), timeoutInterval, pollingInterval, 0)
+}
+
func toDuration(input interface{}) time.Duration {
duration, ok := input.(time.Duration)
if ok {
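Note: GomegaWithT routes assertion failures through the supplied t instead of the global fail handler, so the matcher DSL can be used in plain `go test` functions. A hedged usage sketch; the package name, channel and timings are illustrative only:

    package farm_test

    import (
        "testing"
        "time"

        . "github.com/onsi/gomega"
    )

    func TestAsyncResult(t *testing.T) {
        g := NewGomegaWithT(t)

        results := make(chan string, 1)
        go func() {
            time.Sleep(10 * time.Millisecond)
            results <- "done"
        }()

        // Expect reports failures through t, so no RegisterTestingT is required.
        g.Expect(cap(results)).To(Equal(1))

        // Eventually accepts the same optional timeout and polling-interval
        // arguments as the package-level version (converted by toDuration).
        g.Eventually(results, time.Second, 10*time.Millisecond).Should(Receive(Equal("done")))
    }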
diff --git a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go
index b9fbd6c640..fe89e3d944 100644
--- a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go
+++ b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go
@@ -3,10 +3,32 @@ package testingtsupport_test
import (
. "github.com/onsi/gomega"
+ "fmt"
"testing"
)
+type FakeT struct {
+ LastCall string
+}
+
+func (f *FakeT) Fatalf(format string, args ...interface{}) {
+ f.LastCall = fmt.Sprintf(format, args...)
+}
+
func TestTestingT(t *testing.T) {
RegisterTestingT(t)
Ω(true).Should(BeTrue())
}
+
+func TestGomegaWithT(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ f := &FakeT{}
+ testG := NewGomegaWithT(f)
+
+ testG.Expect("foo").To(Equal("foo"))
+ g.Expect(f.LastCall).To(BeZero())
+
+ testG.Expect("foo").To(Equal("bar"))
+ g.Expect(f.LastCall).To(ContainSubstring(": foo"))
+}
diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go
index e6e85d070d..b064925c65 100644
--- a/vendor/github.com/onsi/gomega/matchers.go
+++ b/vendor/github.com/onsi/gomega/matchers.go
@@ -269,7 +269,7 @@ func ContainElement(element interface{}) types.GomegaMatcher {
}
}
-//ConsistOf succeeds if actual contains preciely the elements passed into the matcher. The ordering of the elements does not matter.
+//ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
//
// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
@@ -366,13 +366,13 @@ func BeAnExistingFile() types.GomegaMatcher {
return &matchers.BeAnExistingFileMatcher{}
}
-//BeARegularFile succeeds iff a file exists and is a regular file.
+//BeARegularFile succeeds if a file exists and is a regular file.
//Actual must be a string representing the abs path to the file being checked.
func BeARegularFile() types.GomegaMatcher {
return &matchers.BeARegularFileMatcher{}
}
-//BeADirectory succeeds iff a file exists and is a directory.
+//BeADirectory succeeds if a file exists and is a directory.
//Actual must be a string representing the abs path to the file being checked.
func BeADirectory() types.GomegaMatcher {
return &matchers.BeADirectoryMatcher{}
diff --git a/vendor/github.com/onsi/gomega/matchers/attributes_slice.go b/vendor/github.com/onsi/gomega/matchers/attributes_slice.go
new file mode 100644
index 0000000000..355b362f4b
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/attributes_slice.go
@@ -0,0 +1,14 @@
+package matchers
+
+import (
+ "encoding/xml"
+ "strings"
+)
+
+type attributesSlice []xml.Attr
+
+func (attrs attributesSlice) Len() int { return len(attrs) }
+func (attrs attributesSlice) Less(i, j int) bool {
+ return strings.Compare(attrs[i].Name.Local, attrs[j].Name.Local) == -1
+}
+func (attrs attributesSlice) Swap(i, j int) { attrs[i], attrs[j] = attrs[j], attrs[i] }
diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go
index c1b499597d..ed6f69288e 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go
@@ -2,8 +2,9 @@ package matchers
import (
"fmt"
- "github.com/onsi/gomega/format"
"reflect"
+
+ "github.com/onsi/gomega/format"
)
type BeClosedMatcher struct {
diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
index 55bdd7d15d..8b00311b0d 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
@@ -2,6 +2,7 @@ package matchers
import (
"fmt"
+
"github.com/onsi/gomega/format"
)
diff --git a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
index 32a0c3108a..97ab20a4ec 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
@@ -2,8 +2,9 @@ package matchers
import (
"fmt"
- "github.com/onsi/gomega/format"
"reflect"
+
+ "github.com/onsi/gomega/format"
)
type BeEquivalentToMatcher struct {
diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
index 0b224cbbc6..91d3b779ea 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
@@ -2,6 +2,7 @@ package matchers
import (
"fmt"
+
"github.com/onsi/gomega/format"
)
diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go
index 205d71f405..8ac4468505 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher_test.go
@@ -1,9 +1,10 @@
package matchers_test
import (
- . "github.com/onsi/gomega/matchers"
"time"
+ . "github.com/onsi/gomega/matchers"
+
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go
index abda4eb1e7..cb7c038ef0 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go
@@ -2,8 +2,9 @@ package matchers
import (
"fmt"
- "github.com/onsi/gomega/format"
"time"
+
+ "github.com/onsi/gomega/format"
)
type BeTemporallyMatcher struct {
diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go
index feb33e5dc1..6140e49dc8 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go
@@ -1,10 +1,11 @@
package matchers_test
import (
+ "time"
+
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/matchers"
- "time"
)
var _ = Describe("BeTemporally", func() {
diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
index 1275e5fc9d..ec57c5db4c 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
@@ -2,6 +2,7 @@ package matchers
import (
"fmt"
+
"github.com/onsi/gomega/format"
)
diff --git a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go
index b39c9144be..26196f168f 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go
@@ -1,8 +1,9 @@
package matchers
import (
- "github.com/onsi/gomega/format"
"reflect"
+
+ "github.com/onsi/gomega/format"
)
type BeZeroMatcher struct {
diff --git a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go
index 2e7608921a..f8dc41e74f 100644
--- a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go
@@ -2,8 +2,9 @@ package matchers
import (
"fmt"
- "github.com/onsi/gomega/format"
"strings"
+
+ "github.com/onsi/gomega/format"
)
type ContainSubstringMatcher struct {
diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
index 5701ba6e24..ea5b923366 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
@@ -2,8 +2,9 @@ package matchers
import (
"fmt"
- "github.com/onsi/gomega/format"
"reflect"
+
+ "github.com/onsi/gomega/format"
)
type HaveKeyMatcher struct {
diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
index 464ac187e9..06355b1e95 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
@@ -2,8 +2,9 @@ package matchers
import (
"fmt"
- "github.com/onsi/gomega/format"
"reflect"
+
+ "github.com/onsi/gomega/format"
)
type HaveKeyWithValueMatcher struct {
diff --git a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
index a183775570..ee4276189d 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
@@ -2,6 +2,7 @@ package matchers
import (
"fmt"
+
"github.com/onsi/gomega/format"
)
diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go
index 009e23e5fc..bb52fbe2f1 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go
@@ -2,6 +2,7 @@ package matchers_test
import (
"errors"
+
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/matchers"
diff --git a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
index 8b63a89997..1d8e80270b 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
@@ -2,6 +2,7 @@ package matchers
import (
"fmt"
+
"github.com/onsi/gomega/format"
)
diff --git a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
index afc78fc901..40a3526eb2 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
@@ -2,6 +2,7 @@ package matchers
import (
"fmt"
+
"github.com/onsi/gomega/format"
)
diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
index 03cdf04588..07499ac959 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
@@ -2,8 +2,9 @@ package matchers
import (
"fmt"
- "github.com/onsi/gomega/format"
"reflect"
+
+ "github.com/onsi/gomega/format"
)
type MatchErrorMatcher struct {
@@ -21,14 +22,14 @@ func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err e
actualErr := actual.(error)
- if isString(matcher.Expected) {
- return reflect.DeepEqual(actualErr.Error(), matcher.Expected), nil
- }
-
if isError(matcher.Expected) {
return reflect.DeepEqual(actualErr, matcher.Expected), nil
}
+ if isString(matcher.Expected) {
+ return actualErr.Error() == matcher.Expected, nil
+ }
+
var subMatcher omegaMatcher
var hasSubMatcher bool
if matcher.Expected != nil {
diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go
index 338b512954..b9b1489f6c 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher_test.go
@@ -3,6 +3,7 @@ package matchers_test
import (
"errors"
"fmt"
+
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/matchers"
@@ -90,4 +91,17 @@ var _ = Describe("MatchErrorMatcher", func() {
Ω(err).Should(HaveOccurred())
})
})
+
+ Context("when passed an error that is also a string", func() {
+ It("should use it as an error", func() {
+ var e mockErr = "mockErr"
+
+ // this fails if the matcher casts e to a string before comparison
+ Ω(e).Should(MatchError(e))
+ })
+ })
})
+
+type mockErr string
+
+func (m mockErr) Error() string { return string(m) }
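Note: the reordering in match_error_matcher.go checks the error case before the string case, so a value whose type is both a string and an error (like mockErr above) is compared as an error rather than against its Error() text. A rough sketch of the two comparison paths, calling the matcher directly so no fail handler is needed; the names here are illustrative:

    package main

    import (
        "errors"
        "fmt"

        "github.com/onsi/gomega/matchers"
    )

    // strErr is both a string and an error, mirroring mockErr in the test above.
    type strErr string

    func (s strErr) Error() string { return string(s) }

    func main() {
        // Expected is an error: compared to the actual error with reflect.DeepEqual.
        byError := &matchers.MatchErrorMatcher{Expected: strErr("boom")}
        ok, _ := byError.Match(strErr("boom"))
        fmt.Println(ok) // true: matched as an error, not via its string form

        // Expected is a plain string: compared against actual.Error().
        byText := &matchers.MatchErrorMatcher{Expected: "boom"}
        ok, _ = byText.Match(errors.New("boom"))
        fmt.Println(ok) // true
    }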
diff --git a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
index 7ca79a15be..adac5db6b8 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
@@ -2,8 +2,9 @@ package matchers
import (
"fmt"
- "github.com/onsi/gomega/format"
"regexp"
+
+ "github.com/onsi/gomega/format"
)
type MatchRegexpMatcher struct {
diff --git a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
index da26562902..3b412ce818 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
@@ -7,6 +7,7 @@ import (
"fmt"
"io"
"reflect"
+ "sort"
"strings"
"github.com/onsi/gomega/format"
@@ -82,6 +83,8 @@ func parseXmlContent(content string) (*xmlNode, error) {
switch tok := tok.(type) {
case xml.StartElement:
+ attrs := attributesSlice(tok.Attr)
+ sort.Sort(attrs)
allNodes = append(allNodes, &xmlNode{XMLName: tok.Name, XMLAttr: tok.Attr})
case xml.EndElement:
if len(allNodes) > 1 {
diff --git a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher_test.go b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher_test.go
index 16c1922407..b325f361f9 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher_test.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher_test.go
@@ -23,6 +23,13 @@ var _ = Describe("MatchXMLMatcher", func() {
)
Context("When passed stringifiables", func() {
+ It("matches documents regardless of the attribute order", func() {
+ a := `<a foo="bar" ka="boom"></a>`
+ b := `<a ka="boom" foo="bar"></a>`
+ Ω(b).Should(MatchXML(a))
+ Ω(a).Should(MatchXML(b))
+ })
+
It("should succeed if the XML matches", func() {
Ω(sample_01).Should(MatchXML(sample_01)) // same XML
Ω(sample_01).Should(MatchXML(sample_02)) // same XML with blank lines
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
index 119d21ef31..81b3771119 100644
--- a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
@@ -15,12 +15,12 @@ type BipartiteGraph struct {
func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) {
left := NodeOrderedSet{}
for i, _ := range leftValues {
- left = append(left, Node{i})
+ left = append(left, Node{Id: i})
}
right := NodeOrderedSet{}
for j, _ := range rightValues {
- right = append(right, Node{j + len(left)})
+ right = append(right, Node{Id: j + len(left)})
}
edges := EdgeSet{}
@@ -32,7 +32,7 @@ func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(in
}
if neighbours {
- edges = append(edges, Edge{left[i], right[j]})
+ edges = append(edges, Edge{Node1: left[i], Node2: right[j]})
}
}
}
diff --git a/vendor/github.com/satori/go.uuid/.travis.yml b/vendor/github.com/satori/go.uuid/.travis.yml
index 38517e2ed9..20dd53b8d3 100644
--- a/vendor/github.com/satori/go.uuid/.travis.yml
+++ b/vendor/github.com/satori/go.uuid/.travis.yml
@@ -6,6 +6,14 @@ go:
- 1.4
- 1.5
- 1.6
+ - 1.7
+ - 1.8
+ - 1.9
+ - tip
+matrix:
+ allow_failures:
+ - go: tip
+ fast_finish: true
before_install:
- go get github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover
diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE
index 488357b8af..926d549870 100644
--- a/vendor/github.com/satori/go.uuid/LICENSE
+++ b/vendor/github.com/satori/go.uuid/LICENSE
@@ -1,4 +1,4 @@
-Copyright (C) 2013-2016 by Maxim Bublis
+Copyright (C) 2013-2018 by Maxim Bublis
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md
index b6aad1c813..7b1a722dff 100644
--- a/vendor/github.com/satori/go.uuid/README.md
+++ b/vendor/github.com/satori/go.uuid/README.md
@@ -59,7 +59,7 @@ func main() {
## Copyright
-Copyright (C) 2013-2016 by Maxim Bublis .
+Copyright (C) 2013-2018 by Maxim Bublis .
UUID package released under MIT License.
See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details.
diff --git a/vendor/github.com/satori/go.uuid/benchmarks_test.go b/vendor/github.com/satori/go.uuid/benchmarks_test.go
deleted file mode 100644
index b4e567fc64..0000000000
--- a/vendor/github.com/satori/go.uuid/benchmarks_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright (C) 2013-2015 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-package uuid
-
-import (
- "testing"
-)
-
-func BenchmarkFromBytes(b *testing.B) {
- bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- for i := 0; i < b.N; i++ {
- FromBytes(bytes)
- }
-}
-
-func BenchmarkFromString(b *testing.B) {
- s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
- for i := 0; i < b.N; i++ {
- FromString(s)
- }
-}
-
-func BenchmarkFromStringUrn(b *testing.B) {
- s := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
- for i := 0; i < b.N; i++ {
- FromString(s)
- }
-}
-
-func BenchmarkFromStringWithBrackets(b *testing.B) {
- s := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}"
- for i := 0; i < b.N; i++ {
- FromString(s)
- }
-}
-
-func BenchmarkNewV1(b *testing.B) {
- for i := 0; i < b.N; i++ {
- NewV1()
- }
-}
-
-func BenchmarkNewV2(b *testing.B) {
- for i := 0; i < b.N; i++ {
- NewV2(DomainPerson)
- }
-}
-
-func BenchmarkNewV3(b *testing.B) {
- for i := 0; i < b.N; i++ {
- NewV3(NamespaceDNS, "www.example.com")
- }
-}
-
-func BenchmarkNewV4(b *testing.B) {
- for i := 0; i < b.N; i++ {
- NewV4()
- }
-}
-
-func BenchmarkNewV5(b *testing.B) {
- for i := 0; i < b.N; i++ {
- NewV5(NamespaceDNS, "www.example.com")
- }
-}
-
-func BenchmarkMarshalBinary(b *testing.B) {
- u := NewV4()
- for i := 0; i < b.N; i++ {
- u.MarshalBinary()
- }
-}
-
-func BenchmarkMarshalText(b *testing.B) {
- u := NewV4()
- for i := 0; i < b.N; i++ {
- u.MarshalText()
- }
-}
-
-func BenchmarkUnmarshalBinary(b *testing.B) {
- bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- u := UUID{}
- for i := 0; i < b.N; i++ {
- u.UnmarshalBinary(bytes)
- }
-}
-
-func BenchmarkUnmarshalText(b *testing.B) {
- bytes := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
- u := UUID{}
- for i := 0; i < b.N; i++ {
- u.UnmarshalText(bytes)
- }
-}
-
-func BenchmarkMarshalToString(b *testing.B) {
- u := NewV4()
- for i := 0; i < b.N; i++ {
- u.String()
- }
-}
diff --git a/vendor/github.com/satori/go.uuid/codec.go b/vendor/github.com/satori/go.uuid/codec.go
new file mode 100644
index 0000000000..656892c53e
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/codec.go
@@ -0,0 +1,206 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+)
+
+// FromBytes returns UUID converted from raw byte slice input.
+// It will return error if the slice isn't 16 bytes long.
+func FromBytes(input []byte) (u UUID, err error) {
+ err = u.UnmarshalBinary(input)
+ return
+}
+
+// FromBytesOrNil returns UUID converted from raw byte slice input.
+// Same behavior as FromBytes, but returns a Nil UUID on error.
+func FromBytesOrNil(input []byte) UUID {
+ uuid, err := FromBytes(input)
+ if err != nil {
+ return Nil
+ }
+ return uuid
+}
+
+// FromString returns UUID parsed from string input.
+// Input is expected in a form accepted by UnmarshalText.
+func FromString(input string) (u UUID, err error) {
+ err = u.UnmarshalText([]byte(input))
+ return
+}
+
+// FromStringOrNil returns UUID parsed from string input.
+// Same behavior as FromString, but returns a Nil UUID on error.
+func FromStringOrNil(input string) UUID {
+ uuid, err := FromString(input)
+ if err != nil {
+ return Nil
+ }
+ return uuid
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The encoding is the same as returned by String.
+func (u UUID) MarshalText() (text []byte, err error) {
+ text = []byte(u.String())
+ return
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// Following formats are supported:
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
+// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
+// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+// "6ba7b8109dad11d180b400c04fd430c8"
+// ABNF for supported UUID text representation follows:
+// uuid := canonical | hashlike | braced | urn
+// plain := canonical | hashlike
+// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct
+// hashlike := 12hexoct
+// braced := '{' plain '}'
+// urn := URN ':' UUID-NID ':' plain
+// URN := 'urn'
+// UUID-NID := 'uuid'
+// 12hexoct := 6hexoct 6hexoct
+// 6hexoct := 4hexoct 2hexoct
+// 4hexoct := 2hexoct 2hexoct
+// 2hexoct := hexoct hexoct
+// hexoct := hexdig hexdig
+// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' |
+// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' |
+// 'A' | 'B' | 'C' | 'D' | 'E' | 'F'
+func (u *UUID) UnmarshalText(text []byte) (err error) {
+ switch len(text) {
+ case 32:
+ return u.decodeHashLike(text)
+ case 36:
+ return u.decodeCanonical(text)
+ case 38:
+ return u.decodeBraced(text)
+ case 41:
+ fallthrough
+ case 45:
+ return u.decodeURN(text)
+ default:
+ return fmt.Errorf("uuid: incorrect UUID length: %s", text)
+ }
+}
+
+// decodeCanonical decodes UUID string in format
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8".
+func (u *UUID) decodeCanonical(t []byte) (err error) {
+ if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' {
+ return fmt.Errorf("uuid: incorrect UUID format %s", t)
+ }
+
+ src := t[:]
+ dst := u[:]
+
+ for i, byteGroup := range byteGroups {
+ if i > 0 {
+ src = src[1:] // skip dash
+ }
+ _, err = hex.Decode(dst[:byteGroup/2], src[:byteGroup])
+ if err != nil {
+ return
+ }
+ src = src[byteGroup:]
+ dst = dst[byteGroup/2:]
+ }
+
+ return
+}
+
+// decodeHashLike decodes UUID string in format
+// "6ba7b8109dad11d180b400c04fd430c8".
+func (u *UUID) decodeHashLike(t []byte) (err error) {
+ src := t[:]
+ dst := u[:]
+
+ if _, err = hex.Decode(dst, src); err != nil {
+ return err
+ }
+ return
+}
+
+// decodeBraced decodes UUID string in format
+// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" or in format
+// "{6ba7b8109dad11d180b400c04fd430c8}".
+func (u *UUID) decodeBraced(t []byte) (err error) {
+ l := len(t)
+
+ if t[0] != '{' || t[l-1] != '}' {
+ return fmt.Errorf("uuid: incorrect UUID format %s", t)
+ }
+
+ return u.decodePlain(t[1 : l-1])
+}
+
+// decodeURN decodes UUID string in format
+// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in format
+// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8".
+func (u *UUID) decodeURN(t []byte) (err error) {
+ total := len(t)
+
+ urn_uuid_prefix := t[:9]
+
+ if !bytes.Equal(urn_uuid_prefix, urnPrefix) {
+ return fmt.Errorf("uuid: incorrect UUID format: %s", t)
+ }
+
+ return u.decodePlain(t[9:total])
+}
+
+// decodePlain decodes UUID string in canonical format
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format
+// "6ba7b8109dad11d180b400c04fd430c8".
+func (u *UUID) decodePlain(t []byte) (err error) {
+ switch len(t) {
+ case 32:
+ return u.decodeHashLike(t)
+ case 36:
+ return u.decodeCanonical(t)
+ default:
+ return fmt.Errorf("uuid: incorrrect UUID length: %s", t)
+ }
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (u UUID) MarshalBinary() (data []byte, err error) {
+ data = u.Bytes()
+ return
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+// It will return error if the slice isn't 16 bytes long.
+func (u *UUID) UnmarshalBinary(data []byte) (err error) {
+ if len(data) != Size {
+ err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
+ return
+ }
+ copy(u[:], data)
+
+ return
+}
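Note: codec.go accepts the canonical, hash-like, braced and URN text forms enumerated in the UnmarshalText comment above. A short sketch of parsing the same UUID from several of those forms (values taken from the tests in this patch):

    package main

    import (
        "fmt"

        uuid "github.com/satori/go.uuid"
    )

    func main() {
        forms := []string{
            "6ba7b810-9dad-11d1-80b4-00c04fd430c8",          // canonical
            "6ba7b8109dad11d180b400c04fd430c8",              // hash-like
            "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",        // braced
            "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8", // urn
        }

        for _, f := range forms {
            u, err := uuid.FromString(f)
            if err != nil {
                fmt.Println("parse error:", err)
                continue
            }
            fmt.Println(u) // String() always renders the canonical form
        }

        // FromStringOrNil swallows the error and returns the Nil UUID instead.
        fmt.Println(uuid.FromStringOrNil("not-a-uuid") == uuid.Nil) // true
    }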
diff --git a/vendor/github.com/satori/go.uuid/codec_test.go b/vendor/github.com/satori/go.uuid/codec_test.go
new file mode 100644
index 0000000000..101ec521c2
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/codec_test.go
@@ -0,0 +1,248 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ "bytes"
+
+ . "gopkg.in/check.v1"
+)
+
+type codecTestSuite struct{}
+
+var _ = Suite(&codecTestSuite{})
+
+func (s *codecTestSuite) TestFromBytes(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ u1, err := FromBytes(b1)
+ c.Assert(err, IsNil)
+ c.Assert(u1, Equals, u)
+
+ b2 := []byte{}
+ _, err = FromBytes(b2)
+ c.Assert(err, NotNil)
+}
+
+func (s *codecTestSuite) BenchmarkFromBytes(c *C) {
+ bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ for i := 0; i < c.N; i++ {
+ FromBytes(bytes)
+ }
+}
+
+func (s *codecTestSuite) TestMarshalBinary(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ b2, err := u.MarshalBinary()
+ c.Assert(err, IsNil)
+ c.Assert(bytes.Equal(b1, b2), Equals, true)
+}
+
+func (s *codecTestSuite) BenchmarkMarshalBinary(c *C) {
+ u := NewV4()
+ for i := 0; i < c.N; i++ {
+ u.MarshalBinary()
+ }
+}
+
+func (s *codecTestSuite) TestUnmarshalBinary(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ u1 := UUID{}
+ err := u1.UnmarshalBinary(b1)
+ c.Assert(err, IsNil)
+ c.Assert(u1, Equals, u)
+
+ b2 := []byte{}
+ u2 := UUID{}
+ err = u2.UnmarshalBinary(b2)
+ c.Assert(err, NotNil)
+}
+
+func (s *codecTestSuite) TestFromString(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+ s2 := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}"
+ s3 := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+ s4 := "6ba7b8109dad11d180b400c04fd430c8"
+ s5 := "urn:uuid:6ba7b8109dad11d180b400c04fd430c8"
+
+ _, err := FromString("")
+ c.Assert(err, NotNil)
+
+ u1, err := FromString(s1)
+ c.Assert(err, IsNil)
+ c.Assert(u1, Equals, u)
+
+ u2, err := FromString(s2)
+ c.Assert(err, IsNil)
+ c.Assert(u2, Equals, u)
+
+ u3, err := FromString(s3)
+ c.Assert(err, IsNil)
+ c.Assert(u3, Equals, u)
+
+ u4, err := FromString(s4)
+ c.Assert(err, IsNil)
+ c.Assert(u4, Equals, u)
+
+ u5, err := FromString(s5)
+ c.Assert(err, IsNil)
+ c.Assert(u5, Equals, u)
+}
+
+func (s *codecTestSuite) BenchmarkFromString(c *C) {
+ str := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+ for i := 0; i < c.N; i++ {
+ FromString(str)
+ }
+}
+
+func (s *codecTestSuite) BenchmarkFromStringUrn(c *C) {
+ str := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+ for i := 0; i < c.N; i++ {
+ FromString(str)
+ }
+}
+
+func (s *codecTestSuite) BenchmarkFromStringWithBrackets(c *C) {
+ str := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}"
+ for i := 0; i < c.N; i++ {
+ FromString(str)
+ }
+}
+
+func (s *codecTestSuite) TestFromStringShort(c *C) {
+ // Invalid 35-character UUID string
+ s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c"
+
+ for i := len(s1); i >= 0; i-- {
+ _, err := FromString(s1[:i])
+ c.Assert(err, NotNil)
+ }
+}
+
+func (s *codecTestSuite) TestFromStringLong(c *C) {
+ // Invalid 37+ character UUID string
+ strings := []string{
+ "6ba7b810-9dad-11d1-80b4-00c04fd430c8=",
+ "6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
+ "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}f",
+ "6ba7b810-9dad-11d1-80b4-00c04fd430c800c04fd430c8",
+ }
+
+ for _, str := range strings {
+ _, err := FromString(str)
+ c.Assert(err, NotNil)
+ }
+}
+
+func (s *codecTestSuite) TestFromStringInvalid(c *C) {
+ // Invalid UUID string formats
+ strings := []string{
+ "6ba7b8109dad11d180b400c04fd430c86ba7b8109dad11d180b400c04fd430c8",
+ "urn:uuid:{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
+ "uuid:urn:6ba7b810-9dad-11d1-80b4-00c04fd430c8",
+ "uuid:urn:6ba7b8109dad11d180b400c04fd430c8",
+ "6ba7b8109-dad-11d1-80b4-00c04fd430c8",
+ "6ba7b810-9dad1-1d1-80b4-00c04fd430c8",
+ "6ba7b810-9dad-11d18-0b4-00c04fd430c8",
+ "6ba7b810-9dad-11d1-80b40-0c04fd430c8",
+ "6ba7b810+9dad+11d1+80b4+00c04fd430c8",
+ "(6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
+ "{6ba7b810-9dad-11d1-80b4-00c04fd430c8>",
+ "zba7b810-9dad-11d1-80b4-00c04fd430c8",
+ "6ba7b810-9dad11d180b400c04fd430c8",
+ "6ba7b8109dad-11d180b400c04fd430c8",
+ "6ba7b8109dad11d1-80b400c04fd430c8",
+ "6ba7b8109dad11d180b4-00c04fd430c8",
+ }
+
+ for _, str := range strings {
+ _, err := FromString(str)
+ c.Assert(err, NotNil)
+ }
+}
+
+func (s *codecTestSuite) TestFromStringOrNil(c *C) {
+ u := FromStringOrNil("")
+ c.Assert(u, Equals, Nil)
+}
+
+func (s *codecTestSuite) TestFromBytesOrNil(c *C) {
+ b := []byte{}
+ u := FromBytesOrNil(b)
+ c.Assert(u, Equals, Nil)
+}
+
+func (s *codecTestSuite) TestMarshalText(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+
+ b2, err := u.MarshalText()
+ c.Assert(err, IsNil)
+ c.Assert(bytes.Equal(b1, b2), Equals, true)
+}
+
+func (s *codecTestSuite) BenchmarkMarshalText(c *C) {
+ u := NewV4()
+ for i := 0; i < c.N; i++ {
+ u.MarshalText()
+ }
+}
+
+func (s *codecTestSuite) TestUnmarshalText(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+
+ u1 := UUID{}
+ err := u1.UnmarshalText(b1)
+ c.Assert(err, IsNil)
+ c.Assert(u1, Equals, u)
+
+ b2 := []byte("")
+ u2 := UUID{}
+ err = u2.UnmarshalText(b2)
+ c.Assert(err, NotNil)
+}
+
+func (s *codecTestSuite) BenchmarkUnmarshalText(c *C) {
+ bytes := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+ u := UUID{}
+ for i := 0; i < c.N; i++ {
+ u.UnmarshalText(bytes)
+ }
+}
+
+var sink string
+
+func (s *codecTestSuite) BenchmarkMarshalToString(c *C) {
+ u := NewV4()
+ for i := 0; i < c.N; i++ {
+ sink = u.String()
+ }
+}
diff --git a/vendor/github.com/satori/go.uuid/generator.go b/vendor/github.com/satori/go.uuid/generator.go
new file mode 100644
index 0000000000..3f2f1da2dc
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/generator.go
@@ -0,0 +1,239 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ "crypto/md5"
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/binary"
+ "hash"
+ "net"
+ "os"
+ "sync"
+ "time"
+)
+
+// Difference in 100-nanosecond intervals between
+// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
+const epochStart = 122192928000000000
+
+var (
+ global = newDefaultGenerator()
+
+ epochFunc = unixTimeFunc
+ posixUID = uint32(os.Getuid())
+ posixGID = uint32(os.Getgid())
+)
+
+// NewV1 returns UUID based on current timestamp and MAC address.
+func NewV1() UUID {
+ return global.NewV1()
+}
+
+// NewV2 returns DCE Security UUID based on POSIX UID/GID.
+func NewV2(domain byte) UUID {
+ return global.NewV2(domain)
+}
+
+// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
+func NewV3(ns UUID, name string) UUID {
+ return global.NewV3(ns, name)
+}
+
+// NewV4 returns random generated UUID.
+func NewV4() UUID {
+ return global.NewV4()
+}
+
+// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
+func NewV5(ns UUID, name string) UUID {
+ return global.NewV5(ns, name)
+}
+
+// Generator provides interface for generating UUIDs.
+type Generator interface {
+ NewV1() UUID
+ NewV2(domain byte) UUID
+ NewV3(ns UUID, name string) UUID
+ NewV4() UUID
+ NewV5(ns UUID, name string) UUID
+}
+
+// Default generator implementation.
+type generator struct {
+ storageOnce sync.Once
+ storageMutex sync.Mutex
+
+ lastTime uint64
+ clockSequence uint16
+ hardwareAddr [6]byte
+}
+
+func newDefaultGenerator() Generator {
+ return &generator{}
+}
+
+// NewV1 returns UUID based on current timestamp and MAC address.
+func (g *generator) NewV1() UUID {
+ u := UUID{}
+
+ timeNow, clockSeq, hardwareAddr := g.getStorage()
+
+ binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
+ binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+ binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+ binary.BigEndian.PutUint16(u[8:], clockSeq)
+
+ copy(u[10:], hardwareAddr)
+
+ u.SetVersion(V1)
+ u.SetVariant(VariantRFC4122)
+
+ return u
+}
+
+// NewV2 returns DCE Security UUID based on POSIX UID/GID.
+func (g *generator) NewV2(domain byte) UUID {
+ u := UUID{}
+
+ timeNow, clockSeq, hardwareAddr := g.getStorage()
+
+ switch domain {
+ case DomainPerson:
+ binary.BigEndian.PutUint32(u[0:], posixUID)
+ case DomainGroup:
+ binary.BigEndian.PutUint32(u[0:], posixGID)
+ }
+
+ binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+ binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+ binary.BigEndian.PutUint16(u[8:], clockSeq)
+ u[9] = domain
+
+ copy(u[10:], hardwareAddr)
+
+ u.SetVersion(V2)
+ u.SetVariant(VariantRFC4122)
+
+ return u
+}
+
+// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
+func (g *generator) NewV3(ns UUID, name string) UUID {
+ u := newFromHash(md5.New(), ns, name)
+ u.SetVersion(V3)
+ u.SetVariant(VariantRFC4122)
+
+ return u
+}
+
+// NewV4 returns random generated UUID.
+func (g *generator) NewV4() UUID {
+ u := UUID{}
+ g.safeRandom(u[:])
+ u.SetVersion(V4)
+ u.SetVariant(VariantRFC4122)
+
+ return u
+}
+
+// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
+func (g *generator) NewV5(ns UUID, name string) UUID {
+ u := newFromHash(sha1.New(), ns, name)
+ u.SetVersion(V5)
+ u.SetVariant(VariantRFC4122)
+
+ return u
+}
+
+func (g *generator) initStorage() {
+ g.initClockSequence()
+ g.initHardwareAddr()
+}
+
+func (g *generator) initClockSequence() {
+ buf := make([]byte, 2)
+ g.safeRandom(buf)
+ g.clockSequence = binary.BigEndian.Uint16(buf)
+}
+
+func (g *generator) initHardwareAddr() {
+ interfaces, err := net.Interfaces()
+ if err == nil {
+ for _, iface := range interfaces {
+ if len(iface.HardwareAddr) >= 6 {
+ copy(g.hardwareAddr[:], iface.HardwareAddr)
+ return
+ }
+ }
+ }
+
+ // Initialize hardwareAddr randomly when no
+ // real network interface is available
+ g.safeRandom(g.hardwareAddr[:])
+
+ // Set multicast bit as recommended in RFC 4122
+ g.hardwareAddr[0] |= 0x01
+}
+
+func (g *generator) safeRandom(dest []byte) {
+ if _, err := rand.Read(dest); err != nil {
+ panic(err)
+ }
+}
+
+// Returns UUID v1/v2 storage state.
+// Returns epoch timestamp, clock sequence, and hardware address.
+func (g *generator) getStorage() (uint64, uint16, []byte) {
+ g.storageOnce.Do(g.initStorage)
+
+ g.storageMutex.Lock()
+ defer g.storageMutex.Unlock()
+
+ timeNow := epochFunc()
+ // Clock changed backwards since last UUID generation.
+ // Should increase clock sequence.
+ if timeNow <= g.lastTime {
+ g.clockSequence++
+ }
+ g.lastTime = timeNow
+
+ return timeNow, g.clockSequence, g.hardwareAddr[:]
+}
+
+// Returns difference in 100-nanosecond intervals between
+// UUID epoch (October 15, 1582) and current time.
+// This is default epoch calculation function.
+func unixTimeFunc() uint64 {
+ return epochStart + uint64(time.Now().UnixNano()/100)
+}
+
+// Returns UUID based on hashing of namespace UUID and name.
+func newFromHash(h hash.Hash, ns UUID, name string) UUID {
+ u := UUID{}
+ h.Write(ns[:])
+ h.Write([]byte(name))
+ copy(u[:], h.Sum(nil))
+
+ return u
+}
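Note: generator.go funnels the package-level NewV1–NewV5 constructors through a default Generator. A brief sketch of the properties the tests below rely on: V4 values are random, while V3/V5 values are deterministic hashes of namespace plus name:

    package main

    import (
        "fmt"

        uuid "github.com/satori/go.uuid"
    )

    func main() {
        // V4 values are random, so two calls should differ.
        a, b := uuid.NewV4(), uuid.NewV4()
        fmt.Println(a != b, a.Version(), a.Variant() == uuid.VariantRFC4122)

        // V5 values hash namespace+name with SHA-1, so equal inputs give equal UUIDs.
        x := uuid.NewV5(uuid.NamespaceDNS, "example.org")
        y := uuid.NewV5(uuid.NamespaceDNS, "example.org")
        fmt.Println(x == y, x.Version()) // true 5
    }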
diff --git a/vendor/github.com/satori/go.uuid/generator_test.go b/vendor/github.com/satori/go.uuid/generator_test.go
new file mode 100644
index 0000000000..cd69e2efb8
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/generator_test.go
@@ -0,0 +1,134 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ . "gopkg.in/check.v1"
+)
+
+type genTestSuite struct{}
+
+var _ = Suite(&genTestSuite{})
+
+func (s *genTestSuite) TestNewV1(c *C) {
+ u := NewV1()
+ c.Assert(u.Version(), Equals, V1)
+ c.Assert(u.Variant(), Equals, VariantRFC4122)
+
+ u1 := NewV1()
+ u2 := NewV1()
+ c.Assert(u1, Not(Equals), u2)
+
+ oldFunc := epochFunc
+ epochFunc = func() uint64 { return 0 }
+
+ u3 := NewV1()
+ u4 := NewV1()
+ c.Assert(u3, Not(Equals), u4)
+
+ epochFunc = oldFunc
+}
+
+func (s *genTestSuite) BenchmarkNewV1(c *C) {
+ for i := 0; i < c.N; i++ {
+ NewV1()
+ }
+}
+
+func (s *genTestSuite) TestNewV2(c *C) {
+ u1 := NewV2(DomainPerson)
+ c.Assert(u1.Version(), Equals, V2)
+ c.Assert(u1.Variant(), Equals, VariantRFC4122)
+
+ u2 := NewV2(DomainGroup)
+ c.Assert(u2.Version(), Equals, V2)
+ c.Assert(u2.Variant(), Equals, VariantRFC4122)
+}
+
+func (s *genTestSuite) BenchmarkNewV2(c *C) {
+ for i := 0; i < c.N; i++ {
+ NewV2(DomainPerson)
+ }
+}
+
+func (s *genTestSuite) TestNewV3(c *C) {
+ u := NewV3(NamespaceDNS, "www.example.com")
+ c.Assert(u.Version(), Equals, V3)
+ c.Assert(u.Variant(), Equals, VariantRFC4122)
+ c.Assert(u.String(), Equals, "5df41881-3aed-3515-88a7-2f4a814cf09e")
+
+ u = NewV3(NamespaceDNS, "python.org")
+ c.Assert(u.String(), Equals, "6fa459ea-ee8a-3ca4-894e-db77e160355e")
+
+ u1 := NewV3(NamespaceDNS, "golang.org")
+ u2 := NewV3(NamespaceDNS, "golang.org")
+ c.Assert(u1, Equals, u2)
+
+ u3 := NewV3(NamespaceDNS, "example.com")
+ c.Assert(u1, Not(Equals), u3)
+
+ u4 := NewV3(NamespaceURL, "golang.org")
+ c.Assert(u1, Not(Equals), u4)
+}
+
+func (s *genTestSuite) BenchmarkNewV3(c *C) {
+ for i := 0; i < c.N; i++ {
+ NewV3(NamespaceDNS, "www.example.com")
+ }
+}
+
+func (s *genTestSuite) TestNewV4(c *C) {
+ u := NewV4()
+ c.Assert(u.Version(), Equals, V4)
+ c.Assert(u.Variant(), Equals, VariantRFC4122)
+}
+
+func (s *genTestSuite) BenchmarkNewV4(c *C) {
+ for i := 0; i < c.N; i++ {
+ NewV4()
+ }
+}
+
+func (s *genTestSuite) TestNewV5(c *C) {
+ u := NewV5(NamespaceDNS, "www.example.com")
+ c.Assert(u.Version(), Equals, V5)
+ c.Assert(u.Variant(), Equals, VariantRFC4122)
+
+ u = NewV5(NamespaceDNS, "python.org")
+ c.Assert(u.String(), Equals, "886313e1-3b8a-5372-9b90-0c9aee199e5d")
+
+ u1 := NewV5(NamespaceDNS, "golang.org")
+ u2 := NewV5(NamespaceDNS, "golang.org")
+ c.Assert(u1, Equals, u2)
+
+ u3 := NewV5(NamespaceDNS, "example.com")
+ c.Assert(u1, Not(Equals), u3)
+
+ u4 := NewV5(NamespaceURL, "golang.org")
+ c.Assert(u1, Not(Equals), u4)
+}
+
+func (s *genTestSuite) BenchmarkNewV5(c *C) {
+ for i := 0; i < c.N; i++ {
+ NewV5(NamespaceDNS, "www.example.com")
+ }
+}
diff --git a/vendor/github.com/satori/go.uuid/sql.go b/vendor/github.com/satori/go.uuid/sql.go
new file mode 100644
index 0000000000..56759d3905
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/sql.go
@@ -0,0 +1,78 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+// Value implements the driver.Valuer interface.
+func (u UUID) Value() (driver.Value, error) {
+ return u.String(), nil
+}
+
+// Scan implements the sql.Scanner interface.
+// A 16-byte slice is handled by UnmarshalBinary, while
+// a longer byte slice or a string is handled by UnmarshalText.
+func (u *UUID) Scan(src interface{}) error {
+ switch src := src.(type) {
+ case []byte:
+ if len(src) == Size {
+ return u.UnmarshalBinary(src)
+ }
+ return u.UnmarshalText(src)
+
+ case string:
+ return u.UnmarshalText([]byte(src))
+ }
+
+ return fmt.Errorf("uuid: cannot convert %T to UUID", src)
+}
+
+// NullUUID can be used with the standard sql package to represent a
+// UUID value that can be NULL in the database
+type NullUUID struct {
+ UUID UUID
+ Valid bool
+}
+
+// Value implements the driver.Valuer interface.
+func (u NullUUID) Value() (driver.Value, error) {
+ if !u.Valid {
+ return nil, nil
+ }
+ // Delegate to UUID Value function
+ return u.UUID.Value()
+}
+
+// Scan implements the sql.Scanner interface.
+func (u *NullUUID) Scan(src interface{}) error {
+ if src == nil {
+ u.UUID, u.Valid = Nil, false
+ return nil
+ }
+
+ // Delegate to UUID Scan function
+ u.Valid = true
+ return u.UUID.Scan(src)
+}
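
For context, a minimal sketch of how the NullUUID type above might be used with database/sql; the table and column names here are hypothetical and not part of the library:

package example // illustrative only

import (
	"database/sql"

	uuid "github.com/satori/go.uuid"
)

// ownerOf reads a column that may be NULL. NullUUID.Scan sets Valid to
// false on NULL and otherwise delegates to UUID.Scan.
func ownerOf(db *sql.DB, name string) (uuid.NullUUID, error) {
	var owner uuid.NullUUID
	err := db.QueryRow("SELECT owner_id FROM items WHERE name = ?", name).Scan(&owner)
	return owner, err
}
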
diff --git a/vendor/github.com/satori/go.uuid/sql_test.go b/vendor/github.com/satori/go.uuid/sql_test.go
new file mode 100644
index 0000000000..74255f50d9
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/sql_test.go
@@ -0,0 +1,136 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ . "gopkg.in/check.v1"
+)
+
+type sqlTestSuite struct{}
+
+var _ = Suite(&sqlTestSuite{})
+
+func (s *sqlTestSuite) TestValue(c *C) {
+ u, err := FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+ c.Assert(err, IsNil)
+
+ val, err := u.Value()
+ c.Assert(err, IsNil)
+ c.Assert(val, Equals, u.String())
+}
+
+func (s *sqlTestSuite) TestValueNil(c *C) {
+ u := UUID{}
+
+ val, err := u.Value()
+ c.Assert(err, IsNil)
+ c.Assert(val, Equals, Nil.String())
+}
+
+func (s *sqlTestSuite) TestNullUUIDValueNil(c *C) {
+ u := NullUUID{}
+
+ val, err := u.Value()
+ c.Assert(err, IsNil)
+ c.Assert(val, IsNil)
+}
+
+func (s *sqlTestSuite) TestScanBinary(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ u1 := UUID{}
+ err := u1.Scan(b1)
+ c.Assert(err, IsNil)
+ c.Assert(u, Equals, u1)
+
+ b2 := []byte{}
+ u2 := UUID{}
+
+ err = u2.Scan(b2)
+ c.Assert(err, NotNil)
+}
+
+func (s *sqlTestSuite) TestScanString(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+
+ u1 := UUID{}
+ err := u1.Scan(s1)
+ c.Assert(err, IsNil)
+ c.Assert(u, Equals, u1)
+
+ s2 := ""
+ u2 := UUID{}
+
+ err = u2.Scan(s2)
+ c.Assert(err, NotNil)
+}
+
+func (s *sqlTestSuite) TestScanText(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+
+ u1 := UUID{}
+ err := u1.Scan(b1)
+ c.Assert(err, IsNil)
+ c.Assert(u, Equals, u1)
+
+ b2 := []byte("")
+ u2 := UUID{}
+ err = u2.Scan(b2)
+ c.Assert(err, NotNil)
+}
+
+func (s *sqlTestSuite) TestScanUnsupported(c *C) {
+ u := UUID{}
+
+ err := u.Scan(true)
+ c.Assert(err, NotNil)
+}
+
+func (s *sqlTestSuite) TestScanNil(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ err := u.Scan(nil)
+ c.Assert(err, NotNil)
+}
+
+func (s *sqlTestSuite) TestNullUUIDScanValid(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+
+ u1 := NullUUID{}
+ err := u1.Scan(s1)
+ c.Assert(err, IsNil)
+ c.Assert(u1.Valid, Equals, true)
+ c.Assert(u1.UUID, Equals, u)
+}
+
+func (s *sqlTestSuite) TestNullUUIDScanNil(c *C) {
+ u := NullUUID{UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}, true}
+
+ err := u.Scan(nil)
+ c.Assert(err, IsNil)
+ c.Assert(u.Valid, Equals, false)
+ c.Assert(u.UUID, Equals, Nil)
+}
diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go
index 9c7fbaa54e..a2b8e2ca2a 100644
--- a/vendor/github.com/satori/go.uuid/uuid.go
+++ b/vendor/github.com/satori/go.uuid/uuid.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2013-2015 by Maxim Bublis
+// Copyright (C) 2013-2018 by Maxim Bublis
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
@@ -26,23 +26,29 @@ package uuid
import (
"bytes"
- "crypto/md5"
- "crypto/rand"
- "crypto/sha1"
- "database/sql/driver"
- "encoding/binary"
"encoding/hex"
- "fmt"
- "hash"
- "net"
- "os"
- "sync"
- "time"
+)
+
+// Size of a UUID in bytes.
+const Size = 16
+
+// UUID representation compliant with specification
+// described in RFC 4122.
+type UUID [Size]byte
+
+// UUID versions
+const (
+ _ byte = iota
+ V1
+ V2
+ V3
+ V4
+ V5
)
// UUID layout variants.
const (
- VariantNCS = iota
+ VariantNCS byte = iota
VariantRFC4122
VariantMicrosoft
VariantFuture
@@ -55,136 +61,48 @@ const (
DomainOrg
)
-// Difference in 100-nanosecond intervals between
-// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
-const epochStart = 122192928000000000
-
-// Used in string method conversion
-const dash byte = '-'
-
-// UUID v1/v2 storage.
-var (
- storageMutex sync.Mutex
- storageOnce sync.Once
- epochFunc = unixTimeFunc
- clockSequence uint16
- lastTime uint64
- hardwareAddr [6]byte
- posixUID = uint32(os.Getuid())
- posixGID = uint32(os.Getgid())
-)
-
// String parse helpers.
var (
urnPrefix = []byte("urn:uuid:")
byteGroups = []int{8, 4, 4, 4, 12}
)
-func initClockSequence() {
- buf := make([]byte, 2)
- safeRandom(buf)
- clockSequence = binary.BigEndian.Uint16(buf)
-}
-
-func initHardwareAddr() {
- interfaces, err := net.Interfaces()
- if err == nil {
- for _, iface := range interfaces {
- if len(iface.HardwareAddr) >= 6 {
- copy(hardwareAddr[:], iface.HardwareAddr)
- return
- }
- }
- }
-
- // Initialize hardwareAddr randomly in case
- // of real network interfaces absence
- safeRandom(hardwareAddr[:])
-
- // Set multicast bit as recommended in RFC 4122
- hardwareAddr[0] |= 0x01
-}
-
-func initStorage() {
- initClockSequence()
- initHardwareAddr()
-}
-
-func safeRandom(dest []byte) {
- if _, err := rand.Read(dest); err != nil {
- panic(err)
- }
-}
-
-// Returns difference in 100-nanosecond intervals between
-// UUID epoch (October 15, 1582) and current time.
-// This is default epoch calculation function.
-func unixTimeFunc() uint64 {
- return epochStart + uint64(time.Now().UnixNano()/100)
-}
-
-// UUID representation compliant with specification
-// described in RFC 4122.
-type UUID [16]byte
-
-// NullUUID can be used with the standard sql package to represent a
-// UUID value that can be NULL in the database
-type NullUUID struct {
- UUID UUID
- Valid bool
-}
-
-// The nil UUID is special form of UUID that is specified to have all
+// Nil is a special form of UUID that is specified to have all
// 128 bits set to zero.
var Nil = UUID{}
// Predefined namespace UUIDs.
var (
- NamespaceDNS, _ = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
- NamespaceURL, _ = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
- NamespaceOID, _ = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
- NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+ NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+ NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+ NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+ NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
)
-// And returns result of binary AND of two UUIDs.
-func And(u1 UUID, u2 UUID) UUID {
- u := UUID{}
- for i := 0; i < 16; i++ {
- u[i] = u1[i] & u2[i]
- }
- return u
-}
-
-// Or returns result of binary OR of two UUIDs.
-func Or(u1 UUID, u2 UUID) UUID {
- u := UUID{}
- for i := 0; i < 16; i++ {
- u[i] = u1[i] | u2[i]
- }
- return u
-}
-
// Equal returns true if u1 and u2 equals, otherwise returns false.
func Equal(u1 UUID, u2 UUID) bool {
return bytes.Equal(u1[:], u2[:])
}
// Version returns algorithm version used to generate UUID.
-func (u UUID) Version() uint {
- return uint(u[6] >> 4)
+func (u UUID) Version() byte {
+ return u[6] >> 4
}
// Variant returns UUID layout variant.
-func (u UUID) Variant() uint {
+func (u UUID) Variant() byte {
switch {
- case (u[8] & 0x80) == 0x00:
+ case (u[8] >> 7) == 0x00:
return VariantNCS
- case (u[8]&0xc0)|0x80 == 0x80:
+ case (u[8] >> 6) == 0x02:
return VariantRFC4122
- case (u[8]&0xe0)|0xc0 == 0xc0:
+ case (u[8] >> 5) == 0x06:
return VariantMicrosoft
+ case (u[8] >> 5) == 0x07:
+ fallthrough
+ default:
+ return VariantFuture
}
- return VariantFuture
}
// Bytes returns bytes slice representation of UUID.
@@ -198,13 +116,13 @@ func (u UUID) String() string {
buf := make([]byte, 36)
hex.Encode(buf[0:8], u[0:4])
- buf[8] = dash
+ buf[8] = '-'
hex.Encode(buf[9:13], u[4:6])
- buf[13] = dash
+ buf[13] = '-'
hex.Encode(buf[14:18], u[6:8])
- buf[18] = dash
+ buf[18] = '-'
hex.Encode(buf[19:23], u[8:10])
- buf[23] = dash
+ buf[23] = '-'
hex.Encode(buf[24:], u[10:])
return string(buf)
@@ -215,274 +133,29 @@ func (u *UUID) SetVersion(v byte) {
u[6] = (u[6] & 0x0f) | (v << 4)
}
-// SetVariant sets variant bits as described in RFC 4122.
-func (u *UUID) SetVariant() {
- u[8] = (u[8] & 0xbf) | 0x80
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-// The encoding is the same as returned by String.
-func (u UUID) MarshalText() (text []byte, err error) {
- text = []byte(u.String())
- return
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-// Following formats are supported:
-// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
-// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
-// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
-func (u *UUID) UnmarshalText(text []byte) (err error) {
- if len(text) < 32 {
- err = fmt.Errorf("uuid: UUID string too short: %s", text)
- return
+// SetVariant sets variant bits.
+func (u *UUID) SetVariant(v byte) {
+ switch v {
+ case VariantNCS:
+ u[8] = (u[8]&(0xff>>1) | (0x00 << 7))
+ case VariantRFC4122:
+ u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
+ case VariantMicrosoft:
+ u[8] = (u[8]&(0xff>>3) | (0x06 << 5))
+ case VariantFuture:
+ fallthrough
+ default:
+ u[8] = (u[8]&(0xff>>3) | (0x07 << 5))
}
-
- t := text[:]
- braced := false
-
- if bytes.Equal(t[:9], urnPrefix) {
- t = t[9:]
- } else if t[0] == '{' {
- braced = true
- t = t[1:]
- }
-
- b := u[:]
-
- for i, byteGroup := range byteGroups {
- if i > 0 && t[0] == '-' {
- t = t[1:]
- } else if i > 0 && t[0] != '-' {
- err = fmt.Errorf("uuid: invalid string format")
- return
- }
-
- if i == 2 {
- if !bytes.Contains([]byte("012345"), []byte{t[0]}) {
- err = fmt.Errorf("uuid: invalid version number: %s", t[0])
- return
- }
- }
-
- if len(t) < byteGroup {
- err = fmt.Errorf("uuid: UUID string too short: %s", text)
- return
- }
-
- if i == 4 && len(t) > byteGroup &&
- ((braced && t[byteGroup] != '}') || len(t[byteGroup:]) > 1 || !braced) {
- err = fmt.Errorf("uuid: UUID string too long: %s", t)
- return
- }
-
- _, err = hex.Decode(b[:byteGroup/2], t[:byteGroup])
-
- if err != nil {
- return
- }
-
- t = t[byteGroup:]
- b = b[byteGroup/2:]
- }
-
- return
-}
-
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-func (u UUID) MarshalBinary() (data []byte, err error) {
- data = u.Bytes()
- return
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-// It will return error if the slice isn't 16 bytes long.
-func (u *UUID) UnmarshalBinary(data []byte) (err error) {
- if len(data) != 16 {
- err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
- return
- }
- copy(u[:], data)
-
- return
}
-// Value implements the driver.Valuer interface.
-func (u UUID) Value() (driver.Value, error) {
- return u.String(), nil
-}
-
-// Scan implements the sql.Scanner interface.
-// A 16-byte slice is handled by UnmarshalBinary, while
-// a longer byte slice or a string is handled by UnmarshalText.
-func (u *UUID) Scan(src interface{}) error {
- switch src := src.(type) {
- case []byte:
- if len(src) == 16 {
- return u.UnmarshalBinary(src)
- }
- return u.UnmarshalText(src)
-
- case string:
- return u.UnmarshalText([]byte(src))
- }
-
- return fmt.Errorf("uuid: cannot convert %T to UUID", src)
-}
-
-// Value implements the driver.Valuer interface.
-func (u NullUUID) Value() (driver.Value, error) {
- if !u.Valid {
- return nil, nil
- }
- // Delegate to UUID Value function
- return u.UUID.Value()
-}
-
-// Scan implements the sql.Scanner interface.
-func (u *NullUUID) Scan(src interface{}) error {
- if src == nil {
- u.UUID, u.Valid = Nil, false
- return nil
- }
-
- // Delegate to UUID Scan function
- u.Valid = true
- return u.UUID.Scan(src)
-}
-
-// FromBytes returns UUID converted from raw byte slice input.
-// It will return error if the slice isn't 16 bytes long.
-func FromBytes(input []byte) (u UUID, err error) {
- err = u.UnmarshalBinary(input)
- return
-}
-
-// FromBytesOrNil returns UUID converted from raw byte slice input.
-// Same behavior as FromBytes, but returns a Nil UUID on error.
-func FromBytesOrNil(input []byte) UUID {
- uuid, err := FromBytes(input)
+// Must is a helper that wraps a call to a function returning (UUID, error)
+// and panics if the error is non-nil. It is intended for use in variable
+// initializations such as
+// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000"))
+func Must(u UUID, err error) UUID {
if err != nil {
- return Nil
- }
- return uuid
-}
-
-// FromString returns UUID parsed from string input.
-// Input is expected in a form accepted by UnmarshalText.
-func FromString(input string) (u UUID, err error) {
- err = u.UnmarshalText([]byte(input))
- return
-}
-
-// FromStringOrNil returns UUID parsed from string input.
-// Same behavior as FromString, but returns a Nil UUID on error.
-func FromStringOrNil(input string) UUID {
- uuid, err := FromString(input)
- if err != nil {
- return Nil
- }
- return uuid
-}
-
-// Returns UUID v1/v2 storage state.
-// Returns epoch timestamp, clock sequence, and hardware address.
-func getStorage() (uint64, uint16, []byte) {
- storageOnce.Do(initStorage)
-
- storageMutex.Lock()
- defer storageMutex.Unlock()
-
- timeNow := epochFunc()
- // Clock changed backwards since last UUID generation.
- // Should increase clock sequence.
- if timeNow <= lastTime {
- clockSequence++
- }
- lastTime = timeNow
-
- return timeNow, clockSequence, hardwareAddr[:]
-}
-
-// NewV1 returns UUID based on current timestamp and MAC address.
-func NewV1() UUID {
- u := UUID{}
-
- timeNow, clockSeq, hardwareAddr := getStorage()
-
- binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
- binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
- binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
- binary.BigEndian.PutUint16(u[8:], clockSeq)
-
- copy(u[10:], hardwareAddr)
-
- u.SetVersion(1)
- u.SetVariant()
-
- return u
-}
-
-// NewV2 returns DCE Security UUID based on POSIX UID/GID.
-func NewV2(domain byte) UUID {
- u := UUID{}
-
- timeNow, clockSeq, hardwareAddr := getStorage()
-
- switch domain {
- case DomainPerson:
- binary.BigEndian.PutUint32(u[0:], posixUID)
- case DomainGroup:
- binary.BigEndian.PutUint32(u[0:], posixGID)
+ panic(err)
}
-
- binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
- binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
- binary.BigEndian.PutUint16(u[8:], clockSeq)
- u[9] = domain
-
- copy(u[10:], hardwareAddr)
-
- u.SetVersion(2)
- u.SetVariant()
-
- return u
-}
-
-// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
-func NewV3(ns UUID, name string) UUID {
- u := newFromHash(md5.New(), ns, name)
- u.SetVersion(3)
- u.SetVariant()
-
- return u
-}
-
-// NewV4 returns random generated UUID.
-func NewV4() UUID {
- u := UUID{}
- safeRandom(u[:])
- u.SetVersion(4)
- u.SetVariant()
-
- return u
-}
-
-// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
-func NewV5(ns UUID, name string) UUID {
- u := newFromHash(sha1.New(), ns, name)
- u.SetVersion(5)
- u.SetVariant()
-
- return u
-}
-
-// Returns UUID based on hashing of namespace UUID and name.
-func newFromHash(h hash.Hash, ns UUID, name string) UUID {
- u := UUID{}
- h.Write(ns[:])
- h.Write([]byte(name))
- copy(u[:], h.Sum(nil))
-
return u
}
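
A brief usage sketch of the Must helper introduced above for package-level initialization; the variable name and UUID literal are illustrative assumptions:

package example // illustrative only

import uuid "github.com/satori/go.uuid"

// Must panics if FromString fails, so a malformed literal is caught at init time.
var appNamespace = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000"))
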
diff --git a/vendor/github.com/satori/go.uuid/uuid_test.go b/vendor/github.com/satori/go.uuid/uuid_test.go
index aa68ac94f5..beb336d577 100644
--- a/vendor/github.com/satori/go.uuid/uuid_test.go
+++ b/vendor/github.com/satori/go.uuid/uuid_test.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2013, 2015 by Maxim Bublis
+// Copyright (C) 2013-2018 by Maxim Bublis
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
@@ -24,610 +24,67 @@ package uuid
import (
"bytes"
"testing"
+
+ . "gopkg.in/check.v1"
)
-func TestBytes(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+// Hook up gocheck into the "go test" runner.
+func TestUUID(t *testing.T) { TestingT(t) }
- bytes1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+type testSuite struct{}
- if !bytes.Equal(u.Bytes(), bytes1) {
- t.Errorf("Incorrect bytes representation for UUID: %s", u)
- }
-}
+var _ = Suite(&testSuite{})
-func TestString(t *testing.T) {
- if NamespaceDNS.String() != "6ba7b810-9dad-11d1-80b4-00c04fd430c8" {
- t.Errorf("Incorrect string representation for UUID: %s", NamespaceDNS.String())
- }
-}
+func (s *testSuite) TestBytes(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-func TestEqual(t *testing.T) {
- if !Equal(NamespaceDNS, NamespaceDNS) {
- t.Errorf("Incorrect comparison of %s and %s", NamespaceDNS, NamespaceDNS)
- }
+ bytes1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- if Equal(NamespaceDNS, NamespaceURL) {
- t.Errorf("Incorrect comparison of %s and %s", NamespaceDNS, NamespaceURL)
- }
+ c.Assert(bytes.Equal(u.Bytes(), bytes1), Equals, true)
}
-func TestOr(t *testing.T) {
- u1 := UUID{0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff}
- u2 := UUID{0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00}
-
- u := UUID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
-
- if !Equal(u, Or(u1, u2)) {
- t.Errorf("Incorrect bitwise OR result %s", Or(u1, u2))
- }
+func (s *testSuite) TestString(c *C) {
+ c.Assert(NamespaceDNS.String(), Equals, "6ba7b810-9dad-11d1-80b4-00c04fd430c8")
}
-func TestAnd(t *testing.T) {
- u1 := UUID{0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff}
- u2 := UUID{0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00}
-
- u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-
- if !Equal(u, And(u1, u2)) {
- t.Errorf("Incorrect bitwise AND result %s", And(u1, u2))
- }
+func (s *testSuite) TestEqual(c *C) {
+ c.Assert(Equal(NamespaceDNS, NamespaceDNS), Equals, true)
+ c.Assert(Equal(NamespaceDNS, NamespaceURL), Equals, false)
}
-func TestVersion(t *testing.T) {
+func (s *testSuite) TestVersion(c *C) {
u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-
- if u.Version() != 1 {
- t.Errorf("Incorrect version for UUID: %d", u.Version())
- }
+ c.Assert(u.Version(), Equals, V1)
}
-func TestSetVersion(t *testing.T) {
+func (s *testSuite) TestSetVersion(c *C) {
u := UUID{}
u.SetVersion(4)
-
- if u.Version() != 4 {
- t.Errorf("Incorrect version for UUID after u.setVersion(4): %d", u.Version())
- }
+ c.Assert(u.Version(), Equals, V4)
}
-func TestVariant(t *testing.T) {
+func (s *testSuite) TestVariant(c *C) {
u1 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-
- if u1.Variant() != VariantNCS {
- t.Errorf("Incorrect variant for UUID variant %d: %d", VariantNCS, u1.Variant())
- }
+ c.Assert(u1.Variant(), Equals, VariantNCS)
u2 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-
- if u2.Variant() != VariantRFC4122 {
- t.Errorf("Incorrect variant for UUID variant %d: %d", VariantRFC4122, u2.Variant())
- }
+ c.Assert(u2.Variant(), Equals, VariantRFC4122)
u3 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-
- if u3.Variant() != VariantMicrosoft {
- t.Errorf("Incorrect variant for UUID variant %d: %d", VariantMicrosoft, u3.Variant())
- }
+ c.Assert(u3.Variant(), Equals, VariantMicrosoft)
u4 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-
- if u4.Variant() != VariantFuture {
- t.Errorf("Incorrect variant for UUID variant %d: %d", VariantFuture, u4.Variant())
- }
-}
-
-func TestSetVariant(t *testing.T) {
- u := new(UUID)
- u.SetVariant()
-
- if u.Variant() != VariantRFC4122 {
- t.Errorf("Incorrect variant for UUID after u.setVariant(): %d", u.Variant())
- }
-}
-
-func TestFromBytes(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-
- u1, err := FromBytes(b1)
- if err != nil {
- t.Errorf("Error parsing UUID from bytes: %s", err)
- }
-
- if !Equal(u, u1) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1)
- }
-
- b2 := []byte{}
-
- _, err = FromBytes(b2)
- if err == nil {
- t.Errorf("Should return error parsing from empty byte slice, got %s", err)
- }
-}
-
-func TestMarshalBinary(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-
- b2, err := u.MarshalBinary()
- if err != nil {
- t.Errorf("Error marshaling UUID: %s", err)
- }
-
- if !bytes.Equal(b1, b2) {
- t.Errorf("Marshaled UUID should be %s, got %s", b1, b2)
- }
+ c.Assert(u4.Variant(), Equals, VariantFuture)
}
-func TestUnmarshalBinary(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-
- u1 := UUID{}
- err := u1.UnmarshalBinary(b1)
- if err != nil {
- t.Errorf("Error unmarshaling UUID: %s", err)
- }
-
- if !Equal(u, u1) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1)
- }
-
- b2 := []byte{}
- u2 := UUID{}
-
- err = u2.UnmarshalBinary(b2)
- if err == nil {
- t.Errorf("Should return error unmarshalling from empty byte slice, got %s", err)
- }
-}
-
-func TestFromString(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-
- s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
- s2 := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}"
- s3 := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
-
- _, err := FromString("")
- if err == nil {
- t.Errorf("Should return error trying to parse empty string, got %s", err)
- }
-
- u1, err := FromString(s1)
- if err != nil {
- t.Errorf("Error parsing UUID from string: %s", err)
- }
-
- if !Equal(u, u1) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1)
- }
-
- u2, err := FromString(s2)
- if err != nil {
- t.Errorf("Error parsing UUID from string: %s", err)
- }
-
- if !Equal(u, u2) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u2)
- }
-
- u3, err := FromString(s3)
- if err != nil {
- t.Errorf("Error parsing UUID from string: %s", err)
- }
-
- if !Equal(u, u3) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u3)
- }
-}
-
-func TestFromStringShort(t *testing.T) {
- // Invalid 35-character UUID string
- s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c"
-
- for i := len(s1); i >= 0; i-- {
- _, err := FromString(s1[:i])
- if err == nil {
- t.Errorf("Should return error trying to parse too short string, got %s", err)
- }
- }
-}
-
-func TestFromStringLong(t *testing.T) {
- // Invalid 37+ character UUID string
- s := []string{
- "6ba7b810-9dad-11d1-80b4-00c04fd430c8=",
- "6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
- "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}f",
- "6ba7b810-9dad-11d1-80b4-00c04fd430c800c04fd430c8",
- }
-
- for _, str := range s {
- _, err := FromString(str)
- if err == nil {
- t.Errorf("Should return error trying to parse too long string, passed %s", str)
- }
- }
-}
-
-func TestFromStringInvalid(t *testing.T) {
- // Invalid UUID string formats
- s := []string{
- "6ba7b8109dad11d180b400c04fd430c8",
- "6ba7b8109dad11d180b400c04fd430c86ba7b8109dad11d180b400c04fd430c8",
- "urn:uuid:{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
- "6ba7b8109-dad-11d1-80b4-00c04fd430c8",
- "6ba7b810-9dad1-1d1-80b4-00c04fd430c8",
- "6ba7b810-9dad-11d18-0b4-00c04fd430c8",
- "6ba7b810-9dad-11d1-80b40-0c04fd430c8",
- "6ba7b810+9dad+11d1+80b4+00c04fd430c8",
- "6ba7b810-9dad11d180b400c04fd430c8",
- "6ba7b8109dad-11d180b400c04fd430c8",
- "6ba7b8109dad11d1-80b400c04fd430c8",
- "6ba7b8109dad11d180b4-00c04fd430c8",
- }
-
- for _, str := range s {
- _, err := FromString(str)
- if err == nil {
- t.Errorf("Should return error trying to parse invalid string, passed %s", str)
- }
- }
-}
-
-func TestFromStringOrNil(t *testing.T) {
- u := FromStringOrNil("")
- if u != Nil {
- t.Errorf("Should return Nil UUID on parse failure, got %s", u)
- }
-}
-
-func TestFromBytesOrNil(t *testing.T) {
- b := []byte{}
- u := FromBytesOrNil(b)
- if u != Nil {
- t.Errorf("Should return Nil UUID on parse failure, got %s", u)
- }
-}
-
-func TestMarshalText(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
-
- b2, err := u.MarshalText()
- if err != nil {
- t.Errorf("Error marshaling UUID: %s", err)
- }
-
- if !bytes.Equal(b1, b2) {
- t.Errorf("Marshaled UUID should be %s, got %s", b1, b2)
- }
-}
-
-func TestUnmarshalText(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
-
- u1 := UUID{}
- err := u1.UnmarshalText(b1)
- if err != nil {
- t.Errorf("Error unmarshaling UUID: %s", err)
- }
-
- if !Equal(u, u1) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1)
- }
-
- b2 := []byte("")
- u2 := UUID{}
-
- err = u2.UnmarshalText(b2)
- if err == nil {
- t.Errorf("Should return error trying to unmarshal from empty string")
- }
-}
-
-func TestValue(t *testing.T) {
- u, err := FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
- if err != nil {
- t.Errorf("Error parsing UUID from string: %s", err)
- }
-
- val, err := u.Value()
- if err != nil {
- t.Errorf("Error getting UUID value: %s", err)
- }
-
- if val != u.String() {
- t.Errorf("Wrong value returned, should be equal: %s and %s", val, u)
- }
-}
-
-func TestValueNil(t *testing.T) {
+func (s *testSuite) TestSetVariant(c *C) {
u := UUID{}
-
- val, err := u.Value()
- if err != nil {
- t.Errorf("Error getting UUID value: %s", err)
- }
-
- if val != Nil.String() {
- t.Errorf("Wrong value returned, should be equal to UUID.Nil: %s", val)
- }
-}
-
-func TestNullUUIDValueNil(t *testing.T) {
- u := NullUUID{}
-
- val, err := u.Value()
- if err != nil {
- t.Errorf("Error getting UUID value: %s", err)
- }
-
- if val != nil {
- t.Errorf("Wrong value returned, should be nil: %s", val)
- }
-}
-
-func TestScanBinary(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-
- u1 := UUID{}
- err := u1.Scan(b1)
- if err != nil {
- t.Errorf("Error unmarshaling UUID: %s", err)
- }
-
- if !Equal(u, u1) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1)
- }
-
- b2 := []byte{}
- u2 := UUID{}
-
- err = u2.Scan(b2)
- if err == nil {
- t.Errorf("Should return error unmarshalling from empty byte slice, got %s", err)
- }
-}
-
-func TestScanString(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
-
- u1 := UUID{}
- err := u1.Scan(s1)
- if err != nil {
- t.Errorf("Error unmarshaling UUID: %s", err)
- }
-
- if !Equal(u, u1) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1)
- }
-
- s2 := ""
- u2 := UUID{}
-
- err = u2.Scan(s2)
- if err == nil {
- t.Errorf("Should return error trying to unmarshal from empty string")
- }
-}
-
-func TestScanText(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
-
- u1 := UUID{}
- err := u1.Scan(b1)
- if err != nil {
- t.Errorf("Error unmarshaling UUID: %s", err)
- }
-
- if !Equal(u, u1) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1)
- }
-
- b2 := []byte("")
- u2 := UUID{}
-
- err = u2.Scan(b2)
- if err == nil {
- t.Errorf("Should return error trying to unmarshal from empty string")
- }
-}
-
-func TestScanUnsupported(t *testing.T) {
- u := UUID{}
-
- err := u.Scan(true)
- if err == nil {
- t.Errorf("Should return error trying to unmarshal from bool")
- }
-}
-
-func TestScanNil(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-
- err := u.Scan(nil)
- if err == nil {
- t.Errorf("Error UUID shouldn't allow unmarshalling from nil")
- }
-}
-
-func TestNullUUIDScanValid(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
-
- u1 := NullUUID{}
- err := u1.Scan(s1)
- if err != nil {
- t.Errorf("Error unmarshaling NullUUID: %s", err)
- }
-
- if !u1.Valid {
- t.Errorf("NullUUID should be valid")
- }
-
- if !Equal(u, u1.UUID) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1.UUID)
- }
-}
-
-func TestNullUUIDScanNil(t *testing.T) {
- u := NullUUID{UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}, true}
-
- err := u.Scan(nil)
- if err != nil {
- t.Errorf("Error unmarshaling NullUUID: %s", err)
- }
-
- if u.Valid {
- t.Errorf("NullUUID should not be valid")
- }
-
- if !Equal(u.UUID, Nil) {
- t.Errorf("NullUUID value should be equal to Nil: %s", u)
- }
-}
-
-func TestNewV1(t *testing.T) {
- u := NewV1()
-
- if u.Version() != 1 {
- t.Errorf("UUIDv1 generated with incorrect version: %d", u.Version())
- }
-
- if u.Variant() != VariantRFC4122 {
- t.Errorf("UUIDv1 generated with incorrect variant: %d", u.Variant())
- }
-
- u1 := NewV1()
- u2 := NewV1()
-
- if Equal(u1, u2) {
- t.Errorf("UUIDv1 generated two equal UUIDs: %s and %s", u1, u2)
- }
-
- oldFunc := epochFunc
- epochFunc = func() uint64 { return 0 }
-
- u3 := NewV1()
- u4 := NewV1()
-
- if Equal(u3, u4) {
- t.Errorf("UUIDv1 generated two equal UUIDs: %s and %s", u3, u4)
- }
-
- epochFunc = oldFunc
-}
-
-func TestNewV2(t *testing.T) {
- u1 := NewV2(DomainPerson)
-
- if u1.Version() != 2 {
- t.Errorf("UUIDv2 generated with incorrect version: %d", u1.Version())
- }
-
- if u1.Variant() != VariantRFC4122 {
- t.Errorf("UUIDv2 generated with incorrect variant: %d", u1.Variant())
- }
-
- u2 := NewV2(DomainGroup)
-
- if u2.Version() != 2 {
- t.Errorf("UUIDv2 generated with incorrect version: %d", u2.Version())
- }
-
- if u2.Variant() != VariantRFC4122 {
- t.Errorf("UUIDv2 generated with incorrect variant: %d", u2.Variant())
- }
-}
-
-func TestNewV3(t *testing.T) {
- u := NewV3(NamespaceDNS, "www.example.com")
-
- if u.Version() != 3 {
- t.Errorf("UUIDv3 generated with incorrect version: %d", u.Version())
- }
-
- if u.Variant() != VariantRFC4122 {
- t.Errorf("UUIDv3 generated with incorrect variant: %d", u.Variant())
- }
-
- if u.String() != "5df41881-3aed-3515-88a7-2f4a814cf09e" {
- t.Errorf("UUIDv3 generated incorrectly: %s", u.String())
- }
-
- u = NewV3(NamespaceDNS, "python.org")
-
- if u.String() != "6fa459ea-ee8a-3ca4-894e-db77e160355e" {
- t.Errorf("UUIDv3 generated incorrectly: %s", u.String())
- }
-
- u1 := NewV3(NamespaceDNS, "golang.org")
- u2 := NewV3(NamespaceDNS, "golang.org")
- if !Equal(u1, u2) {
- t.Errorf("UUIDv3 generated different UUIDs for same namespace and name: %s and %s", u1, u2)
- }
-
- u3 := NewV3(NamespaceDNS, "example.com")
- if Equal(u1, u3) {
- t.Errorf("UUIDv3 generated same UUIDs for different names in same namespace: %s and %s", u1, u2)
- }
-
- u4 := NewV3(NamespaceURL, "golang.org")
- if Equal(u1, u4) {
- t.Errorf("UUIDv3 generated same UUIDs for sane names in different namespaces: %s and %s", u1, u4)
- }
-}
-
-func TestNewV4(t *testing.T) {
- u := NewV4()
-
- if u.Version() != 4 {
- t.Errorf("UUIDv4 generated with incorrect version: %d", u.Version())
- }
-
- if u.Variant() != VariantRFC4122 {
- t.Errorf("UUIDv4 generated with incorrect variant: %d", u.Variant())
- }
-}
-
-func TestNewV5(t *testing.T) {
- u := NewV5(NamespaceDNS, "www.example.com")
-
- if u.Version() != 5 {
- t.Errorf("UUIDv5 generated with incorrect version: %d", u.Version())
- }
-
- if u.Variant() != VariantRFC4122 {
- t.Errorf("UUIDv5 generated with incorrect variant: %d", u.Variant())
- }
-
- u = NewV5(NamespaceDNS, "python.org")
-
- if u.String() != "886313e1-3b8a-5372-9b90-0c9aee199e5d" {
- t.Errorf("UUIDv5 generated incorrectly: %s", u.String())
- }
-
- u1 := NewV5(NamespaceDNS, "golang.org")
- u2 := NewV5(NamespaceDNS, "golang.org")
- if !Equal(u1, u2) {
- t.Errorf("UUIDv5 generated different UUIDs for same namespace and name: %s and %s", u1, u2)
- }
-
- u3 := NewV5(NamespaceDNS, "example.com")
- if Equal(u1, u3) {
- t.Errorf("UUIDv5 generated same UUIDs for different names in same namespace: %s and %s", u1, u2)
- }
-
- u4 := NewV5(NamespaceURL, "golang.org")
- if Equal(u1, u4) {
- t.Errorf("UUIDv3 generated same UUIDs for sane names in different namespaces: %s and %s", u1, u4)
- }
+ u.SetVariant(VariantNCS)
+ c.Assert(u.Variant(), Equals, VariantNCS)
+ u.SetVariant(VariantRFC4122)
+ c.Assert(u.Variant(), Equals, VariantRFC4122)
+ u.SetVariant(VariantMicrosoft)
+ c.Assert(u.Variant(), Equals, VariantMicrosoft)
+ u.SetVariant(VariantFuture)
+ c.Assert(u.Variant(), Equals, VariantFuture)
}
diff --git a/vendor/github.com/satori/uuid/.travis.yml b/vendor/github.com/satori/uuid/.travis.yml
index fdf960e86b..20dd53b8d3 100644
--- a/vendor/github.com/satori/uuid/.travis.yml
+++ b/vendor/github.com/satori/uuid/.travis.yml
@@ -8,6 +8,7 @@ go:
- 1.6
- 1.7
- 1.8
+ - 1.9
- tip
matrix:
allow_failures:
diff --git a/vendor/github.com/satori/uuid/LICENSE b/vendor/github.com/satori/uuid/LICENSE
index 488357b8af..926d549870 100644
--- a/vendor/github.com/satori/uuid/LICENSE
+++ b/vendor/github.com/satori/uuid/LICENSE
@@ -1,4 +1,4 @@
-Copyright (C) 2013-2016 by Maxim Bublis
+Copyright (C) 2013-2018 by Maxim Bublis
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/vendor/github.com/satori/uuid/README.md b/vendor/github.com/satori/uuid/README.md
index b6aad1c813..7b1a722dff 100644
--- a/vendor/github.com/satori/uuid/README.md
+++ b/vendor/github.com/satori/uuid/README.md
@@ -59,7 +59,7 @@ func main() {
## Copyright
-Copyright (C) 2013-2016 by Maxim Bublis .
+Copyright (C) 2013-2018 by Maxim Bublis .
UUID package released under MIT License.
See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details.
diff --git a/vendor/github.com/satori/uuid/benchmarks_test.go b/vendor/github.com/satori/uuid/benchmarks_test.go
deleted file mode 100644
index c3baeab8b2..0000000000
--- a/vendor/github.com/satori/uuid/benchmarks_test.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright (C) 2013-2015 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-package uuid
-
-import (
- "testing"
-)
-
-func BenchmarkFromBytes(b *testing.B) {
- bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- for i := 0; i < b.N; i++ {
- FromBytes(bytes)
- }
-}
-
-func BenchmarkFromString(b *testing.B) {
- s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
- for i := 0; i < b.N; i++ {
- FromString(s)
- }
-}
-
-func BenchmarkFromStringUrn(b *testing.B) {
- s := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
- for i := 0; i < b.N; i++ {
- FromString(s)
- }
-}
-
-func BenchmarkFromStringWithBrackets(b *testing.B) {
- s := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}"
- for i := 0; i < b.N; i++ {
- FromString(s)
- }
-}
-
-func BenchmarkNewV1(b *testing.B) {
- for i := 0; i < b.N; i++ {
- NewV1()
- }
-}
-
-func BenchmarkNewV2(b *testing.B) {
- for i := 0; i < b.N; i++ {
- NewV2(DomainPerson)
- }
-}
-
-func BenchmarkNewV3(b *testing.B) {
- for i := 0; i < b.N; i++ {
- NewV3(NamespaceDNS, "www.example.com")
- }
-}
-
-func BenchmarkNewV4(b *testing.B) {
- for i := 0; i < b.N; i++ {
- NewV4()
- }
-}
-
-func BenchmarkNewV5(b *testing.B) {
- for i := 0; i < b.N; i++ {
- NewV5(NamespaceDNS, "www.example.com")
- }
-}
-
-func BenchmarkMarshalBinary(b *testing.B) {
- u := NewV4()
- for i := 0; i < b.N; i++ {
- u.MarshalBinary()
- }
-}
-
-func BenchmarkMarshalText(b *testing.B) {
- u := NewV4()
- for i := 0; i < b.N; i++ {
- u.MarshalText()
- }
-}
-
-func BenchmarkUnmarshalBinary(b *testing.B) {
- bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- u := UUID{}
- for i := 0; i < b.N; i++ {
- u.UnmarshalBinary(bytes)
- }
-}
-
-func BenchmarkUnmarshalText(b *testing.B) {
- bytes := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
- u := UUID{}
- for i := 0; i < b.N; i++ {
- u.UnmarshalText(bytes)
- }
-}
-
-var sink string
-
-func BenchmarkMarshalToString(b *testing.B) {
- u := NewV4()
- for i := 0; i < b.N; i++ {
- sink = u.String()
- }
-}
diff --git a/vendor/github.com/satori/uuid/codec.go b/vendor/github.com/satori/uuid/codec.go
new file mode 100644
index 0000000000..656892c53e
--- /dev/null
+++ b/vendor/github.com/satori/uuid/codec.go
@@ -0,0 +1,206 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+)
+
+// FromBytes returns UUID converted from raw byte slice input.
+// It will return error if the slice isn't 16 bytes long.
+func FromBytes(input []byte) (u UUID, err error) {
+ err = u.UnmarshalBinary(input)
+ return
+}
+
+// FromBytesOrNil returns UUID converted from raw byte slice input.
+// Same behavior as FromBytes, but returns a Nil UUID on error.
+func FromBytesOrNil(input []byte) UUID {
+ uuid, err := FromBytes(input)
+ if err != nil {
+ return Nil
+ }
+ return uuid
+}
+
+// FromString returns UUID parsed from string input.
+// Input is expected in a form accepted by UnmarshalText.
+func FromString(input string) (u UUID, err error) {
+ err = u.UnmarshalText([]byte(input))
+ return
+}
+
+// FromStringOrNil returns UUID parsed from string input.
+// Same behavior as FromString, but returns a Nil UUID on error.
+func FromStringOrNil(input string) UUID {
+ uuid, err := FromString(input)
+ if err != nil {
+ return Nil
+ }
+ return uuid
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The encoding is the same as returned by String.
+func (u UUID) MarshalText() (text []byte, err error) {
+ text = []byte(u.String())
+ return
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// Following formats are supported:
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
+// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
+// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+// "6ba7b8109dad11d180b400c04fd430c8"
+// ABNF for supported UUID text representation follows:
+// uuid := canonical | hashlike | braced | urn
+// plain := canonical | hashlike
+// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct
+// hashlike := 16hexoct
+// braced := '{' plain '}'
+// urn := URN ':' UUID-NID ':' plain
+// URN := 'urn'
+// UUID-NID := 'uuid'
+// 16hexoct := 8hexoct 8hexoct
+// 8hexoct := 4hexoct 4hexoct
+// 6hexoct := 4hexoct 2hexoct
+// 4hexoct := 2hexoct 2hexoct
+// 2hexoct := hexoct hexoct
+// hexoct := hexdig hexdig
+// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' |
+// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' |
+// 'A' | 'B' | 'C' | 'D' | 'E' | 'F'
+func (u *UUID) UnmarshalText(text []byte) (err error) {
+ switch len(text) {
+ case 32:
+ return u.decodeHashLike(text)
+ case 36:
+ return u.decodeCanonical(text)
+ case 38:
+ return u.decodeBraced(text)
+ case 41:
+ fallthrough
+ case 45:
+ return u.decodeURN(text)
+ default:
+ return fmt.Errorf("uuid: incorrect UUID length: %s", text)
+ }
+}
+
+// decodeCanonical decodes UUID string in format
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8".
+func (u *UUID) decodeCanonical(t []byte) (err error) {
+ if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' {
+ return fmt.Errorf("uuid: incorrect UUID format %s", t)
+ }
+
+ src := t[:]
+ dst := u[:]
+
+ for i, byteGroup := range byteGroups {
+ if i > 0 {
+ src = src[1:] // skip dash
+ }
+ _, err = hex.Decode(dst[:byteGroup/2], src[:byteGroup])
+ if err != nil {
+ return
+ }
+ src = src[byteGroup:]
+ dst = dst[byteGroup/2:]
+ }
+
+ return
+}
+
+// decodeHashLike decodes UUID string in format
+// "6ba7b8109dad11d180b400c04fd430c8".
+func (u *UUID) decodeHashLike(t []byte) (err error) {
+ src := t[:]
+ dst := u[:]
+
+ if _, err = hex.Decode(dst, src); err != nil {
+ return err
+ }
+ return
+}
+
+// decodeBraced decodes UUID string in format
+// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" or in format
+// "{6ba7b8109dad11d180b400c04fd430c8}".
+func (u *UUID) decodeBraced(t []byte) (err error) {
+ l := len(t)
+
+ if t[0] != '{' || t[l-1] != '}' {
+ return fmt.Errorf("uuid: incorrect UUID format %s", t)
+ }
+
+ return u.decodePlain(t[1 : l-1])
+}
+
+// decodeURN decodes UUID string in format
+// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in format
+// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8".
+func (u *UUID) decodeURN(t []byte) (err error) {
+ total := len(t)
+
+ urnUUIDPrefix := t[:9]
+
+ if !bytes.Equal(urnUUIDPrefix, urnPrefix) {
+ return fmt.Errorf("uuid: incorrect UUID format: %s", t)
+ }
+
+ return u.decodePlain(t[9:total])
+}
+
+// decodePlain decodes UUID string in canonical format
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format
+// "6ba7b8109dad11d180b400c04fd430c8".
+func (u *UUID) decodePlain(t []byte) (err error) {
+ switch len(t) {
+ case 32:
+ return u.decodeHashLike(t)
+ case 36:
+ return u.decodeCanonical(t)
+ default:
+ return fmt.Errorf("uuid: incorrect UUID length: %s", t)
+ }
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (u UUID) MarshalBinary() (data []byte, err error) {
+ data = u.Bytes()
+ return
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+// It will return error if the slice isn't 16 bytes long.
+func (u *UUID) UnmarshalBinary(data []byte) (err error) {
+ if len(data) != Size {
+ err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
+ return
+ }
+ copy(u[:], data)
+
+ return
+}
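
To illustrate the formats the reworked UnmarshalText accepts, a small sketch: each of the canonical, hash-like, braced, and URN forms decodes to the same UUID via FromString (the value shown is the DNS namespace UUID used throughout the tests):

package main // illustrative only

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	forms := []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",          // canonical
		"6ba7b8109dad11d180b400c04fd430c8",              // hash-like
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",        // braced
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8", // URN
	}
	for _, s := range forms {
		u, err := uuid.FromString(s)
		fmt.Println(u, err)
	}
}
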
diff --git a/vendor/github.com/satori/uuid/codec_test.go b/vendor/github.com/satori/uuid/codec_test.go
new file mode 100644
index 0000000000..101ec521c2
--- /dev/null
+++ b/vendor/github.com/satori/uuid/codec_test.go
@@ -0,0 +1,248 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ "bytes"
+
+ . "gopkg.in/check.v1"
+)
+
+type codecTestSuite struct{}
+
+var _ = Suite(&codecTestSuite{})
+
+func (s *codecTestSuite) TestFromBytes(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ u1, err := FromBytes(b1)
+ c.Assert(err, IsNil)
+ c.Assert(u1, Equals, u)
+
+ b2 := []byte{}
+ _, err = FromBytes(b2)
+ c.Assert(err, NotNil)
+}
+
+func (s *codecTestSuite) BenchmarkFromBytes(c *C) {
+ bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ for i := 0; i < c.N; i++ {
+ FromBytes(bytes)
+ }
+}
+
+func (s *codecTestSuite) TestMarshalBinary(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ b2, err := u.MarshalBinary()
+ c.Assert(err, IsNil)
+ c.Assert(bytes.Equal(b1, b2), Equals, true)
+}
+
+func (s *codecTestSuite) BenchmarkMarshalBinary(c *C) {
+ u := NewV4()
+ for i := 0; i < c.N; i++ {
+ u.MarshalBinary()
+ }
+}
+
+func (s *codecTestSuite) TestUnmarshalBinary(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ u1 := UUID{}
+ err := u1.UnmarshalBinary(b1)
+ c.Assert(err, IsNil)
+ c.Assert(u1, Equals, u)
+
+ b2 := []byte{}
+ u2 := UUID{}
+ err = u2.UnmarshalBinary(b2)
+ c.Assert(err, NotNil)
+}
+
+func (s *codecTestSuite) TestFromString(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+ s2 := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}"
+ s3 := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+ s4 := "6ba7b8109dad11d180b400c04fd430c8"
+ s5 := "urn:uuid:6ba7b8109dad11d180b400c04fd430c8"
+
+ _, err := FromString("")
+ c.Assert(err, NotNil)
+
+ u1, err := FromString(s1)
+ c.Assert(err, IsNil)
+ c.Assert(u1, Equals, u)
+
+ u2, err := FromString(s2)
+ c.Assert(err, IsNil)
+ c.Assert(u2, Equals, u)
+
+ u3, err := FromString(s3)
+ c.Assert(err, IsNil)
+ c.Assert(u3, Equals, u)
+
+ u4, err := FromString(s4)
+ c.Assert(err, IsNil)
+ c.Assert(u4, Equals, u)
+
+ u5, err := FromString(s5)
+ c.Assert(err, IsNil)
+ c.Assert(u5, Equals, u)
+}
+
+func (s *codecTestSuite) BenchmarkFromString(c *C) {
+ str := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+ for i := 0; i < c.N; i++ {
+ FromString(str)
+ }
+}
+
+func (s *codecTestSuite) BenchmarkFromStringUrn(c *C) {
+ str := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+ for i := 0; i < c.N; i++ {
+ FromString(str)
+ }
+}
+
+func (s *codecTestSuite) BenchmarkFromStringWithBrackets(c *C) {
+ str := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}"
+ for i := 0; i < c.N; i++ {
+ FromString(str)
+ }
+}
+
+func (s *codecTestSuite) TestFromStringShort(c *C) {
+ // Invalid 35-character UUID string
+ s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c"
+
+ for i := len(s1); i >= 0; i-- {
+ _, err := FromString(s1[:i])
+ c.Assert(err, NotNil)
+ }
+}
+
+func (s *codecTestSuite) TestFromStringLong(c *C) {
+ // Invalid 37+ character UUID string
+ strings := []string{
+ "6ba7b810-9dad-11d1-80b4-00c04fd430c8=",
+ "6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
+ "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}f",
+ "6ba7b810-9dad-11d1-80b4-00c04fd430c800c04fd430c8",
+ }
+
+ for _, str := range strings {
+ _, err := FromString(str)
+ c.Assert(err, NotNil)
+ }
+}
+
+func (s *codecTestSuite) TestFromStringInvalid(c *C) {
+ // Invalid UUID string formats
+ strings := []string{
+ "6ba7b8109dad11d180b400c04fd430c86ba7b8109dad11d180b400c04fd430c8",
+ "urn:uuid:{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
+ "uuid:urn:6ba7b810-9dad-11d1-80b4-00c04fd430c8",
+ "uuid:urn:6ba7b8109dad11d180b400c04fd430c8",
+ "6ba7b8109-dad-11d1-80b4-00c04fd430c8",
+ "6ba7b810-9dad1-1d1-80b4-00c04fd430c8",
+ "6ba7b810-9dad-11d18-0b4-00c04fd430c8",
+ "6ba7b810-9dad-11d1-80b40-0c04fd430c8",
+ "6ba7b810+9dad+11d1+80b4+00c04fd430c8",
+ "(6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
+ "{6ba7b810-9dad-11d1-80b4-00c04fd430c8>",
+ "zba7b810-9dad-11d1-80b4-00c04fd430c8",
+ "6ba7b810-9dad11d180b400c04fd430c8",
+ "6ba7b8109dad-11d180b400c04fd430c8",
+ "6ba7b8109dad11d1-80b400c04fd430c8",
+ "6ba7b8109dad11d180b4-00c04fd430c8",
+ }
+
+ for _, str := range strings {
+ _, err := FromString(str)
+ c.Assert(err, NotNil)
+ }
+}
+
+func (s *codecTestSuite) TestFromStringOrNil(c *C) {
+ u := FromStringOrNil("")
+ c.Assert(u, Equals, Nil)
+}
+
+func (s *codecTestSuite) TestFromBytesOrNil(c *C) {
+ b := []byte{}
+ u := FromBytesOrNil(b)
+ c.Assert(u, Equals, Nil)
+}
+
+func (s *codecTestSuite) TestMarshalText(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+
+ b2, err := u.MarshalText()
+ c.Assert(err, IsNil)
+ c.Assert(bytes.Equal(b1, b2), Equals, true)
+}
+
+func (s *codecTestSuite) BenchmarkMarshalText(c *C) {
+ u := NewV4()
+ for i := 0; i < c.N; i++ {
+ u.MarshalText()
+ }
+}
+
+func (s *codecTestSuite) TestUnmarshalText(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+
+ u1 := UUID{}
+ err := u1.UnmarshalText(b1)
+ c.Assert(err, IsNil)
+ c.Assert(u1, Equals, u)
+
+ b2 := []byte("")
+ u2 := UUID{}
+ err = u2.UnmarshalText(b2)
+ c.Assert(err, NotNil)
+}
+
+func (s *codecTestSuite) BenchmarkUnmarshalText(c *C) {
+ bytes := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+ u := UUID{}
+ for i := 0; i < c.N; i++ {
+ u.UnmarshalText(bytes)
+ }
+}
+
+var sink string
+
+func (s *codecTestSuite) BenchmarkMarshalToString(c *C) {
+ u := NewV4()
+ for i := 0; i < c.N; i++ {
+ sink = u.String()
+ }
+}
diff --git a/vendor/github.com/satori/uuid/generator.go b/vendor/github.com/satori/uuid/generator.go
new file mode 100644
index 0000000000..3f2f1da2dc
--- /dev/null
+++ b/vendor/github.com/satori/uuid/generator.go
@@ -0,0 +1,239 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ "crypto/md5"
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/binary"
+ "hash"
+ "net"
+ "os"
+ "sync"
+ "time"
+)
+
+// Difference in 100-nanosecond intervals between
+// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
+const epochStart = 122192928000000000
+
+var (
+ global = newDefaultGenerator()
+
+ epochFunc = unixTimeFunc
+ posixUID = uint32(os.Getuid())
+ posixGID = uint32(os.Getgid())
+)
+
+// NewV1 returns UUID based on current timestamp and MAC address.
+func NewV1() UUID {
+ return global.NewV1()
+}
+
+// NewV2 returns DCE Security UUID based on POSIX UID/GID.
+func NewV2(domain byte) UUID {
+ return global.NewV2(domain)
+}
+
+// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
+func NewV3(ns UUID, name string) UUID {
+ return global.NewV3(ns, name)
+}
+
+// NewV4 returns random generated UUID.
+func NewV4() UUID {
+ return global.NewV4()
+}
+
+// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
+func NewV5(ns UUID, name string) UUID {
+ return global.NewV5(ns, name)
+}
+
+// Generator provides interface for generating UUIDs.
+type Generator interface {
+ NewV1() UUID
+ NewV2(domain byte) UUID
+ NewV3(ns UUID, name string) UUID
+ NewV4() UUID
+ NewV5(ns UUID, name string) UUID
+}
+
+// Default generator implementation.
+type generator struct {
+ storageOnce sync.Once
+ storageMutex sync.Mutex
+
+ lastTime uint64
+ clockSequence uint16
+ hardwareAddr [6]byte
+}
+
+func newDefaultGenerator() Generator {
+ return &generator{}
+}
+
+// NewV1 returns UUID based on current timestamp and MAC address.
+func (g *generator) NewV1() UUID {
+ u := UUID{}
+
+ timeNow, clockSeq, hardwareAddr := g.getStorage()
+
+ binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
+ binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+ binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+ binary.BigEndian.PutUint16(u[8:], clockSeq)
+
+ copy(u[10:], hardwareAddr)
+
+ u.SetVersion(V1)
+ u.SetVariant(VariantRFC4122)
+
+ return u
+}
+
+// NewV2 returns DCE Security UUID based on POSIX UID/GID.
+func (g *generator) NewV2(domain byte) UUID {
+ u := UUID{}
+
+ timeNow, clockSeq, hardwareAddr := g.getStorage()
+
+ switch domain {
+ case DomainPerson:
+ binary.BigEndian.PutUint32(u[0:], posixUID)
+ case DomainGroup:
+ binary.BigEndian.PutUint32(u[0:], posixGID)
+ }
+
+ binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+ binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+ binary.BigEndian.PutUint16(u[8:], clockSeq)
+ u[9] = domain
+
+ copy(u[10:], hardwareAddr)
+
+ u.SetVersion(V2)
+ u.SetVariant(VariantRFC4122)
+
+ return u
+}
+
+// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
+func (g *generator) NewV3(ns UUID, name string) UUID {
+ u := newFromHash(md5.New(), ns, name)
+ u.SetVersion(V3)
+ u.SetVariant(VariantRFC4122)
+
+ return u
+}
+
+// NewV4 returns random generated UUID.
+func (g *generator) NewV4() UUID {
+ u := UUID{}
+ g.safeRandom(u[:])
+ u.SetVersion(V4)
+ u.SetVariant(VariantRFC4122)
+
+ return u
+}
+
+// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
+func (g *generator) NewV5(ns UUID, name string) UUID {
+ u := newFromHash(sha1.New(), ns, name)
+ u.SetVersion(V5)
+ u.SetVariant(VariantRFC4122)
+
+ return u
+}
+
+func (g *generator) initStorage() {
+ g.initClockSequence()
+ g.initHardwareAddr()
+}
+
+func (g *generator) initClockSequence() {
+ buf := make([]byte, 2)
+ g.safeRandom(buf)
+ g.clockSequence = binary.BigEndian.Uint16(buf)
+}
+
+func (g *generator) initHardwareAddr() {
+ interfaces, err := net.Interfaces()
+ if err == nil {
+ for _, iface := range interfaces {
+ if len(iface.HardwareAddr) >= 6 {
+ copy(g.hardwareAddr[:], iface.HardwareAddr)
+ return
+ }
+ }
+ }
+
+	// Initialize hardwareAddr randomly when no real
+	// network interface is available
+ g.safeRandom(g.hardwareAddr[:])
+
+ // Set multicast bit as recommended in RFC 4122
+ g.hardwareAddr[0] |= 0x01
+}
+
+func (g *generator) safeRandom(dest []byte) {
+ if _, err := rand.Read(dest); err != nil {
+ panic(err)
+ }
+}
+
+// Returns UUID v1/v2 storage state.
+// Returns epoch timestamp, clock sequence, and hardware address.
+func (g *generator) getStorage() (uint64, uint16, []byte) {
+ g.storageOnce.Do(g.initStorage)
+
+ g.storageMutex.Lock()
+ defer g.storageMutex.Unlock()
+
+ timeNow := epochFunc()
+ // Clock changed backwards since last UUID generation.
+ // Should increase clock sequence.
+ if timeNow <= g.lastTime {
+ g.clockSequence++
+ }
+ g.lastTime = timeNow
+
+ return timeNow, g.clockSequence, g.hardwareAddr[:]
+}
+
+// Returns difference in 100-nanosecond intervals between
+// UUID epoch (October 15, 1582) and current time.
+// This is the default epoch calculation function.
+func unixTimeFunc() uint64 {
+ return epochStart + uint64(time.Now().UnixNano()/100)
+}
+
+// Returns UUID based on hashing of namespace UUID and name.
+func newFromHash(h hash.Hash, ns UUID, name string) UUID {
+ u := UUID{}
+ h.Write(ns[:])
+ h.Write([]byte(name))
+ copy(u[:], h.Sum(nil))
+
+ return u
+}
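
For context (not part of the vendored file): NewV1 above splits the 60-bit timestamp across bytes 0-7 with the version nibble overlaid on byte 6, so the generation time can be recovered from a value. A rough sketch, assuming the import path above and duplicating the epochStart constant locally:

package main

import (
	"encoding/binary"
	"fmt"
	"time"

	uuid "github.com/satori/uuid"
)

// epochStartV1 mirrors the package's unexported epochStart constant: 100-ns
// intervals between the UUID epoch (1582-10-15) and the Unix epoch (1970-01-01).
const epochStartV1 = 122192928000000000

func main() {
	u := uuid.NewV1()

	// Reassemble the 60-bit timestamp written by NewV1: time_low (bytes 0-3),
	// time_mid (bytes 4-5), time_hi (bytes 6-7 with the version nibble masked off).
	low := uint64(binary.BigEndian.Uint32(u[0:4]))
	mid := uint64(binary.BigEndian.Uint16(u[4:6]))
	hi := uint64(binary.BigEndian.Uint16(u[6:8]) & 0x0fff)
	ts := hi<<48 | mid<<32 | low

	// Convert 100-ns intervals since the UUID epoch back to wall-clock time.
	t := time.Unix(0, int64(ts-epochStartV1)*100)
	fmt.Println(u, t)
}
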
diff --git a/vendor/github.com/satori/uuid/generator_test.go b/vendor/github.com/satori/uuid/generator_test.go
new file mode 100644
index 0000000000..cd69e2efb8
--- /dev/null
+++ b/vendor/github.com/satori/uuid/generator_test.go
@@ -0,0 +1,134 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ . "gopkg.in/check.v1"
+)
+
+type genTestSuite struct{}
+
+var _ = Suite(&genTestSuite{})
+
+func (s *genTestSuite) TestNewV1(c *C) {
+ u := NewV1()
+ c.Assert(u.Version(), Equals, V1)
+ c.Assert(u.Variant(), Equals, VariantRFC4122)
+
+ u1 := NewV1()
+ u2 := NewV1()
+ c.Assert(u1, Not(Equals), u2)
+
+ oldFunc := epochFunc
+ epochFunc = func() uint64 { return 0 }
+
+ u3 := NewV1()
+ u4 := NewV1()
+ c.Assert(u3, Not(Equals), u4)
+
+ epochFunc = oldFunc
+}
+
+func (s *genTestSuite) BenchmarkNewV1(c *C) {
+ for i := 0; i < c.N; i++ {
+ NewV1()
+ }
+}
+
+func (s *genTestSuite) TestNewV2(c *C) {
+ u1 := NewV2(DomainPerson)
+ c.Assert(u1.Version(), Equals, V2)
+ c.Assert(u1.Variant(), Equals, VariantRFC4122)
+
+ u2 := NewV2(DomainGroup)
+ c.Assert(u2.Version(), Equals, V2)
+ c.Assert(u2.Variant(), Equals, VariantRFC4122)
+}
+
+func (s *genTestSuite) BenchmarkNewV2(c *C) {
+ for i := 0; i < c.N; i++ {
+ NewV2(DomainPerson)
+ }
+}
+
+func (s *genTestSuite) TestNewV3(c *C) {
+ u := NewV3(NamespaceDNS, "www.example.com")
+ c.Assert(u.Version(), Equals, V3)
+ c.Assert(u.Variant(), Equals, VariantRFC4122)
+ c.Assert(u.String(), Equals, "5df41881-3aed-3515-88a7-2f4a814cf09e")
+
+ u = NewV3(NamespaceDNS, "python.org")
+ c.Assert(u.String(), Equals, "6fa459ea-ee8a-3ca4-894e-db77e160355e")
+
+ u1 := NewV3(NamespaceDNS, "golang.org")
+ u2 := NewV3(NamespaceDNS, "golang.org")
+ c.Assert(u1, Equals, u2)
+
+ u3 := NewV3(NamespaceDNS, "example.com")
+ c.Assert(u1, Not(Equals), u3)
+
+ u4 := NewV3(NamespaceURL, "golang.org")
+ c.Assert(u1, Not(Equals), u4)
+}
+
+func (s *genTestSuite) BenchmarkNewV3(c *C) {
+ for i := 0; i < c.N; i++ {
+ NewV3(NamespaceDNS, "www.example.com")
+ }
+}
+
+func (s *genTestSuite) TestNewV4(c *C) {
+ u := NewV4()
+ c.Assert(u.Version(), Equals, V4)
+ c.Assert(u.Variant(), Equals, VariantRFC4122)
+}
+
+func (s *genTestSuite) BenchmarkNewV4(c *C) {
+ for i := 0; i < c.N; i++ {
+ NewV4()
+ }
+}
+
+func (s *genTestSuite) TestNewV5(c *C) {
+ u := NewV5(NamespaceDNS, "www.example.com")
+ c.Assert(u.Version(), Equals, V5)
+ c.Assert(u.Variant(), Equals, VariantRFC4122)
+
+ u = NewV5(NamespaceDNS, "python.org")
+ c.Assert(u.String(), Equals, "886313e1-3b8a-5372-9b90-0c9aee199e5d")
+
+ u1 := NewV5(NamespaceDNS, "golang.org")
+ u2 := NewV5(NamespaceDNS, "golang.org")
+ c.Assert(u1, Equals, u2)
+
+ u3 := NewV5(NamespaceDNS, "example.com")
+ c.Assert(u1, Not(Equals), u3)
+
+ u4 := NewV5(NamespaceURL, "golang.org")
+ c.Assert(u1, Not(Equals), u4)
+}
+
+func (s *genTestSuite) BenchmarkNewV5(c *C) {
+ for i := 0; i < c.N; i++ {
+ NewV5(NamespaceDNS, "www.example.com")
+ }
+}
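
As a side note on the V3/V5 assertions above: name-based UUIDs are deterministic functions of (namespace, name), which is what makes them useful as stable identifiers. A minimal sketch (import path assumed from the vendored location):

package main

import (
	"fmt"

	uuid "github.com/satori/uuid"
)

func main() {
	// The same namespace and name always yield the same UUID; this is
	// exactly what the generator tests assert for V3 and V5.
	a := uuid.NewV5(uuid.NamespaceDNS, "www.example.com")
	b := uuid.NewV5(uuid.NamespaceDNS, "www.example.com")
	fmt.Println(a == b)                                                     // true
	fmt.Println(a.Version() == uuid.V5, a.Variant() == uuid.VariantRFC4122) // true true
}
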
diff --git a/vendor/github.com/satori/uuid/sql.go b/vendor/github.com/satori/uuid/sql.go
new file mode 100644
index 0000000000..56759d3905
--- /dev/null
+++ b/vendor/github.com/satori/uuid/sql.go
@@ -0,0 +1,78 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+// Value implements the driver.Valuer interface.
+func (u UUID) Value() (driver.Value, error) {
+ return u.String(), nil
+}
+
+// Scan implements the sql.Scanner interface.
+// A 16-byte slice is handled by UnmarshalBinary, while
+// a longer byte slice or a string is handled by UnmarshalText.
+func (u *UUID) Scan(src interface{}) error {
+ switch src := src.(type) {
+ case []byte:
+ if len(src) == Size {
+ return u.UnmarshalBinary(src)
+ }
+ return u.UnmarshalText(src)
+
+ case string:
+ return u.UnmarshalText([]byte(src))
+ }
+
+ return fmt.Errorf("uuid: cannot convert %T to UUID", src)
+}
+
+// NullUUID can be used with the standard sql package to represent a
+// UUID value that can be NULL in the database
+type NullUUID struct {
+ UUID UUID
+ Valid bool
+}
+
+// Value implements the driver.Valuer interface.
+func (u NullUUID) Value() (driver.Value, error) {
+ if !u.Valid {
+ return nil, nil
+ }
+ // Delegate to UUID Value function
+ return u.UUID.Value()
+}
+
+// Scan implements the sql.Scanner interface.
+func (u *NullUUID) Scan(src interface{}) error {
+ if src == nil {
+ u.UUID, u.Valid = Nil, false
+ return nil
+ }
+
+ // Delegate to UUID Scan function
+ u.Valid = true
+ return u.UUID.Scan(src)
+}
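
Not part of the patch: a rough sketch of how the Valuer/Scanner implementations above plug into database/sql. The table, column, and placeholder syntax are made up for illustration (placeholder style depends on the driver); UUID values travel as their string form, and NULL columns round-trip through NullUUID:

package storage

import (
	"database/sql"
	"fmt"

	uuid "github.com/satori/uuid"
)

// findOwner looks up a (hypothetical) owner_id column that may be NULL.
// The uuid.UUID argument is written via its Valuer (string form), and the
// result is read back through NullUUID's Scanner.
func findOwner(db *sql.DB, id uuid.UUID) (uuid.NullUUID, error) {
	var owner uuid.NullUUID
	err := db.QueryRow("SELECT owner_id FROM items WHERE id = $1", id).Scan(&owner)
	if err != nil {
		return uuid.NullUUID{}, err
	}
	if !owner.Valid {
		fmt.Println("item has no owner (NULL column)")
	}
	return owner, nil
}
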
diff --git a/vendor/github.com/satori/uuid/sql_test.go b/vendor/github.com/satori/uuid/sql_test.go
new file mode 100644
index 0000000000..74255f50d9
--- /dev/null
+++ b/vendor/github.com/satori/uuid/sql_test.go
@@ -0,0 +1,136 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+ . "gopkg.in/check.v1"
+)
+
+type sqlTestSuite struct{}
+
+var _ = Suite(&sqlTestSuite{})
+
+func (s *sqlTestSuite) TestValue(c *C) {
+ u, err := FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+ c.Assert(err, IsNil)
+
+ val, err := u.Value()
+ c.Assert(err, IsNil)
+ c.Assert(val, Equals, u.String())
+}
+
+func (s *sqlTestSuite) TestValueNil(c *C) {
+ u := UUID{}
+
+ val, err := u.Value()
+ c.Assert(err, IsNil)
+ c.Assert(val, Equals, Nil.String())
+}
+
+func (s *sqlTestSuite) TestNullUUIDValueNil(c *C) {
+ u := NullUUID{}
+
+ val, err := u.Value()
+ c.Assert(err, IsNil)
+ c.Assert(val, IsNil)
+}
+
+func (s *sqlTestSuite) TestScanBinary(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ u1 := UUID{}
+ err := u1.Scan(b1)
+ c.Assert(err, IsNil)
+ c.Assert(u, Equals, u1)
+
+ b2 := []byte{}
+ u2 := UUID{}
+
+ err = u2.Scan(b2)
+ c.Assert(err, NotNil)
+}
+
+func (s *sqlTestSuite) TestScanString(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+
+ u1 := UUID{}
+ err := u1.Scan(s1)
+ c.Assert(err, IsNil)
+ c.Assert(u, Equals, u1)
+
+ s2 := ""
+ u2 := UUID{}
+
+ err = u2.Scan(s2)
+ c.Assert(err, NotNil)
+}
+
+func (s *sqlTestSuite) TestScanText(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+
+ u1 := UUID{}
+ err := u1.Scan(b1)
+ c.Assert(err, IsNil)
+ c.Assert(u, Equals, u1)
+
+ b2 := []byte("")
+ u2 := UUID{}
+ err = u2.Scan(b2)
+ c.Assert(err, NotNil)
+}
+
+func (s *sqlTestSuite) TestScanUnsupported(c *C) {
+ u := UUID{}
+
+ err := u.Scan(true)
+ c.Assert(err, NotNil)
+}
+
+func (s *sqlTestSuite) TestScanNil(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+
+ err := u.Scan(nil)
+ c.Assert(err, NotNil)
+}
+
+func (s *sqlTestSuite) TestNullUUIDScanValid(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+ s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+
+ u1 := NullUUID{}
+ err := u1.Scan(s1)
+ c.Assert(err, IsNil)
+ c.Assert(u1.Valid, Equals, true)
+ c.Assert(u1.UUID, Equals, u)
+}
+
+func (s *sqlTestSuite) TestNullUUIDScanNil(c *C) {
+ u := NullUUID{UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}, true}
+
+ err := u.Scan(nil)
+ c.Assert(err, IsNil)
+ c.Assert(u.Valid, Equals, false)
+ c.Assert(u.UUID, Equals, Nil)
+}
diff --git a/vendor/github.com/satori/uuid/uuid.go b/vendor/github.com/satori/uuid/uuid.go
index 295f3fc2c5..a2b8e2ca2a 100644
--- a/vendor/github.com/satori/uuid/uuid.go
+++ b/vendor/github.com/satori/uuid/uuid.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2013-2015 by Maxim Bublis
+// Copyright (C) 2013-2018 by Maxim Bublis
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
@@ -26,23 +26,29 @@ package uuid
import (
"bytes"
- "crypto/md5"
- "crypto/rand"
- "crypto/sha1"
- "database/sql/driver"
- "encoding/binary"
"encoding/hex"
- "fmt"
- "hash"
- "net"
- "os"
- "sync"
- "time"
+)
+
+// Size of a UUID in bytes.
+const Size = 16
+
+// UUID representation compliant with specification
+// described in RFC 4122.
+type UUID [Size]byte
+
+// UUID versions
+const (
+ _ byte = iota
+ V1
+ V2
+ V3
+ V4
+ V5
)
// UUID layout variants.
const (
- VariantNCS = iota
+ VariantNCS byte = iota
VariantRFC4122
VariantMicrosoft
VariantFuture
@@ -55,136 +61,48 @@ const (
DomainOrg
)
-// Difference in 100-nanosecond intervals between
-// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
-const epochStart = 122192928000000000
-
-// Used in string method conversion
-const dash byte = '-'
-
-// UUID v1/v2 storage.
-var (
- storageMutex sync.Mutex
- storageOnce sync.Once
- epochFunc = unixTimeFunc
- clockSequence uint16
- lastTime uint64
- hardwareAddr [6]byte
- posixUID = uint32(os.Getuid())
- posixGID = uint32(os.Getgid())
-)
-
// String parse helpers.
var (
urnPrefix = []byte("urn:uuid:")
byteGroups = []int{8, 4, 4, 4, 12}
)
-func initClockSequence() {
- buf := make([]byte, 2)
- safeRandom(buf)
- clockSequence = binary.BigEndian.Uint16(buf)
-}
-
-func initHardwareAddr() {
- interfaces, err := net.Interfaces()
- if err == nil {
- for _, iface := range interfaces {
- if len(iface.HardwareAddr) >= 6 {
- copy(hardwareAddr[:], iface.HardwareAddr)
- return
- }
- }
- }
-
- // Initialize hardwareAddr randomly in case
- // of real network interfaces absence
- safeRandom(hardwareAddr[:])
-
- // Set multicast bit as recommended in RFC 4122
- hardwareAddr[0] |= 0x01
-}
-
-func initStorage() {
- initClockSequence()
- initHardwareAddr()
-}
-
-func safeRandom(dest []byte) {
- if _, err := rand.Read(dest); err != nil {
- panic(err)
- }
-}
-
-// Returns difference in 100-nanosecond intervals between
-// UUID epoch (October 15, 1582) and current time.
-// This is default epoch calculation function.
-func unixTimeFunc() uint64 {
- return epochStart + uint64(time.Now().UnixNano()/100)
-}
-
-// UUID representation compliant with specification
-// described in RFC 4122.
-type UUID [16]byte
-
-// NullUUID can be used with the standard sql package to represent a
-// UUID value that can be NULL in the database
-type NullUUID struct {
- UUID UUID
- Valid bool
-}
-
-// The nil UUID is special form of UUID that is specified to have all
+// Nil is a special form of UUID that is specified to have all
// 128 bits set to zero.
var Nil = UUID{}
// Predefined namespace UUIDs.
var (
- NamespaceDNS, _ = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
- NamespaceURL, _ = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
- NamespaceOID, _ = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
- NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+ NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+ NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+ NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+ NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
)
-// And returns result of binary AND of two UUIDs.
-func And(u1 UUID, u2 UUID) UUID {
- u := UUID{}
- for i := 0; i < 16; i++ {
- u[i] = u1[i] & u2[i]
- }
- return u
-}
-
-// Or returns result of binary OR of two UUIDs.
-func Or(u1 UUID, u2 UUID) UUID {
- u := UUID{}
- for i := 0; i < 16; i++ {
- u[i] = u1[i] | u2[i]
- }
- return u
-}
-
// Equal returns true if u1 and u2 equals, otherwise returns false.
func Equal(u1 UUID, u2 UUID) bool {
return bytes.Equal(u1[:], u2[:])
}
// Version returns algorithm version used to generate UUID.
-func (u UUID) Version() uint {
- return uint(u[6] >> 4)
+func (u UUID) Version() byte {
+ return u[6] >> 4
}
// Variant returns UUID layout variant.
-func (u UUID) Variant() uint {
+func (u UUID) Variant() byte {
switch {
- case (u[8] & 0x80) == 0x00:
+ case (u[8] >> 7) == 0x00:
return VariantNCS
- case (u[8]&0xc0)|0x80 == 0x80:
+ case (u[8] >> 6) == 0x02:
return VariantRFC4122
- case (u[8]&0xe0)|0xc0 == 0xc0:
+ case (u[8] >> 5) == 0x06:
return VariantMicrosoft
+ case (u[8] >> 5) == 0x07:
+ fallthrough
+ default:
+ return VariantFuture
}
- return VariantFuture
}
// Bytes returns bytes slice representation of UUID.
@@ -198,13 +116,13 @@ func (u UUID) String() string {
buf := make([]byte, 36)
hex.Encode(buf[0:8], u[0:4])
- buf[8] = dash
+ buf[8] = '-'
hex.Encode(buf[9:13], u[4:6])
- buf[13] = dash
+ buf[13] = '-'
hex.Encode(buf[14:18], u[6:8])
- buf[18] = dash
+ buf[18] = '-'
hex.Encode(buf[19:23], u[8:10])
- buf[23] = dash
+ buf[23] = '-'
hex.Encode(buf[24:], u[10:])
return string(buf)
@@ -215,267 +133,29 @@ func (u *UUID) SetVersion(v byte) {
u[6] = (u[6] & 0x0f) | (v << 4)
}
-// SetVariant sets variant bits as described in RFC 4122.
-func (u *UUID) SetVariant() {
- u[8] = (u[8] & 0xbf) | 0x80
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-// The encoding is the same as returned by String.
-func (u UUID) MarshalText() (text []byte, err error) {
- text = []byte(u.String())
- return
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-// Following formats are supported:
-// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
-// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
-// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
-func (u *UUID) UnmarshalText(text []byte) (err error) {
- if len(text) < 32 {
- err = fmt.Errorf("uuid: UUID string too short: %s", text)
- return
- }
-
- t := text[:]
- braced := false
-
- if bytes.Equal(t[:9], urnPrefix) {
- t = t[9:]
- } else if t[0] == '{' {
- braced = true
- t = t[1:]
+// SetVariant sets variant bits.
+func (u *UUID) SetVariant(v byte) {
+ switch v {
+ case VariantNCS:
+ u[8] = (u[8]&(0xff>>1) | (0x00 << 7))
+ case VariantRFC4122:
+ u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
+ case VariantMicrosoft:
+ u[8] = (u[8]&(0xff>>3) | (0x06 << 5))
+ case VariantFuture:
+ fallthrough
+ default:
+ u[8] = (u[8]&(0xff>>3) | (0x07 << 5))
}
-
- b := u[:]
-
- for i, byteGroup := range byteGroups {
- if i > 0 {
- if t[0] != '-' {
- err = fmt.Errorf("uuid: invalid string format")
- return
- }
- t = t[1:]
- }
-
- if len(t) < byteGroup {
- err = fmt.Errorf("uuid: UUID string too short: %s", text)
- return
- }
-
- if i == 4 && len(t) > byteGroup &&
- ((braced && t[byteGroup] != '}') || len(t[byteGroup:]) > 1 || !braced) {
- err = fmt.Errorf("uuid: UUID string too long: %s", text)
- return
- }
-
- _, err = hex.Decode(b[:byteGroup/2], t[:byteGroup])
- if err != nil {
- return
- }
-
- t = t[byteGroup:]
- b = b[byteGroup/2:]
- }
-
- return
}
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-func (u UUID) MarshalBinary() (data []byte, err error) {
- data = u.Bytes()
- return
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-// It will return error if the slice isn't 16 bytes long.
-func (u *UUID) UnmarshalBinary(data []byte) (err error) {
- if len(data) != 16 {
- err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
- return
- }
- copy(u[:], data)
-
- return
-}
-
-// Value implements the driver.Valuer interface.
-func (u UUID) Value() (driver.Value, error) {
- return u.String(), nil
-}
-
-// Scan implements the sql.Scanner interface.
-// A 16-byte slice is handled by UnmarshalBinary, while
-// a longer byte slice or a string is handled by UnmarshalText.
-func (u *UUID) Scan(src interface{}) error {
- switch src := src.(type) {
- case []byte:
- if len(src) == 16 {
- return u.UnmarshalBinary(src)
- }
- return u.UnmarshalText(src)
-
- case string:
- return u.UnmarshalText([]byte(src))
- }
-
- return fmt.Errorf("uuid: cannot convert %T to UUID", src)
-}
-
-// Value implements the driver.Valuer interface.
-func (u NullUUID) Value() (driver.Value, error) {
- if !u.Valid {
- return nil, nil
- }
- // Delegate to UUID Value function
- return u.UUID.Value()
-}
-
-// Scan implements the sql.Scanner interface.
-func (u *NullUUID) Scan(src interface{}) error {
- if src == nil {
- u.UUID, u.Valid = Nil, false
- return nil
- }
-
- // Delegate to UUID Scan function
- u.Valid = true
- return u.UUID.Scan(src)
-}
-
-// FromBytes returns UUID converted from raw byte slice input.
-// It will return error if the slice isn't 16 bytes long.
-func FromBytes(input []byte) (u UUID, err error) {
- err = u.UnmarshalBinary(input)
- return
-}
-
-// FromBytesOrNil returns UUID converted from raw byte slice input.
-// Same behavior as FromBytes, but returns a Nil UUID on error.
-func FromBytesOrNil(input []byte) UUID {
- uuid, err := FromBytes(input)
+// Must is a helper that wraps a call to a function returning (UUID, error)
+// and panics if the error is non-nil. It is intended for use in variable
+// initializations such as
+// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000"));
+func Must(u UUID, err error) UUID {
if err != nil {
- return Nil
- }
- return uuid
-}
-
-// FromString returns UUID parsed from string input.
-// Input is expected in a form accepted by UnmarshalText.
-func FromString(input string) (u UUID, err error) {
- err = u.UnmarshalText([]byte(input))
- return
-}
-
-// FromStringOrNil returns UUID parsed from string input.
-// Same behavior as FromString, but returns a Nil UUID on error.
-func FromStringOrNil(input string) UUID {
- uuid, err := FromString(input)
- if err != nil {
- return Nil
- }
- return uuid
-}
-
-// Returns UUID v1/v2 storage state.
-// Returns epoch timestamp, clock sequence, and hardware address.
-func getStorage() (uint64, uint16, []byte) {
- storageOnce.Do(initStorage)
-
- storageMutex.Lock()
- defer storageMutex.Unlock()
-
- timeNow := epochFunc()
- // Clock changed backwards since last UUID generation.
- // Should increase clock sequence.
- if timeNow <= lastTime {
- clockSequence++
- }
- lastTime = timeNow
-
- return timeNow, clockSequence, hardwareAddr[:]
-}
-
-// NewV1 returns UUID based on current timestamp and MAC address.
-func NewV1() UUID {
- u := UUID{}
-
- timeNow, clockSeq, hardwareAddr := getStorage()
-
- binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
- binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
- binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
- binary.BigEndian.PutUint16(u[8:], clockSeq)
-
- copy(u[10:], hardwareAddr)
-
- u.SetVersion(1)
- u.SetVariant()
-
- return u
-}
-
-// NewV2 returns DCE Security UUID based on POSIX UID/GID.
-func NewV2(domain byte) UUID {
- u := UUID{}
-
- timeNow, clockSeq, hardwareAddr := getStorage()
-
- switch domain {
- case DomainPerson:
- binary.BigEndian.PutUint32(u[0:], posixUID)
- case DomainGroup:
- binary.BigEndian.PutUint32(u[0:], posixGID)
+ panic(err)
}
-
- binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
- binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
- binary.BigEndian.PutUint16(u[8:], clockSeq)
- u[9] = domain
-
- copy(u[10:], hardwareAddr)
-
- u.SetVersion(2)
- u.SetVariant()
-
- return u
-}
-
-// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
-func NewV3(ns UUID, name string) UUID {
- u := newFromHash(md5.New(), ns, name)
- u.SetVersion(3)
- u.SetVariant()
-
- return u
-}
-
-// NewV4 returns random generated UUID.
-func NewV4() UUID {
- u := UUID{}
- safeRandom(u[:])
- u.SetVersion(4)
- u.SetVariant()
-
- return u
-}
-
-// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
-func NewV5(ns UUID, name string) UUID {
- u := newFromHash(sha1.New(), ns, name)
- u.SetVersion(5)
- u.SetVariant()
-
- return u
-}
-
-// Returns UUID based on hashing of namespace UUID and name.
-func newFromHash(h hash.Hash, ns UUID, name string) UUID {
- u := UUID{}
- h.Write(ns[:])
- h.Write([]byte(name))
- copy(u[:], h.Sum(nil))
-
return u
}
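
For context on the Must-based namespace initialization introduced above (not part of the vendored file): Must turns a malformed literal into an init-time panic instead of the silently discarded error of the old two-value FromString assignment. A minimal sketch with a made-up package-level namespace:

package main

import (
	"fmt"

	uuid "github.com/satori/uuid"
)

// nsConfig is a hypothetical application namespace, initialized the same way
// the predefined namespaces above now are: a bad literal panics at startup.
var nsConfig = uuid.Must(uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))

func main() {
	u := uuid.NewV5(nsConfig, "service/config.yaml")
	fmt.Println(u, u.Version() == uuid.V5)
}
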
diff --git a/vendor/github.com/satori/uuid/uuid_test.go b/vendor/github.com/satori/uuid/uuid_test.go
index 56504808f9..beb336d577 100644
--- a/vendor/github.com/satori/uuid/uuid_test.go
+++ b/vendor/github.com/satori/uuid/uuid_test.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2013, 2015 by Maxim Bublis
+// Copyright (C) 2013-2018 by Maxim Bublis
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
@@ -24,610 +24,67 @@ package uuid
import (
"bytes"
"testing"
+
+ . "gopkg.in/check.v1"
)
-func TestBytes(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+// Hook up gocheck into the "go test" runner.
+func TestUUID(t *testing.T) { TestingT(t) }
- bytes1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
+type testSuite struct{}
- if !bytes.Equal(u.Bytes(), bytes1) {
- t.Errorf("Incorrect bytes representation for UUID: %s", u)
- }
-}
+var _ = Suite(&testSuite{})
-func TestString(t *testing.T) {
- if NamespaceDNS.String() != "6ba7b810-9dad-11d1-80b4-00c04fd430c8" {
- t.Errorf("Incorrect string representation for UUID: %s", NamespaceDNS.String())
- }
-}
+func (s *testSuite) TestBytes(c *C) {
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-func TestEqual(t *testing.T) {
- if !Equal(NamespaceDNS, NamespaceDNS) {
- t.Errorf("Incorrect comparison of %s and %s", NamespaceDNS, NamespaceDNS)
- }
+ bytes1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- if Equal(NamespaceDNS, NamespaceURL) {
- t.Errorf("Incorrect comparison of %s and %s", NamespaceDNS, NamespaceURL)
- }
+ c.Assert(bytes.Equal(u.Bytes(), bytes1), Equals, true)
}
-func TestOr(t *testing.T) {
- u1 := UUID{0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff}
- u2 := UUID{0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00}
-
- u := UUID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
-
- if !Equal(u, Or(u1, u2)) {
- t.Errorf("Incorrect bitwise OR result %s", Or(u1, u2))
- }
+func (s *testSuite) TestString(c *C) {
+ c.Assert(NamespaceDNS.String(), Equals, "6ba7b810-9dad-11d1-80b4-00c04fd430c8")
}
-func TestAnd(t *testing.T) {
- u1 := UUID{0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff}
- u2 := UUID{0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00}
-
- u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-
- if !Equal(u, And(u1, u2)) {
- t.Errorf("Incorrect bitwise AND result %s", And(u1, u2))
- }
+func (s *testSuite) TestEqual(c *C) {
+ c.Assert(Equal(NamespaceDNS, NamespaceDNS), Equals, true)
+ c.Assert(Equal(NamespaceDNS, NamespaceURL), Equals, false)
}
-func TestVersion(t *testing.T) {
+func (s *testSuite) TestVersion(c *C) {
u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-
- if u.Version() != 1 {
- t.Errorf("Incorrect version for UUID: %d", u.Version())
- }
+ c.Assert(u.Version(), Equals, V1)
}
-func TestSetVersion(t *testing.T) {
+func (s *testSuite) TestSetVersion(c *C) {
u := UUID{}
u.SetVersion(4)
-
- if u.Version() != 4 {
- t.Errorf("Incorrect version for UUID after u.setVersion(4): %d", u.Version())
- }
+ c.Assert(u.Version(), Equals, V4)
}
-func TestVariant(t *testing.T) {
+func (s *testSuite) TestVariant(c *C) {
u1 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-
- if u1.Variant() != VariantNCS {
- t.Errorf("Incorrect variant for UUID variant %d: %d", VariantNCS, u1.Variant())
- }
+ c.Assert(u1.Variant(), Equals, VariantNCS)
u2 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-
- if u2.Variant() != VariantRFC4122 {
- t.Errorf("Incorrect variant for UUID variant %d: %d", VariantRFC4122, u2.Variant())
- }
+ c.Assert(u2.Variant(), Equals, VariantRFC4122)
u3 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-
- if u3.Variant() != VariantMicrosoft {
- t.Errorf("Incorrect variant for UUID variant %d: %d", VariantMicrosoft, u3.Variant())
- }
+ c.Assert(u3.Variant(), Equals, VariantMicrosoft)
u4 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
-
- if u4.Variant() != VariantFuture {
- t.Errorf("Incorrect variant for UUID variant %d: %d", VariantFuture, u4.Variant())
- }
-}
-
-func TestSetVariant(t *testing.T) {
- u := new(UUID)
- u.SetVariant()
-
- if u.Variant() != VariantRFC4122 {
- t.Errorf("Incorrect variant for UUID after u.setVariant(): %d", u.Variant())
- }
-}
-
-func TestFromBytes(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-
- u1, err := FromBytes(b1)
- if err != nil {
- t.Errorf("Error parsing UUID from bytes: %s", err)
- }
-
- if !Equal(u, u1) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1)
- }
-
- b2 := []byte{}
-
- _, err = FromBytes(b2)
- if err == nil {
- t.Errorf("Should return error parsing from empty byte slice, got %s", err)
- }
-}
-
-func TestMarshalBinary(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-
- b2, err := u.MarshalBinary()
- if err != nil {
- t.Errorf("Error marshaling UUID: %s", err)
- }
-
- if !bytes.Equal(b1, b2) {
- t.Errorf("Marshaled UUID should be %s, got %s", b1, b2)
- }
+ c.Assert(u4.Variant(), Equals, VariantFuture)
}
-func TestUnmarshalBinary(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-
- u1 := UUID{}
- err := u1.UnmarshalBinary(b1)
- if err != nil {
- t.Errorf("Error unmarshaling UUID: %s", err)
- }
-
- if !Equal(u, u1) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1)
- }
-
- b2 := []byte{}
- u2 := UUID{}
-
- err = u2.UnmarshalBinary(b2)
- if err == nil {
- t.Errorf("Should return error unmarshalling from empty byte slice, got %s", err)
- }
-}
-
-func TestFromString(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-
- s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
- s2 := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}"
- s3 := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
-
- _, err := FromString("")
- if err == nil {
- t.Errorf("Should return error trying to parse empty string, got %s", err)
- }
-
- u1, err := FromString(s1)
- if err != nil {
- t.Errorf("Error parsing UUID from string: %s", err)
- }
-
- if !Equal(u, u1) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1)
- }
-
- u2, err := FromString(s2)
- if err != nil {
- t.Errorf("Error parsing UUID from string: %s", err)
- }
-
- if !Equal(u, u2) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u2)
- }
-
- u3, err := FromString(s3)
- if err != nil {
- t.Errorf("Error parsing UUID from string: %s", err)
- }
-
- if !Equal(u, u3) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u3)
- }
-}
-
-func TestFromStringShort(t *testing.T) {
- // Invalid 35-character UUID string
- s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c"
-
- for i := len(s1); i >= 0; i-- {
- _, err := FromString(s1[:i])
- if err == nil {
- t.Errorf("Should return error trying to parse too short string, got %s", err)
- }
- }
-}
-
-func TestFromStringLong(t *testing.T) {
- // Invalid 37+ character UUID string
- s := []string{
- "6ba7b810-9dad-11d1-80b4-00c04fd430c8=",
- "6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
- "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}f",
- "6ba7b810-9dad-11d1-80b4-00c04fd430c800c04fd430c8",
- }
-
- for _, str := range s {
- _, err := FromString(str)
- if err == nil {
- t.Errorf("Should return error trying to parse too long string, passed %s", str)
- }
- }
-}
-
-func TestFromStringInvalid(t *testing.T) {
- // Invalid UUID string formats
- s := []string{
- "6ba7b8109dad11d180b400c04fd430c8",
- "6ba7b8109dad11d180b400c04fd430c86ba7b8109dad11d180b400c04fd430c8",
- "urn:uuid:{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
- "6ba7b8109-dad-11d1-80b4-00c04fd430c8",
- "6ba7b810-9dad1-1d1-80b4-00c04fd430c8",
- "6ba7b810-9dad-11d18-0b4-00c04fd430c8",
- "6ba7b810-9dad-11d1-80b40-0c04fd430c8",
- "6ba7b810+9dad+11d1+80b4+00c04fd430c8",
- "6ba7b810-9dad11d180b400c04fd430c8",
- "6ba7b8109dad-11d180b400c04fd430c8",
- "6ba7b8109dad11d1-80b400c04fd430c8",
- "6ba7b8109dad11d180b4-00c04fd430c8",
- }
-
- for _, str := range s {
- _, err := FromString(str)
- if err == nil {
- t.Errorf("Should return error trying to parse invalid string, passed %s", str)
- }
- }
-}
-
-func TestFromStringOrNil(t *testing.T) {
- u := FromStringOrNil("")
- if u != Nil {
- t.Errorf("Should return Nil UUID on parse failure, got %s", u)
- }
-}
-
-func TestFromBytesOrNil(t *testing.T) {
- b := []byte{}
- u := FromBytesOrNil(b)
- if u != Nil {
- t.Errorf("Should return Nil UUID on parse failure, got %s", u)
- }
-}
-
-func TestMarshalText(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
-
- b2, err := u.MarshalText()
- if err != nil {
- t.Errorf("Error marshaling UUID: %s", err)
- }
-
- if !bytes.Equal(b1, b2) {
- t.Errorf("Marshaled UUID should be %s, got %s", b1, b2)
- }
-}
-
-func TestUnmarshalText(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
-
- u1 := UUID{}
- err := u1.UnmarshalText(b1)
- if err != nil {
- t.Errorf("Error unmarshaling UUID: %s", err)
- }
-
- if !Equal(u, u1) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1)
- }
-
- b2 := []byte("")
- u2 := UUID{}
-
- err = u2.UnmarshalText(b2)
- if err == nil {
- t.Errorf("Should return error trying to unmarshal from empty string")
- }
-}
-
-func TestValue(t *testing.T) {
- u, err := FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
- if err != nil {
- t.Errorf("Error parsing UUID from string: %s", err)
- }
-
- val, err := u.Value()
- if err != nil {
- t.Errorf("Error getting UUID value: %s", err)
- }
-
- if val != u.String() {
- t.Errorf("Wrong value returned, should be equal: %s and %s", val, u)
- }
-}
-
-func TestValueNil(t *testing.T) {
+func (s *testSuite) TestSetVariant(c *C) {
u := UUID{}
-
- val, err := u.Value()
- if err != nil {
- t.Errorf("Error getting UUID value: %s", err)
- }
-
- if val != Nil.String() {
- t.Errorf("Wrong value returned, should be equal to UUID.Nil: %s", val)
- }
-}
-
-func TestNullUUIDValueNil(t *testing.T) {
- u := NullUUID{}
-
- val, err := u.Value()
- if err != nil {
- t.Errorf("Error getting UUID value: %s", err)
- }
-
- if val != nil {
- t.Errorf("Wrong value returned, should be nil: %s", val)
- }
-}
-
-func TestScanBinary(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-
- u1 := UUID{}
- err := u1.Scan(b1)
- if err != nil {
- t.Errorf("Error unmarshaling UUID: %s", err)
- }
-
- if !Equal(u, u1) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1)
- }
-
- b2 := []byte{}
- u2 := UUID{}
-
- err = u2.Scan(b2)
- if err == nil {
- t.Errorf("Should return error unmarshalling from empty byte slice, got %s", err)
- }
-}
-
-func TestScanString(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
-
- u1 := UUID{}
- err := u1.Scan(s1)
- if err != nil {
- t.Errorf("Error unmarshaling UUID: %s", err)
- }
-
- if !Equal(u, u1) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1)
- }
-
- s2 := ""
- u2 := UUID{}
-
- err = u2.Scan(s2)
- if err == nil {
- t.Errorf("Should return error trying to unmarshal from empty string")
- }
-}
-
-func TestScanText(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
-
- u1 := UUID{}
- err := u1.Scan(b1)
- if err != nil {
- t.Errorf("Error unmarshaling UUID: %s", err)
- }
-
- if !Equal(u, u1) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1)
- }
-
- b2 := []byte("")
- u2 := UUID{}
-
- err = u2.Scan(b2)
- if err == nil {
- t.Errorf("Should return error trying to unmarshal from empty string")
- }
-}
-
-func TestScanUnsupported(t *testing.T) {
- u := UUID{}
-
- err := u.Scan(true)
- if err == nil {
- t.Errorf("Should return error trying to unmarshal from bool")
- }
-}
-
-func TestScanNil(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
-
- err := u.Scan(nil)
- if err == nil {
- t.Errorf("Error UUID shouldn't allow unmarshalling from nil")
- }
-}
-
-func TestNullUUIDScanValid(t *testing.T) {
- u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}
- s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
-
- u1 := NullUUID{}
- err := u1.Scan(s1)
- if err != nil {
- t.Errorf("Error unmarshaling NullUUID: %s", err)
- }
-
- if !u1.Valid {
- t.Errorf("NullUUID should be valid")
- }
-
- if !Equal(u, u1.UUID) {
- t.Errorf("UUIDs should be equal: %s and %s", u, u1.UUID)
- }
-}
-
-func TestNullUUIDScanNil(t *testing.T) {
- u := NullUUID{UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}, true}
-
- err := u.Scan(nil)
- if err != nil {
- t.Errorf("Error unmarshaling NullUUID: %s", err)
- }
-
- if u.Valid {
- t.Errorf("NullUUID should not be valid")
- }
-
- if !Equal(u.UUID, Nil) {
- t.Errorf("NullUUID value should be equal to Nil: %v", u)
- }
-}
-
-func TestNewV1(t *testing.T) {
- u := NewV1()
-
- if u.Version() != 1 {
- t.Errorf("UUIDv1 generated with incorrect version: %d", u.Version())
- }
-
- if u.Variant() != VariantRFC4122 {
- t.Errorf("UUIDv1 generated with incorrect variant: %d", u.Variant())
- }
-
- u1 := NewV1()
- u2 := NewV1()
-
- if Equal(u1, u2) {
- t.Errorf("UUIDv1 generated two equal UUIDs: %s and %s", u1, u2)
- }
-
- oldFunc := epochFunc
- epochFunc = func() uint64 { return 0 }
-
- u3 := NewV1()
- u4 := NewV1()
-
- if Equal(u3, u4) {
- t.Errorf("UUIDv1 generated two equal UUIDs: %s and %s", u3, u4)
- }
-
- epochFunc = oldFunc
-}
-
-func TestNewV2(t *testing.T) {
- u1 := NewV2(DomainPerson)
-
- if u1.Version() != 2 {
- t.Errorf("UUIDv2 generated with incorrect version: %d", u1.Version())
- }
-
- if u1.Variant() != VariantRFC4122 {
- t.Errorf("UUIDv2 generated with incorrect variant: %d", u1.Variant())
- }
-
- u2 := NewV2(DomainGroup)
-
- if u2.Version() != 2 {
- t.Errorf("UUIDv2 generated with incorrect version: %d", u2.Version())
- }
-
- if u2.Variant() != VariantRFC4122 {
- t.Errorf("UUIDv2 generated with incorrect variant: %d", u2.Variant())
- }
-}
-
-func TestNewV3(t *testing.T) {
- u := NewV3(NamespaceDNS, "www.example.com")
-
- if u.Version() != 3 {
- t.Errorf("UUIDv3 generated with incorrect version: %d", u.Version())
- }
-
- if u.Variant() != VariantRFC4122 {
- t.Errorf("UUIDv3 generated with incorrect variant: %d", u.Variant())
- }
-
- if u.String() != "5df41881-3aed-3515-88a7-2f4a814cf09e" {
- t.Errorf("UUIDv3 generated incorrectly: %s", u.String())
- }
-
- u = NewV3(NamespaceDNS, "python.org")
-
- if u.String() != "6fa459ea-ee8a-3ca4-894e-db77e160355e" {
- t.Errorf("UUIDv3 generated incorrectly: %s", u.String())
- }
-
- u1 := NewV3(NamespaceDNS, "golang.org")
- u2 := NewV3(NamespaceDNS, "golang.org")
- if !Equal(u1, u2) {
- t.Errorf("UUIDv3 generated different UUIDs for same namespace and name: %s and %s", u1, u2)
- }
-
- u3 := NewV3(NamespaceDNS, "example.com")
- if Equal(u1, u3) {
- t.Errorf("UUIDv3 generated same UUIDs for different names in same namespace: %s and %s", u1, u2)
- }
-
- u4 := NewV3(NamespaceURL, "golang.org")
- if Equal(u1, u4) {
- t.Errorf("UUIDv3 generated same UUIDs for sane names in different namespaces: %s and %s", u1, u4)
- }
-}
-
-func TestNewV4(t *testing.T) {
- u := NewV4()
-
- if u.Version() != 4 {
- t.Errorf("UUIDv4 generated with incorrect version: %d", u.Version())
- }
-
- if u.Variant() != VariantRFC4122 {
- t.Errorf("UUIDv4 generated with incorrect variant: %d", u.Variant())
- }
-}
-
-func TestNewV5(t *testing.T) {
- u := NewV5(NamespaceDNS, "www.example.com")
-
- if u.Version() != 5 {
- t.Errorf("UUIDv5 generated with incorrect version: %d", u.Version())
- }
-
- if u.Variant() != VariantRFC4122 {
- t.Errorf("UUIDv5 generated with incorrect variant: %d", u.Variant())
- }
-
- u = NewV5(NamespaceDNS, "python.org")
-
- if u.String() != "886313e1-3b8a-5372-9b90-0c9aee199e5d" {
- t.Errorf("UUIDv5 generated incorrectly: %s", u.String())
- }
-
- u1 := NewV5(NamespaceDNS, "golang.org")
- u2 := NewV5(NamespaceDNS, "golang.org")
- if !Equal(u1, u2) {
- t.Errorf("UUIDv5 generated different UUIDs for same namespace and name: %s and %s", u1, u2)
- }
-
- u3 := NewV5(NamespaceDNS, "example.com")
- if Equal(u1, u3) {
- t.Errorf("UUIDv5 generated same UUIDs for different names in same namespace: %s and %s", u1, u2)
- }
-
- u4 := NewV5(NamespaceURL, "golang.org")
- if Equal(u1, u4) {
- t.Errorf("UUIDv3 generated same UUIDs for sane names in different namespaces: %s and %s", u1, u4)
- }
+ u.SetVariant(VariantNCS)
+ c.Assert(u.Variant(), Equals, VariantNCS)
+ u.SetVariant(VariantRFC4122)
+ c.Assert(u.Variant(), Equals, VariantRFC4122)
+ u.SetVariant(VariantMicrosoft)
+ c.Assert(u.Variant(), Equals, VariantMicrosoft)
+ u.SetVariant(VariantFuture)
+ c.Assert(u.Variant(), Equals, VariantFuture)
}
diff --git a/vendor/golang.org/x/net/dns/dnsmessage/message.go b/vendor/golang.org/x/net/dns/dnsmessage/message.go
index c7244b7822..ea94bd4950 100644
--- a/vendor/golang.org/x/net/dns/dnsmessage/message.go
+++ b/vendor/golang.org/x/net/dns/dnsmessage/message.go
@@ -13,7 +13,7 @@ import (
"errors"
)
-// Packet formats
+// Message formats
// A Type is a type of DNS request and response.
type Type uint16
@@ -826,8 +826,8 @@ func (m *Message) AppendPack(b []byte) ([]byte, error) {
// unconditionally enabling it is fine.
//
// DNS lookups are typically done over UDP, and RFC 1035 states that UDP
- // DNS packets can be a maximum of 512 bytes long. Without compression,
- // many DNS response packets are over this limit, so enabling
+ // DNS messages can be a maximum of 512 bytes long. Without compression,
+ // many DNS response messages are over this limit, so enabling
// compression will help ensure compliance.
compression := map[string]int{}
@@ -1207,7 +1207,7 @@ func (b *Builder) AAAAResource(h ResourceHeader, r AAAAResource) error {
return nil
}
-// Finish ends message building and generates a binary packet.
+// Finish ends message building and generates a binary message.
func (b *Builder) Finish() ([]byte, error) {
if b.section < sectionHeader {
return nil, ErrNotStarted
diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go
index b65fc6d423..088d6e2bdb 100644
--- a/vendor/golang.org/x/net/http2/configure_transport.go
+++ b/vendor/golang.org/x/net/http2/configure_transport.go
@@ -73,7 +73,7 @@ type noDialH2RoundTripper struct{ t *Transport }
func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
res, err := rt.t.RoundTrip(req)
- if err == ErrNoCachedConn {
+ if isNoCachedConnError(err) {
return nil, http.ErrSkipAltProtocol
}
return res, err
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 7a502263a5..460ede03b1 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -2509,7 +2509,6 @@ func checkWriteHeaderCode(code int) {
}
func (w *responseWriter) WriteHeader(code int) {
- checkWriteHeaderCode(code)
rws := w.rws
if rws == nil {
panic("WriteHeader called after Handler finished")
@@ -2519,6 +2518,7 @@ func (w *responseWriter) WriteHeader(code int) {
func (rws *responseWriterState) writeHeader(code int) {
if !rws.wroteHeader {
+ checkWriteHeaderCode(code)
rws.wroteHeader = true
rws.status = code
if len(rws.handlerHeader) > 0 {
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index c65f1a3976..e6b321f4bb 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -306,7 +306,26 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
return
}
-var ErrNoCachedConn = errors.New("http2: no cached connection was available")
+// noCachedConnError is the concrete type of ErrNoCachedConn, which
+// needs to be detected by net/http regardless of whether it's its
+// bundled version (in h2_bundle.go with a rewritten type name) or
+// from a user's x/net/http2. As such, it has a unique method name
+// (IsHTTP2NoCachedConnError) that net/http sniffs for via func
+// isNoCachedConnError.
+type noCachedConnError struct{}
+
+func (noCachedConnError) IsHTTP2NoCachedConnError() {}
+func (noCachedConnError) Error() string { return "http2: no cached connection was available" }
+
+// isNoCachedConnError reports whether err is of type noCachedConnError
+// or its equivalent renamed type in net/http2's h2_bundle.go. Both types
+// may coexist in the same running program.
+func isNoCachedConnError(err error) bool {
+ _, ok := err.(interface{ IsHTTP2NoCachedConnError() })
+ return ok
+}
+
+var ErrNoCachedConn error = noCachedConnError{}
// RoundTripOpt are options for the Transport.RoundTripOpt method.
type RoundTripOpt struct {
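
Not part of the patch: the ErrNoCachedConn change above relies on detecting the error by a uniquely named marker method rather than by identity, so both x/net/http2 and the renamed copy bundled into net/http match the same check. A standalone sketch of that pattern (the names here are illustrative, not the real types):

package main

import (
	"errors"
	"fmt"
)

// markerErr stands in for noCachedConnError: the concrete type stays
// unexported, but it carries a uniquely named marker method that other
// packages can sniff for without importing this exact type.
type markerErr struct{}

func (markerErr) IsHTTP2NoCachedConnError() {}
func (markerErr) Error() string             { return "no cached connection" }

// isNoCachedConn mirrors isNoCachedConnError from the diff: an interface
// assertion on the method name matches both this type and any renamed copy
// of it bundled into another package.
func isNoCachedConn(err error) bool {
	_, ok := err.(interface{ IsHTTP2NoCachedConnError() })
	return ok
}

func main() {
	fmt.Println(isNoCachedConn(markerErr{}))                  // true
	fmt.Println(isNoCachedConn(errors.New("something else"))) // false
}
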
diff --git a/vendor/golang.org/x/sys/README b/vendor/golang.org/x/sys/README
deleted file mode 100644
index bd422b40c2..0000000000
--- a/vendor/golang.org/x/sys/README
+++ /dev/null
@@ -1,3 +0,0 @@
-This repository holds supplemental Go packages for low-level interactions with the operating system.
-
-To submit changes to this repository, see http://golang.org/doc/contribute.html.
diff --git a/vendor/golang.org/x/sys/README.md b/vendor/golang.org/x/sys/README.md
new file mode 100644
index 0000000000..ef6c9e59c2
--- /dev/null
+++ b/vendor/golang.org/x/sys/README.md
@@ -0,0 +1,18 @@
+# sys
+
+This repository holds supplemental Go packages for low-level interactions with
+the operating system.
+
+## Download/Install
+
+The easiest way to install is to run `go get -u golang.org/x/sys`. You can
+also manually git clone the repository to `$GOPATH/src/golang.org/x/sys`.
+
+## Report Issues / Send Patches
+
+This repository uses Gerrit for code changes. To learn how to submit changes to
+this repository, see https://golang.org/doc/contribute.html.
+
+The main issue tracker for the sys repository is located at
+https://github.com/golang/go/issues. Prefix your issue with "x/sys:" in the
+subject line, so it is easy to find.
diff --git a/vendor/golang.org/x/sys/plan9/asm.s b/vendor/golang.org/x/sys/plan9/asm.s
index d4ca868f17..06449ebfa9 100644
--- a/vendor/golang.org/x/sys/plan9/asm.s
+++ b/vendor/golang.org/x/sys/plan9/asm.s
@@ -1,4 +1,4 @@
-// Copyright 2014 The Go Authors. All rights reserved.
+// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/vendor/golang.org/x/sys/plan9/env_plan9.go b/vendor/golang.org/x/sys/plan9/env_plan9.go
index 25a96e7ea9..8f1918004f 100644
--- a/vendor/golang.org/x/sys/plan9/env_plan9.go
+++ b/vendor/golang.org/x/sys/plan9/env_plan9.go
@@ -1,4 +1,4 @@
-// Copyright 2011 The Go Authors. All rights reserved.
+// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -25,3 +25,7 @@ func Clearenv() {
func Environ() []string {
return syscall.Environ()
}
+
+func Unsetenv(key string) error {
+ return syscall.Unsetenv(key)
+}
diff --git a/vendor/golang.org/x/sys/plan9/env_unset.go b/vendor/golang.org/x/sys/plan9/env_unset.go
deleted file mode 100644
index c37fc26e44..0000000000
--- a/vendor/golang.org/x/sys/plan9/env_unset.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.4
-
-package plan9
-
-import "syscall"
-
-func Unsetenv(key string) error {
- // This was added in Go 1.4.
- return syscall.Unsetenv(key)
-}
diff --git a/vendor/golang.org/x/sys/plan9/errors_plan9.go b/vendor/golang.org/x/sys/plan9/errors_plan9.go
index 110cf6a302..65fe74d3ef 100644
--- a/vendor/golang.org/x/sys/plan9/errors_plan9.go
+++ b/vendor/golang.org/x/sys/plan9/errors_plan9.go
@@ -1,4 +1,4 @@
-// Copyright 2011 The Go Authors. All rights reserved.
+// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/vendor/golang.org/x/sys/plan9/race.go b/vendor/golang.org/x/sys/plan9/race.go
index c7ff5df2e6..42edd93ef9 100644
--- a/vendor/golang.org/x/sys/plan9/race.go
+++ b/vendor/golang.org/x/sys/plan9/race.go
@@ -1,4 +1,4 @@
-// Copyright 2012 The Go Authors. All rights reserved.
+// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/vendor/golang.org/x/sys/plan9/race0.go b/vendor/golang.org/x/sys/plan9/race0.go
index 06cabcc762..c89cf8fc0d 100644
--- a/vendor/golang.org/x/sys/plan9/race0.go
+++ b/vendor/golang.org/x/sys/plan9/race0.go
@@ -1,4 +1,4 @@
-// Copyright 2012 The Go Authors. All rights reserved.
+// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go
index df6f8c5b92..5046cfe7ff 100644
--- a/vendor/golang.org/x/sys/plan9/syscall.go
+++ b/vendor/golang.org/x/sys/plan9/syscall.go
@@ -5,10 +5,10 @@
// +build plan9
// Package plan9 contains an interface to the low-level operating system
-// primitives. OS details vary depending on the underlying system, and
+// primitives. OS details vary depending on the underlying system, and
// by default, godoc will display the OS-specific documentation for the current
-// system. If you want godoc to display documentation for another
-// system, set $GOOS and $GOARCH to the desired system. For example, if
+// system. If you want godoc to display documentation for another
+// system, set $GOOS and $GOARCH to the desired system. For example, if
// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS
// to freebsd and $GOARCH to arm.
// The primary use of this package is inside other packages that provide a more
diff --git a/vendor/golang.org/x/sys/unix/.gitignore b/vendor/golang.org/x/sys/unix/.gitignore
index e482715909..e3e0fc6f89 100644
--- a/vendor/golang.org/x/sys/unix/.gitignore
+++ b/vendor/golang.org/x/sys/unix/.gitignore
@@ -1 +1,2 @@
_obj/
+unix.test
diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md
new file mode 100644
index 0000000000..bc6f6031f1
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/README.md
@@ -0,0 +1,173 @@
+# Building `sys/unix`
+
+The sys/unix package provides access to the raw system call interface of the
+underlying operating system. See: https://godoc.org/golang.org/x/sys/unix
+
+Porting Go to a new architecture/OS combination or adding syscalls, types, or
+constants to an existing architecture/OS pair requires some manual effort;
+however, there are tools that automate much of the process.
+
+## Build Systems
+
+There are currently two ways we generate the necessary files. We are migrating
+the build system to use containers so that the builds are reproducible.
+This is being done on an OS-by-OS basis. Please update this documentation as
+components of the build system change.
+
+### Old Build System (currently for `GOOS != "linux" || GOARCH == "sparc64"`)
+
+The old build system generates the Go files based on the C header files
+present on your system. This means that files
+for a given GOOS/GOARCH pair must be generated on a system with that OS and
+architecture. This also means that the generated code can differ from system
+to system, based on differences in the header files.
+
+To avoid this, if you are using the old build system, only generate the Go
+files on an installation with unmodified header files. It is also important to
+keep track of which version of the OS the files were generated from (e.g.
+Darwin 14 vs Darwin 15). This makes it easier to track the progress of changes
+and have each OS upgrade correspond to a single change.
+
+To build the files for your current OS and architecture, make sure GOOS and
+GOARCH are set correctly and run `mkall.sh`. This will generate the files for
+your specific system. Running `mkall.sh -n` shows the commands that will be run.
+
+Requirements: bash, perl, go
+
+### New Build System (currently for `GOOS == "linux" && GOARCH != "sparc64"`)
+
+The new build system uses a Docker container to generate the Go files directly
+from source checkouts of the kernel and various system libraries. This means
+that on any platform that supports Docker, all the files using the new build
+system can be generated at once, and generated files will not change based on
+what the person running the scripts has installed on their computer.
+
+The OS specific files for the new build system are located in the `${GOOS}`
+directory, and the build is coordinated by the `${GOOS}/mkall.go` program. When
+the kernel or system library updates, modify the Dockerfile at
+`${GOOS}/Dockerfile` to checkout the new release of the source.
+
+To build all the files under the new build system, you must be on an amd64/Linux
+system and have your GOOS and GOARCH set accordingly. Running `mkall.sh` will
+then generate all of the files for all of the GOOS/GOARCH pairs in the new build
+system. Running `mkall.sh -n` shows the commands that will be run.
+
+Requirements: bash, perl, go, docker
+
+## Component files
+
+This section describes the various files used in the code generation process.
+It also contains instructions on how to modify these files to add a new
+architecture/OS or to add additional syscalls, types, or constants. Note that
+if you are using the new build system, the scripts cannot be called normally.
+They must be called from within the docker container.
+
+### asm files
+
+The hand-written assembly file at `asm_${GOOS}_${GOARCH}.s` implements system
+call dispatch. There are three entry points:
+```
+ func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
+ func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
+ func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
+```
+The first and second are the standard ones; they differ only in how many
+arguments can be passed to the kernel. The third is for low-level use by the
+ForkExec wrapper. Unlike the first two, it does not call into the scheduler to
+let it know that a system call is running.
+
+When porting Go to a new architecture/OS, this file must be implemented for
+each GOOS/GOARCH pair.
+
+### mksysnum
+
+Mksysnum is a script located at `${GOOS}/mksysnum.pl` (or `mksysnum_${GOOS}.pl`
+for the old system). This script takes in a list of header files containing the
+syscall number declarations and parses them to produce the corresponding list of
+Go numeric constants. See `zsysnum_${GOOS}_${GOARCH}.go` for the generated
+constants.
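+
+For illustration, the generated constants take a form along these lines (the
+values shown are indicative of linux/amd64 only and are not authoritative):
+```
+const (
+ SYS_READ  = 0
+ SYS_WRITE = 1
+ SYS_OPEN  = 2
+)
+```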
+
+Adding new syscall numbers is mostly done by running the build on a sufficiently
+new installation of the target OS (or updating the source checkouts for the
+new build system). However, depending on the OS, you may need to update the
+parsing in mksysnum.
+
+### mksyscall.pl
+
+The `syscall.go`, `syscall_${GOOS}.go`, and `syscall_${GOOS}_${GOARCH}.go` are
+hand-written Go files which implement system calls (for unix, the specific OS,
+or the specific OS/Architecture pair respectively) that need special handling
+and list `//sys` comments giving prototypes for ones that can be generated.
+
+The mksyscall.pl script takes the `//sys` and `//sysnb` comments and converts
+them into syscalls. This requires the name of the prototype in the comment to
+match a syscall number in the `zsysnum_${GOOS}_${GOARCH}.go` file. The function
+prototype can be exported (capitalized) or not.
+
+Adding a new syscall often just requires adding a new `//sys` function prototype
+with the desired arguments and a capitalized name so it is exported. However, if
+you want the interface to the syscall to be different, often one will make an
+unexported `//sys` prototype, and then write a custom wrapper in
+`syscall_${GOOS}.go`.
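+
+As a rough sketch, such prototype comments look like the following (the
+particular syscalls and signatures are illustrative, not tied to any one port):
+```
+//sys Unlink(path string) (err error)
+//sysnb Getpid() (pid int)
+```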
+
+### types files
+
+For each OS, there is a hand-written Go file at `${GOOS}/types.go` (or
+`types_${GOOS}.go` on the old system). This file includes standard C headers and
+creates Go type aliases to the corresponding C types. The file is then fed
+through godef to get the Go compatible definitions. Finally, the generated code
+is fed though mkpost.go to format the code correctly and remove any hidden or
+private identifiers. This cleaned-up code is written to
+`ztypes_${GOOS}_${GOARCH}.go`.
+
+The hardest part about preparing this file is figuring out which headers to
+include and which symbols need to be `#define`d to get the actual data
+structures that pass through to the kernel system calls. Some C libraries
+present alternate versions for binary compatibility and translate them on the
+way in and out of system calls, but there is almost always a `#define` that can
+get the real ones.
+See `types_darwin.go` and `linux/types.go` for examples.
+
+To add a new type, add in the necessary include statement at the top of the
+file (if it is not already there) and add in a type alias line. Note that if
+your type is significantly different on different architectures, you may need
+some `#if/#elif` macros in your include statements.
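+
+A minimal sketch of such a types file, assuming one wanted Timespec and Timeval
+aliases (the headers and type names here are examples only):
+```
+// +build ignore
+
+package unix
+
+/*
+#include <sys/time.h>
+#include <time.h>
+*/
+import "C"
+
+type Timespec C.struct_timespec
+
+type Timeval C.struct_timeval
+```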
+
+### mkerrors.sh
+
+This script is used to generate the system's various constants. This doesn't
+just include the error numbers and error strings, but also the signal numbers
+and a wide variety of miscellaneous constants. The constants come from the list
+of include files in the `includes_${uname}` variable. A regex then picks out
+the desired `#define` statements, and generates the corresponding Go constants.
+The error numbers and strings are generated from `#include <errno.h>`, and the
+signal numbers and strings are generated from `#include <signal.h>`. All of
+these constants are written to `zerrors_${GOOS}_${GOARCH}.go` via a C program,
+`_errors.c`, which prints out all the constants.
+
+To add a constant, add the header that includes it to the appropriate variable.
+Then, edit the regex (if necessary) to match the desired constant. Avoid making
+the regex too broad, or it may match unintended constants.
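+
+The end result is a flat list of Go constants and errno values; a hypothetical
+excerpt (names and values shown are for illustration only) might read:
+```
+const (
+ O_RDONLY = 0x0
+ O_WRONLY = 0x1
+)
+
+const (
+ EPERM  = syscall.Errno(0x1)
+ ENOENT = syscall.Errno(0x2)
+)
+```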
+
+
+## Generated files
+
+### `zerrors_${GOOS}_${GOARCH}.go`
+
+A file containing all of the system's generated error numbers, error strings,
+signal numbers, and constants. Generated by `mkerrors.sh` (see above).
+
+### `zsyscall_${GOOS}_${GOARCH}.go`
+
+A file containing all the generated syscalls for a specific GOOS and GOARCH.
+Generated by `mksyscall.pl` (see above).
+
+### `zsysnum_${GOOS}_${GOARCH}.go`
+
+A list of numeric constants for all the syscall numbers of the specific GOOS
+and GOARCH. Generated by mksysnum (see above).
+
+### `ztypes_${GOOS}_${GOARCH}.go`
+
+A file containing Go types for passing into (or returning from) syscalls.
+Generated by godefs and the types file (see above).
diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go
new file mode 100644
index 0000000000..d81fbb5b4e
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/affinity_linux.go
@@ -0,0 +1,124 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// CPU affinity functions
+
+package unix
+
+import (
+ "unsafe"
+)
+
+const cpuSetSize = _CPU_SETSIZE / _NCPUBITS
+
+// CPUSet represents a CPU affinity mask.
+type CPUSet [cpuSetSize]cpuMask
+
+func schedAffinity(trap uintptr, pid int, set *CPUSet) error {
+ _, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(*set)), uintptr(unsafe.Pointer(set)))
+ if e != 0 {
+ return errnoErr(e)
+ }
+ return nil
+}
+
+// SchedGetaffinity gets the CPU affinity mask of the thread specified by pid.
+// If pid is 0 the calling thread is used.
+func SchedGetaffinity(pid int, set *CPUSet) error {
+ return schedAffinity(SYS_SCHED_GETAFFINITY, pid, set)
+}
+
+// SchedSetaffinity sets the CPU affinity mask of the thread specified by pid.
+// If pid is 0 the calling thread is used.
+func SchedSetaffinity(pid int, set *CPUSet) error {
+ return schedAffinity(SYS_SCHED_SETAFFINITY, pid, set)
+}
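+
+// An illustrative usage sketch (assuming the goal is to pin the calling
+// thread to CPU 0):
+//
+//	var set CPUSet
+//	set.Zero()
+//	set.Set(0)
+//	if err := SchedSetaffinity(0, &set); err != nil {
+//		// handle error
+//	}
+//	// SchedGetaffinity(0, &set) reads the mask back, and set.Count()
+//	// reports how many CPUs it contains.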
+
+// Zero clears the set s, so that it contains no CPUs.
+func (s *CPUSet) Zero() {
+ for i := range s {
+ s[i] = 0
+ }
+}
+
+func cpuBitsIndex(cpu int) int {
+ return cpu / _NCPUBITS
+}
+
+func cpuBitsMask(cpu int) cpuMask {
+ return cpuMask(1 << (uint(cpu) % _NCPUBITS))
+}
+
+// Set adds cpu to the set s.
+func (s *CPUSet) Set(cpu int) {
+ i := cpuBitsIndex(cpu)
+ if i < len(s) {
+ s[i] |= cpuBitsMask(cpu)
+ }
+}
+
+// Clear removes cpu from the set s.
+func (s *CPUSet) Clear(cpu int) {
+ i := cpuBitsIndex(cpu)
+ if i < len(s) {
+ s[i] &^= cpuBitsMask(cpu)
+ }
+}
+
+// IsSet reports whether cpu is in the set s.
+func (s *CPUSet) IsSet(cpu int) bool {
+ i := cpuBitsIndex(cpu)
+ if i < len(s) {
+ return s[i]&cpuBitsMask(cpu) != 0
+ }
+ return false
+}
+
+// Count returns the number of CPUs in the set s.
+func (s *CPUSet) Count() int {
+ c := 0
+ for _, b := range s {
+ c += onesCount64(uint64(b))
+ }
+ return c
+}
+
+// onesCount64 is a copy of Go 1.9's math/bits.OnesCount64.
+// Once this package can require Go 1.9, we can delete this
+// and update the caller to use bits.OnesCount64.
+func onesCount64(x uint64) int {
+ const m0 = 0x5555555555555555 // 01010101 ...
+ const m1 = 0x3333333333333333 // 00110011 ...
+ const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ...
+ const m3 = 0x00ff00ff00ff00ff // etc.
+ const m4 = 0x0000ffff0000ffff
+
+ // Implementation: Parallel summing of adjacent bits.
+ // See "Hacker's Delight", Chap. 5: Counting Bits.
+ // The following pattern shows the general approach:
+ //
+ // x = x>>1&(m0&m) + x&(m0&m)
+ // x = x>>2&(m1&m) + x&(m1&m)
+ // x = x>>4&(m2&m) + x&(m2&m)
+ // x = x>>8&(m3&m) + x&(m3&m)
+ // x = x>>16&(m4&m) + x&(m4&m)
+ // x = x>>32&(m5&m) + x&(m5&m)
+ // return int(x)
+ //
+ // Masking (& operations) can be left away when there's no
+ // danger that a field's sum will carry over into the next
+ // field: Since the result cannot be > 64, 8 bits is enough
+ // and we can ignore the masks for the shifts by 8 and up.
+ // Per "Hacker's Delight", the first line can be simplified
+ // more, but it saves at best one instruction, so we leave
+ // it alone for clarity.
+ const m = 1<<64 - 1
+ x = x>>1&(m0&m) + x&(m0&m)
+ x = x>>2&(m1&m) + x&(m1&m)
+ x = (x>>4 + x) & (m2 & m)
+ x += x >> 8
+ x += x >> 16
+ x += x >> 32
+ return int(x) & (1<<7 - 1)
+}
diff --git a/vendor/golang.org/x/sys/unix/asm.s b/vendor/golang.org/x/sys/unix/asm.s
deleted file mode 100644
index 8ed2fdb94b..0000000000
--- a/vendor/golang.org/x/sys/unix/asm.s
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !gccgo
-
-#include "textflag.h"
-
-TEXT ·use(SB),NOSPLIT,$0
- RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s
index 4db2909323..448bebbb59 100644
--- a/vendor/golang.org/x/sys/unix/asm_linux_386.s
+++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s
@@ -10,21 +10,51 @@
// System calls for 386, Linux
//
+// See ../runtime/sys_linux_386.s for the reason why we always use int 0x80
+// instead of the glibc-specific "CALL 0x10(GS)".
+#define INVOKE_SYSCALL INT $0x80
+
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
-TEXT ·Syscall(SB),NOSPLIT,$0-28
+TEXT ·Syscall(SB),NOSPLIT,$0-28
JMP syscall·Syscall(SB)
-TEXT ·Syscall6(SB),NOSPLIT,$0-40
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
JMP syscall·Syscall6(SB)
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
+ CALL runtime·entersyscall(SB)
+ MOVL trap+0(FP), AX // syscall entry
+ MOVL a1+4(FP), BX
+ MOVL a2+8(FP), CX
+ MOVL a3+12(FP), DX
+ MOVL $0, SI
+ MOVL $0, DI
+ INVOKE_SYSCALL
+ MOVL AX, r1+16(FP)
+ MOVL DX, r2+20(FP)
+ CALL runtime·exitsyscall(SB)
+ RET
+
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
JMP syscall·RawSyscall(SB)
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
JMP syscall·RawSyscall6(SB)
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
+ MOVL trap+0(FP), AX // syscall entry
+ MOVL a1+4(FP), BX
+ MOVL a2+8(FP), CX
+ MOVL a3+12(FP), DX
+ MOVL $0, SI
+ MOVL $0, DI
+ INVOKE_SYSCALL
+ MOVL AX, r1+16(FP)
+ MOVL DX, r2+20(FP)
+ RET
+
TEXT ·socketcall(SB),NOSPLIT,$0-36
JMP syscall·socketcall(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
index 44e25c62f9..c6468a9588 100644
--- a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
+++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
@@ -13,17 +13,45 @@
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
-TEXT ·Syscall(SB),NOSPLIT,$0-56
+TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ CALL runtime·entersyscall(SB)
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ $0, R10
+ MOVQ $0, R8
+ MOVQ $0, R9
+ MOVQ trap+0(FP), AX // syscall entry
+ SYSCALL
+ MOVQ AX, r1+32(FP)
+ MOVQ DX, r2+40(FP)
+ CALL runtime·exitsyscall(SB)
+ RET
+
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ $0, R10
+ MOVQ $0, R8
+ MOVQ $0, R9
+ MOVQ trap+0(FP), AX // syscall entry
+ SYSCALL
+ MOVQ AX, r1+32(FP)
+ MOVQ DX, r2+40(FP)
+ RET
+
TEXT ·gettimeofday(SB),NOSPLIT,$0-16
JMP syscall·gettimeofday(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s
index cf0b574658..cf0f3575c1 100644
--- a/vendor/golang.org/x/sys/unix/asm_linux_arm.s
+++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s
@@ -13,17 +13,44 @@
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
-TEXT ·Syscall(SB),NOSPLIT,$0-28
+TEXT ·Syscall(SB),NOSPLIT,$0-28
B syscall·Syscall(SB)
-TEXT ·Syscall6(SB),NOSPLIT,$0-40
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
B syscall·Syscall6(SB)
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
+ BL runtime·entersyscall(SB)
+ MOVW trap+0(FP), R7
+ MOVW a1+4(FP), R0
+ MOVW a2+8(FP), R1
+ MOVW a3+12(FP), R2
+ MOVW $0, R3
+ MOVW $0, R4
+ MOVW $0, R5
+ SWI $0
+ MOVW R0, r1+16(FP)
+ MOVW $0, R0
+ MOVW R0, r2+20(FP)
+ BL runtime·exitsyscall(SB)
+ RET
+
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
B syscall·RawSyscall(SB)
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
B syscall·RawSyscall6(SB)
-TEXT ·seek(SB),NOSPLIT,$0-32
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
+ MOVW trap+0(FP), R7 // syscall entry
+ MOVW a1+4(FP), R0
+ MOVW a2+8(FP), R1
+ MOVW a3+12(FP), R2
+ SWI $0
+ MOVW R0, r1+16(FP)
+ MOVW $0, R0
+ MOVW R0, r2+20(FP)
+ RET
+
+TEXT ·seek(SB),NOSPLIT,$0-28
B syscall·seek(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
index 4be9bfedea..afe6fdf6b1 100644
--- a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
+++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
@@ -11,14 +11,42 @@
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
-TEXT ·Syscall(SB),NOSPLIT,$0-56
+TEXT ·Syscall(SB),NOSPLIT,$0-56
B syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
B syscall·Syscall6(SB)
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R0
+ MOVD a2+16(FP), R1
+ MOVD a3+24(FP), R2
+ MOVD $0, R3
+ MOVD $0, R4
+ MOVD $0, R5
+ MOVD trap+0(FP), R8 // syscall entry
+ SVC
+ MOVD R0, r1+32(FP) // r1
+ MOVD R1, r2+40(FP) // r2
+ BL runtime·exitsyscall(SB)
+ RET
+
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
B syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
B syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVD a1+8(FP), R0
+ MOVD a2+16(FP), R1
+ MOVD a3+24(FP), R2
+ MOVD $0, R3
+ MOVD $0, R4
+ MOVD $0, R5
+ MOVD trap+0(FP), R8 // syscall entry
+ SVC
+ MOVD R0, r1+32(FP)
+ MOVD R1, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
index 724e580c4e..ab9d63831a 100644
--- a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
+++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
@@ -15,14 +15,42 @@
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
-TEXT ·Syscall(SB),NOSPLIT,$0-56
+TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ JAL runtime·entersyscall(SB)
+ MOVV a1+8(FP), R4
+ MOVV a2+16(FP), R5
+ MOVV a3+24(FP), R6
+ MOVV R0, R7
+ MOVV R0, R8
+ MOVV R0, R9
+ MOVV trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVV R2, r1+32(FP)
+ MOVV R3, r2+40(FP)
+ JAL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVV a1+8(FP), R4
+ MOVV a2+16(FP), R5
+ MOVV a3+24(FP), R6
+ MOVV R0, R7
+ MOVV R0, R8
+ MOVV R0, R9
+ MOVV trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVV R2, r1+32(FP)
+ MOVV R3, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
new file mode 100644
index 0000000000..99e5399045
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
@@ -0,0 +1,54 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build mips mipsle
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for mips, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
+ JAL runtime·entersyscall(SB)
+ MOVW a1+4(FP), R4
+ MOVW a2+8(FP), R5
+ MOVW a3+12(FP), R6
+ MOVW R0, R7
+ MOVW trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVW R2, r1+16(FP) // r1
+ MOVW R3, r2+20(FP) // r2
+ JAL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
+ MOVW a1+4(FP), R4
+ MOVW a2+8(FP), R5
+ MOVW a3+12(FP), R6
+ MOVW trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVW R2, r1+16(FP)
+ MOVW R3, r2+20(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
index 8d231feb4b..649e58714d 100644
--- a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
+++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
@@ -15,14 +15,42 @@
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
-TEXT ·Syscall(SB),NOSPLIT,$0-56
+TEXT ·Syscall(SB),NOSPLIT,$0-56
BR syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
BR syscall·Syscall6(SB)
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R3
+ MOVD a2+16(FP), R4
+ MOVD a3+24(FP), R5
+ MOVD R0, R6
+ MOVD R0, R7
+ MOVD R0, R8
+ MOVD trap+0(FP), R9 // syscall entry
+ SYSCALL R9
+ MOVD R3, r1+32(FP)
+ MOVD R4, r2+40(FP)
+ BL runtime·exitsyscall(SB)
+ RET
+
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
BR syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
BR syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVD a1+8(FP), R3
+ MOVD a2+16(FP), R4
+ MOVD a3+24(FP), R5
+ MOVD R0, R6
+ MOVD R0, R7
+ MOVD R0, R8
+ MOVD trap+0(FP), R9 // syscall entry
+ SYSCALL R9
+ MOVD R3, r1+32(FP)
+ MOVD R4, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
index 11889859fb..a5a863c6bd 100644
--- a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
+++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
@@ -21,8 +21,36 @@ TEXT ·Syscall(SB),NOSPLIT,$0-56
TEXT ·Syscall6(SB),NOSPLIT,$0-80
BR syscall·Syscall6(SB)
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R2
+ MOVD a2+16(FP), R3
+ MOVD a3+24(FP), R4
+ MOVD $0, R5
+ MOVD $0, R6
+ MOVD $0, R7
+ MOVD trap+0(FP), R1 // syscall entry
+ SYSCALL
+ MOVD R2, r1+32(FP)
+ MOVD R3, r2+40(FP)
+ BL runtime·exitsyscall(SB)
+ RET
+
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
BR syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
BR syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVD a1+8(FP), R2
+ MOVD a2+16(FP), R3
+ MOVD a3+24(FP), R4
+ MOVD $0, R5
+ MOVD $0, R6
+ MOVD $0, R7
+ MOVD trap+0(FP), R1 // syscall entry
+ SYSCALL
+ MOVD R2, r1+32(FP)
+ MOVD R3, r2+40(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s
new file mode 100644
index 0000000000..469bfa1003
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s
@@ -0,0 +1,29 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for ARM, OpenBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ B syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
index 43ed17a05f..ded8260f3e 100644
--- a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
+++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
@@ -10,8 +10,8 @@
// System calls for amd64, Solaris are implemented in runtime/syscall_solaris.go
//
-TEXT ·sysvicall6(SB),NOSPLIT,$0-64
+TEXT ·sysvicall6(SB),NOSPLIT,$0-88
JMP syscall·sysvicall6(SB)
-TEXT ·rawSysvicall6(SB),NOSPLIT,$0-64
+TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88
JMP syscall·rawSysvicall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go
new file mode 100644
index 0000000000..83b6bceab4
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go
@@ -0,0 +1,195 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd
+
+package unix
+
+import (
+ errorspkg "errors"
+ "fmt"
+)
+
+// Go implementation of C mostly found in /usr/src/sys/kern/subr_capability.c
+
+const (
+ // This is the version of CapRights this package understands. See C implementation for parallels.
+ capRightsGoVersion = CAP_RIGHTS_VERSION_00
+ capArSizeMin = CAP_RIGHTS_VERSION_00 + 2
+ capArSizeMax = capRightsGoVersion + 2
+)
+
+var (
+ bit2idx = []int{
+ -1, 0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1,
+ 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ }
+)
+
+func capidxbit(right uint64) int {
+ return int((right >> 57) & 0x1f)
+}
+
+func rightToIndex(right uint64) (int, error) {
+ idx := capidxbit(right)
+ if idx < 0 || idx >= len(bit2idx) {
+ return -2, fmt.Errorf("index for right 0x%x out of range", right)
+ }
+ return bit2idx[idx], nil
+}
+
+func caprver(right uint64) int {
+ return int(right >> 62)
+}
+
+func capver(rights *CapRights) int {
+ return caprver(rights.Rights[0])
+}
+
+func caparsize(rights *CapRights) int {
+ return capver(rights) + 2
+}
+
+// CapRightsSet sets the permissions in setrights in rights.
+func CapRightsSet(rights *CapRights, setrights []uint64) error {
+ // This is essentially a copy of cap_rights_vset()
+ if capver(rights) != CAP_RIGHTS_VERSION_00 {
+ return fmt.Errorf("bad rights version %d", capver(rights))
+ }
+
+ n := caparsize(rights)
+ if n < capArSizeMin || n > capArSizeMax {
+ return errorspkg.New("bad rights size")
+ }
+
+ for _, right := range setrights {
+ if caprver(right) != CAP_RIGHTS_VERSION_00 {
+ return errorspkg.New("bad right version")
+ }
+ i, err := rightToIndex(right)
+ if err != nil {
+ return err
+ }
+ if i >= n {
+ return errorspkg.New("index overflow")
+ }
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errorspkg.New("index mismatch")
+ }
+ rights.Rights[i] |= right
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errorspkg.New("index mismatch (after assign)")
+ }
+ }
+
+ return nil
+}
+
+// CapRightsClear clears the permissions in clearrights from rights.
+func CapRightsClear(rights *CapRights, clearrights []uint64) error {
+ // This is essentially a copy of cap_rights_vclear()
+ if capver(rights) != CAP_RIGHTS_VERSION_00 {
+ return fmt.Errorf("bad rights version %d", capver(rights))
+ }
+
+ n := caparsize(rights)
+ if n < capArSizeMin || n > capArSizeMax {
+ return errorspkg.New("bad rights size")
+ }
+
+ for _, right := range clearrights {
+ if caprver(right) != CAP_RIGHTS_VERSION_00 {
+ return errorspkg.New("bad right version")
+ }
+ i, err := rightToIndex(right)
+ if err != nil {
+ return err
+ }
+ if i >= n {
+ return errorspkg.New("index overflow")
+ }
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errorspkg.New("index mismatch")
+ }
+ rights.Rights[i] &= ^(right & 0x01FFFFFFFFFFFFFF)
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return errorspkg.New("index mismatch (after assign)")
+ }
+ }
+
+ return nil
+}
+
+// CapRightsIsSet checks whether all the permissions in setrights are present in rights.
+func CapRightsIsSet(rights *CapRights, setrights []uint64) (bool, error) {
+ // This is essentially a copy of cap_rights_is_vset()
+ if capver(rights) != CAP_RIGHTS_VERSION_00 {
+ return false, fmt.Errorf("bad rights version %d", capver(rights))
+ }
+
+ n := caparsize(rights)
+ if n < capArSizeMin || n > capArSizeMax {
+ return false, errorspkg.New("bad rights size")
+ }
+
+ for _, right := range setrights {
+ if caprver(right) != CAP_RIGHTS_VERSION_00 {
+ return false, errorspkg.New("bad right version")
+ }
+ i, err := rightToIndex(right)
+ if err != nil {
+ return false, err
+ }
+ if i >= n {
+ return false, errorspkg.New("index overflow")
+ }
+ if capidxbit(rights.Rights[i]) != capidxbit(right) {
+ return false, errorspkg.New("index mismatch")
+ }
+ if (rights.Rights[i] & right) != right {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+func capright(idx uint64, bit uint64) uint64 {
+ return ((1 << (57 + idx)) | bit)
+}
+
+// CapRightsInit returns a pointer to an initialised CapRights structure filled with rights.
+// See man cap_rights_init(3) and rights(4).
+func CapRightsInit(rights []uint64) (*CapRights, error) {
+ var r CapRights
+ r.Rights[0] = (capRightsGoVersion << 62) | capright(0, 0)
+ r.Rights[1] = capright(1, 0)
+
+ err := CapRightsSet(&r, rights)
+ if err != nil {
+ return nil, err
+ }
+ return &r, nil
+}
+
+// CapRightsLimit reduces the operations permitted on fd to at most those contained in rights.
+// The capability rights on fd can never be increased by CapRightsLimit.
+// See man cap_rights_limit(2) and rights(4).
+func CapRightsLimit(fd uintptr, rights *CapRights) error {
+ return capRightsLimit(int(fd), rights)
+}
+
+// CapRightsGet returns a CapRights structure containing the operations permitted on fd.
+// See man cap_rights_get(3) and rights(4).
+func CapRightsGet(fd uintptr) (*CapRights, error) {
+ r, err := CapRightsInit(nil)
+ if err != nil {
+ return nil, err
+ }
+ err = capRightsGet(capRightsGoVersion, int(fd), r)
+ if err != nil {
+ return nil, err
+ }
+ return r, nil
+}
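+
+// An illustrative usage sketch, assuming f is an open *os.File and that
+// CAP_READ and CAP_SEEK are the rights one wants to keep (any of the CAP_*
+// constants defined for FreeBSD could be used instead):
+//
+//	rights, err := CapRightsInit([]uint64{CAP_READ, CAP_SEEK})
+//	if err != nil {
+//		// handle error
+//	}
+//	if err := CapRightsLimit(f.Fd(), rights); err != nil {
+//		// handle error
+//	}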
diff --git a/vendor/golang.org/x/sys/unix/creds_test.go b/vendor/golang.org/x/sys/unix/creds_test.go
index eaae7c367f..6b292b19a7 100644
--- a/vendor/golang.org/x/sys/unix/creds_test.go
+++ b/vendor/golang.org/x/sys/unix/creds_test.go
@@ -1,4 +1,4 @@
-// Copyright 2012 The Go Authors. All rights reserved.
+// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -8,6 +8,7 @@ package unix_test
import (
"bytes"
+ "go/build"
"net"
"os"
"syscall"
@@ -21,101 +22,131 @@ import (
// sockets. The SO_PASSCRED socket option is enabled on the sending
// socket for this to work.
func TestSCMCredentials(t *testing.T) {
- fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM, 0)
- if err != nil {
- t.Fatalf("Socketpair: %v", err)
+ socketTypeTests := []struct {
+ socketType int
+ dataLen int
+ }{
+ {
+ unix.SOCK_STREAM,
+ 1,
+ }, {
+ unix.SOCK_DGRAM,
+ 0,
+ },
}
- defer unix.Close(fds[0])
- defer unix.Close(fds[1])
- err = unix.SetsockoptInt(fds[0], unix.SOL_SOCKET, unix.SO_PASSCRED, 1)
- if err != nil {
- t.Fatalf("SetsockoptInt: %v", err)
- }
+ for _, tt := range socketTypeTests {
+ if tt.socketType == unix.SOCK_DGRAM && !atLeast1p10() {
+ t.Log("skipping DGRAM test on pre-1.10")
+ continue
+ }
- srvFile := os.NewFile(uintptr(fds[0]), "server")
- defer srvFile.Close()
- srv, err := net.FileConn(srvFile)
- if err != nil {
- t.Errorf("FileConn: %v", err)
- return
- }
- defer srv.Close()
-
- cliFile := os.NewFile(uintptr(fds[1]), "client")
- defer cliFile.Close()
- cli, err := net.FileConn(cliFile)
- if err != nil {
- t.Errorf("FileConn: %v", err)
- return
- }
- defer cli.Close()
+ fds, err := unix.Socketpair(unix.AF_LOCAL, tt.socketType, 0)
+ if err != nil {
+ t.Fatalf("Socketpair: %v", err)
+ }
+ defer unix.Close(fds[0])
+ defer unix.Close(fds[1])
+
+ err = unix.SetsockoptInt(fds[0], unix.SOL_SOCKET, unix.SO_PASSCRED, 1)
+ if err != nil {
+ t.Fatalf("SetsockoptInt: %v", err)
+ }
+
+ srvFile := os.NewFile(uintptr(fds[0]), "server")
+ defer srvFile.Close()
+ srv, err := net.FileConn(srvFile)
+ if err != nil {
+ t.Errorf("FileConn: %v", err)
+ return
+ }
+ defer srv.Close()
+
+ cliFile := os.NewFile(uintptr(fds[1]), "client")
+ defer cliFile.Close()
+ cli, err := net.FileConn(cliFile)
+ if err != nil {
+ t.Errorf("FileConn: %v", err)
+ return
+ }
+ defer cli.Close()
+
+ var ucred unix.Ucred
+ if os.Getuid() != 0 {
+ ucred.Pid = int32(os.Getpid())
+ ucred.Uid = 0
+ ucred.Gid = 0
+ oob := unix.UnixCredentials(&ucred)
+ _, _, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil)
+ if op, ok := err.(*net.OpError); ok {
+ err = op.Err
+ }
+ if sys, ok := err.(*os.SyscallError); ok {
+ err = sys.Err
+ }
+ if err != syscall.EPERM {
+ t.Fatalf("WriteMsgUnix failed with %v, want EPERM", err)
+ }
+ }
- var ucred unix.Ucred
- if os.Getuid() != 0 {
ucred.Pid = int32(os.Getpid())
- ucred.Uid = 0
- ucred.Gid = 0
+ ucred.Uid = uint32(os.Getuid())
+ ucred.Gid = uint32(os.Getgid())
oob := unix.UnixCredentials(&ucred)
- _, _, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil)
- if op, ok := err.(*net.OpError); ok {
- err = op.Err
+
+ // On SOCK_STREAM, this is internally going to send a dummy byte
+ n, oobn, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil)
+ if err != nil {
+ t.Fatalf("WriteMsgUnix: %v", err)
}
- if sys, ok := err.(*os.SyscallError); ok {
- err = sys.Err
+ if n != 0 {
+ t.Fatalf("WriteMsgUnix n = %d, want 0", n)
}
- if err != syscall.EPERM {
- t.Fatalf("WriteMsgUnix failed with %v, want EPERM", err)
+ if oobn != len(oob) {
+ t.Fatalf("WriteMsgUnix oobn = %d, want %d", oobn, len(oob))
}
- }
-
- ucred.Pid = int32(os.Getpid())
- ucred.Uid = uint32(os.Getuid())
- ucred.Gid = uint32(os.Getgid())
- oob := unix.UnixCredentials(&ucred)
- // this is going to send a dummy byte
- n, oobn, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil)
- if err != nil {
- t.Fatalf("WriteMsgUnix: %v", err)
- }
- if n != 0 {
- t.Fatalf("WriteMsgUnix n = %d, want 0", n)
- }
- if oobn != len(oob) {
- t.Fatalf("WriteMsgUnix oobn = %d, want %d", oobn, len(oob))
- }
+ oob2 := make([]byte, 10*len(oob))
+ n, oobn2, flags, _, err := srv.(*net.UnixConn).ReadMsgUnix(nil, oob2)
+ if err != nil {
+ t.Fatalf("ReadMsgUnix: %v", err)
+ }
+ if flags != 0 {
+ t.Fatalf("ReadMsgUnix flags = 0x%x, want 0", flags)
+ }
+ if n != tt.dataLen {
+ t.Fatalf("ReadMsgUnix n = %d, want %d", n, tt.dataLen)
+ }
+ if oobn2 != oobn {
+ // without SO_PASSCRED set on the socket, ReadMsgUnix will
+ // return zero oob bytes
+ t.Fatalf("ReadMsgUnix oobn = %d, want %d", oobn2, oobn)
+ }
+ oob2 = oob2[:oobn2]
+ if !bytes.Equal(oob, oob2) {
+ t.Fatal("ReadMsgUnix oob bytes don't match")
+ }
- oob2 := make([]byte, 10*len(oob))
- n, oobn2, flags, _, err := srv.(*net.UnixConn).ReadMsgUnix(nil, oob2)
- if err != nil {
- t.Fatalf("ReadMsgUnix: %v", err)
- }
- if flags != 0 {
- t.Fatalf("ReadMsgUnix flags = 0x%x, want 0", flags)
- }
- if n != 1 {
- t.Fatalf("ReadMsgUnix n = %d, want 1 (dummy byte)", n)
- }
- if oobn2 != oobn {
- // without SO_PASSCRED set on the socket, ReadMsgUnix will
- // return zero oob bytes
- t.Fatalf("ReadMsgUnix oobn = %d, want %d", oobn2, oobn)
- }
- oob2 = oob2[:oobn2]
- if !bytes.Equal(oob, oob2) {
- t.Fatal("ReadMsgUnix oob bytes don't match")
+ scm, err := unix.ParseSocketControlMessage(oob2)
+ if err != nil {
+ t.Fatalf("ParseSocketControlMessage: %v", err)
+ }
+ newUcred, err := unix.ParseUnixCredentials(&scm[0])
+ if err != nil {
+ t.Fatalf("ParseUnixCredentials: %v", err)
+ }
+ if *newUcred != ucred {
+ t.Fatalf("ParseUnixCredentials = %+v, want %+v", newUcred, ucred)
+ }
}
+}
- scm, err := unix.ParseSocketControlMessage(oob2)
- if err != nil {
- t.Fatalf("ParseSocketControlMessage: %v", err)
- }
- newUcred, err := unix.ParseUnixCredentials(&scm[0])
- if err != nil {
- t.Fatalf("ParseUnixCredentials: %v", err)
- }
- if *newUcred != ucred {
- t.Fatalf("ParseUnixCredentials = %+v, want %+v", newUcred, ucred)
+// atLeast1p10 reports whether we are running on Go 1.10 or later.
+func atLeast1p10() bool {
+ for _, ver := range build.Default.ReleaseTags {
+ if ver == "go1.10" {
+ return true
+ }
}
+ return false
}
diff --git a/vendor/golang.org/x/sys/unix/dev_darwin.go b/vendor/golang.org/x/sys/unix/dev_darwin.go
new file mode 100644
index 0000000000..8d1dc0fa3d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_darwin.go
@@ -0,0 +1,24 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in Darwin's sys/types.h header.
+
+package unix
+
+// Major returns the major component of a Darwin device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 24) & 0xff)
+}
+
+// Minor returns the minor component of a Darwin device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffffff)
+}
+
+// Mkdev returns a Darwin device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 24) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_darwin_test.go b/vendor/golang.org/x/sys/unix/dev_darwin_test.go
new file mode 100644
index 0000000000..bf1adf3a8f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_darwin_test.go
@@ -0,0 +1,51 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package unix_test
+
+import (
+ "fmt"
+ "testing"
+
+ "golang.org/x/sys/unix"
+)
+
+func TestDevices(t *testing.T) {
+ testCases := []struct {
+ path string
+ major uint32
+ minor uint32
+ }{
+ // Most of the device major/minor numbers on Darwin are
+ // dynamically generated by devfs. These are some well-known
+ // static numbers.
+ {"/dev/ttyp0", 4, 0},
+ {"/dev/ttys0", 4, 48},
+ {"/dev/ptyp0", 5, 0},
+ {"/dev/ptyr0", 5, 32},
+ }
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) {
+ var stat unix.Stat_t
+ err := unix.Stat(tc.path, &stat)
+ if err != nil {
+ t.Errorf("failed to stat device: %v", err)
+ return
+ }
+
+ dev := uint64(stat.Rdev)
+ if unix.Major(dev) != tc.major {
+ t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major)
+ }
+ if unix.Minor(dev) != tc.minor {
+ t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor)
+ }
+ if unix.Mkdev(tc.major, tc.minor) != dev {
+ t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev)
+ }
+ })
+ }
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_dragonfly.go b/vendor/golang.org/x/sys/unix/dev_dragonfly.go
new file mode 100644
index 0000000000..8502f202ce
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_dragonfly.go
@@ -0,0 +1,30 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in Dragonfly's sys/types.h header.
+//
+// The information below is extracted and adapted from sys/types.h:
+//
+// Minor gives a cookie instead of an index, in order to avoid changing the
+// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for
+// devices that don't use them.
+
+package unix
+
+// Major returns the major component of a DragonFlyBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 8) & 0xff)
+}
+
+// Minor returns the minor component of a DragonFlyBSD device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffff00ff)
+}
+
+// Mkdev returns a DragonFlyBSD device number generated from the given major and
+// minor components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 8) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_dragonfly_test.go b/vendor/golang.org/x/sys/unix/dev_dragonfly_test.go
new file mode 100644
index 0000000000..9add376638
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_dragonfly_test.go
@@ -0,0 +1,50 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package unix_test
+
+import (
+ "fmt"
+ "testing"
+
+ "golang.org/x/sys/unix"
+)
+
+func TestDevices(t *testing.T) {
+ testCases := []struct {
+ path string
+ major uint32
+ minor uint32
+ }{
+ // Minor is a cookie instead of an index on DragonFlyBSD
+ {"/dev/null", 10, 0x00000002},
+ {"/dev/random", 10, 0x00000003},
+ {"/dev/urandom", 10, 0x00000004},
+ {"/dev/zero", 10, 0x0000000c},
+ {"/dev/bpf", 15, 0xffff00ff},
+ }
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) {
+ var stat unix.Stat_t
+ err := unix.Stat(tc.path, &stat)
+ if err != nil {
+ t.Errorf("failed to stat device: %v", err)
+ return
+ }
+
+ dev := uint64(stat.Rdev)
+ if unix.Major(dev) != tc.major {
+ t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major)
+ }
+ if unix.Minor(dev) != tc.minor {
+ t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor)
+ }
+ if unix.Mkdev(tc.major, tc.minor) != dev {
+ t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev)
+ }
+ })
+ }
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_freebsd.go b/vendor/golang.org/x/sys/unix/dev_freebsd.go
new file mode 100644
index 0000000000..eba3b4bd38
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_freebsd.go
@@ -0,0 +1,30 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in FreeBSD's sys/types.h header.
+//
+// The information below is extracted and adapted from sys/types.h:
+//
+// Minor gives a cookie instead of an index, in order to avoid changing the
+// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for
+// devices that don't use them.
+
+package unix
+
+// Major returns the major component of a FreeBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 8) & 0xff)
+}
+
+// Minor returns the minor component of a FreeBSD device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffff00ff)
+}
+
+// Mkdev returns a FreeBSD device number generated from the given major and
+// minor components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 8) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_linux.go b/vendor/golang.org/x/sys/unix/dev_linux.go
new file mode 100644
index 0000000000..d165d6f308
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_linux.go
@@ -0,0 +1,42 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used by the Linux kernel and glibc.
+//
+// The information below is extracted and adapted from bits/sysmacros.h in the
+// glibc sources:
+//
+// dev_t in glibc is 64-bit, with 32-bit major and minor numbers. glibc's
+// default encoding is MMMM Mmmm mmmM MMmm, where M is a hex digit of the major
+// number and m is a hex digit of the minor number. This is backward compatible
+// with legacy systems where dev_t is 16 bits wide, encoded as MMmm. It is also
+// backward compatible with the Linux kernel, which for some architectures uses
+// 32-bit dev_t, encoded as mmmM MMmm.
+
+package unix
+
+// Major returns the major component of a Linux device number.
+func Major(dev uint64) uint32 {
+ major := uint32((dev & 0x00000000000fff00) >> 8)
+ major |= uint32((dev & 0xfffff00000000000) >> 32)
+ return major
+}
+
+// Minor returns the minor component of a Linux device number.
+func Minor(dev uint64) uint32 {
+ minor := uint32((dev & 0x00000000000000ff) >> 0)
+ minor |= uint32((dev & 0x00000ffffff00000) >> 12)
+ return minor
+}
+
+// Mkdev returns a Linux device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ dev := (uint64(major) & 0x00000fff) << 8
+ dev |= (uint64(major) & 0xfffff000) << 32
+ dev |= (uint64(minor) & 0x000000ff) << 0
+ dev |= (uint64(minor) & 0xffffff00) << 12
+ return dev
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_linux_test.go b/vendor/golang.org/x/sys/unix/dev_linux_test.go
new file mode 100644
index 0000000000..2fd3eadd00
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_linux_test.go
@@ -0,0 +1,53 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package unix_test
+
+import (
+ "fmt"
+ "testing"
+
+ "golang.org/x/sys/unix"
+)
+
+func TestDevices(t *testing.T) {
+ testCases := []struct {
+ path string
+ major uint32
+ minor uint32
+ }{
+ // well known major/minor numbers according to
+ // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/admin-guide/devices.txt
+ {"/dev/null", 1, 3},
+ {"/dev/zero", 1, 5},
+ {"/dev/random", 1, 8},
+ {"/dev/full", 1, 7},
+ {"/dev/urandom", 1, 9},
+ {"/dev/tty", 5, 0},
+ }
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) {
+ var stat unix.Stat_t
+ err := unix.Stat(tc.path, &stat)
+ if err != nil {
+ t.Errorf("failed to stat device: %v", err)
+ return
+ }
+
+ dev := uint64(stat.Rdev)
+ if unix.Major(dev) != tc.major {
+ t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major)
+ }
+ if unix.Minor(dev) != tc.minor {
+ t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor)
+ }
+ if unix.Mkdev(tc.major, tc.minor) != dev {
+ t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev)
+ }
+ })
+
+ }
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_netbsd.go b/vendor/golang.org/x/sys/unix/dev_netbsd.go
new file mode 100644
index 0000000000..b4a203d0c5
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_netbsd.go
@@ -0,0 +1,29 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in NetBSD's sys/types.h header.
+
+package unix
+
+// Major returns the major component of a NetBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev & 0x000fff00) >> 8)
+}
+
+// Minor returns the minor component of a NetBSD device number.
+func Minor(dev uint64) uint32 {
+ minor := uint32((dev & 0x000000ff) >> 0)
+ minor |= uint32((dev & 0xfff00000) >> 12)
+ return minor
+}
+
+// Mkdev returns a NetBSD device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ dev := (uint64(major) << 8) & 0x000fff00
+ dev |= (uint64(minor) << 12) & 0xfff00000
+ dev |= (uint64(minor) << 0) & 0x000000ff
+ return dev
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_netbsd_test.go b/vendor/golang.org/x/sys/unix/dev_netbsd_test.go
new file mode 100644
index 0000000000..441058a104
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_netbsd_test.go
@@ -0,0 +1,50 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package unix_test
+
+import (
+ "fmt"
+ "testing"
+
+ "golang.org/x/sys/unix"
+)
+
+func TestDevices(t *testing.T) {
+ testCases := []struct {
+ path string
+ major uint32
+ minor uint32
+ }{
+ // well known major/minor numbers according to /dev/MAKEDEV on
+ // NetBSD 8.0
+ {"/dev/null", 2, 2},
+ {"/dev/zero", 2, 12},
+ {"/dev/random", 46, 0},
+ {"/dev/urandom", 46, 1},
+ }
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) {
+ var stat unix.Stat_t
+ err := unix.Stat(tc.path, &stat)
+ if err != nil {
+ t.Errorf("failed to stat device: %v", err)
+ return
+ }
+
+ dev := uint64(stat.Rdev)
+ if unix.Major(dev) != tc.major {
+ t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major)
+ }
+ if unix.Minor(dev) != tc.minor {
+ t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor)
+ }
+ if unix.Mkdev(tc.major, tc.minor) != dev {
+ t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev)
+ }
+ })
+ }
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_openbsd.go b/vendor/golang.org/x/sys/unix/dev_openbsd.go
new file mode 100644
index 0000000000..f3430c42ff
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_openbsd.go
@@ -0,0 +1,29 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in OpenBSD's sys/types.h header.
+
+package unix
+
+// Major returns the major component of an OpenBSD device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev & 0x0000ff00) >> 8)
+}
+
+// Minor returns the minor component of an OpenBSD device number.
+func Minor(dev uint64) uint32 {
+ minor := uint32((dev & 0x000000ff) >> 0)
+ minor |= uint32((dev & 0xffff0000) >> 8)
+ return minor
+}
+
+// Mkdev returns an OpenBSD device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ dev := (uint64(major) << 8) & 0x0000ff00
+ dev |= (uint64(minor) << 8) & 0xffff0000
+ dev |= (uint64(minor) << 0) & 0x000000ff
+ return dev
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_openbsd_test.go b/vendor/golang.org/x/sys/unix/dev_openbsd_test.go
new file mode 100644
index 0000000000..e6cb64ff3d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_openbsd_test.go
@@ -0,0 +1,54 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package unix_test
+
+import (
+ "fmt"
+ "testing"
+
+ "golang.org/x/sys/unix"
+)
+
+func TestDevices(t *testing.T) {
+ testCases := []struct {
+ path string
+ major uint32
+ minor uint32
+ }{
+ // well known major/minor numbers according to /dev/MAKEDEV on
+ // OpenBSD 6.0
+ {"/dev/null", 2, 2},
+ {"/dev/zero", 2, 12},
+ {"/dev/ttyp0", 5, 0},
+ {"/dev/ttyp1", 5, 1},
+ {"/dev/random", 45, 0},
+ {"/dev/srandom", 45, 1},
+ {"/dev/urandom", 45, 2},
+ {"/dev/arandom", 45, 3},
+ }
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) {
+ var stat unix.Stat_t
+ err := unix.Stat(tc.path, &stat)
+ if err != nil {
+ t.Errorf("failed to stat device: %v", err)
+ return
+ }
+
+ dev := uint64(stat.Rdev)
+ if unix.Major(dev) != tc.major {
+ t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major)
+ }
+ if unix.Minor(dev) != tc.minor {
+ t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor)
+ }
+ if unix.Mkdev(tc.major, tc.minor) != dev {
+ t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev)
+ }
+ })
+ }
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_solaris_test.go b/vendor/golang.org/x/sys/unix/dev_solaris_test.go
new file mode 100644
index 0000000000..656508c971
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_solaris_test.go
@@ -0,0 +1,51 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package unix_test
+
+import (
+ "fmt"
+ "testing"
+
+ "golang.org/x/sys/unix"
+)
+
+func TestDevices(t *testing.T) {
+ testCases := []struct {
+ path string
+ major uint32
+ minor uint32
+ }{
+ // Well-known major/minor numbers on OpenSolaris according to
+ // /etc/name_to_major
+ {"/dev/zero", 134, 12},
+ {"/dev/null", 134, 2},
+ {"/dev/ptyp0", 172, 0},
+ {"/dev/ttyp0", 175, 0},
+ {"/dev/ttyp1", 175, 1},
+ }
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("%s %v:%v", tc.path, tc.major, tc.minor), func(t *testing.T) {
+ var stat unix.Stat_t
+ err := unix.Stat(tc.path, &stat)
+ if err != nil {
+ t.Errorf("failed to stat device: %v", err)
+ return
+ }
+
+ dev := uint64(stat.Rdev)
+ if unix.Major(dev) != tc.major {
+ t.Errorf("for %s Major(%#x) == %d, want %d", tc.path, dev, unix.Major(dev), tc.major)
+ }
+ if unix.Minor(dev) != tc.minor {
+ t.Errorf("for %s Minor(%#x) == %d, want %d", tc.path, dev, unix.Minor(dev), tc.minor)
+ }
+ if unix.Mkdev(tc.major, tc.minor) != dev {
+ t.Errorf("for %s Mkdev(%d, %d) == %#x, want %#x", tc.path, tc.major, tc.minor, unix.Mkdev(tc.major, tc.minor), dev)
+ }
+ })
+ }
+}
diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go
new file mode 100644
index 0000000000..bd475812b7
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dirent.go
@@ -0,0 +1,102 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
+
+package unix
+
+import "unsafe"
+
+// readInt returns the size-bytes unsigned integer in native byte order at offset off.
+func readInt(b []byte, off, size uintptr) (u uint64, ok bool) {
+ if len(b) < int(off+size) {
+ return 0, false
+ }
+ if isBigEndian {
+ return readIntBE(b[off:], size), true
+ }
+ return readIntLE(b[off:], size), true
+}
+
+func readIntBE(b []byte, size uintptr) uint64 {
+ switch size {
+ case 1:
+ return uint64(b[0])
+ case 2:
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[1]) | uint64(b[0])<<8
+ case 4:
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24
+ case 8:
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+ default:
+ panic("syscall: readInt with unsupported size")
+ }
+}
+
+func readIntLE(b []byte, size uintptr) uint64 {
+ switch size {
+ case 1:
+ return uint64(b[0])
+ case 2:
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8
+ case 4:
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24
+ case 8:
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ default:
+ panic("syscall: readInt with unsupported size")
+ }
+}
+
+// ParseDirent parses up to max directory entries in buf,
+// appending the names to names. It returns the number of
+// bytes consumed from buf, the number of entries added
+// to names, and the new names slice.
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ count = 0
+ for max != 0 && len(buf) > 0 {
+ reclen, ok := direntReclen(buf)
+ if !ok || reclen > uint64(len(buf)) {
+ return origlen, count, names
+ }
+ rec := buf[:reclen]
+ buf = buf[reclen:]
+ ino, ok := direntIno(rec)
+ if !ok {
+ break
+ }
+ if ino == 0 { // File absent in directory.
+ continue
+ }
+ const namoff = uint64(unsafe.Offsetof(Dirent{}.Name))
+ namlen, ok := direntNamlen(rec)
+ if !ok || namoff+namlen > uint64(len(rec)) {
+ break
+ }
+ name := rec[namoff : namoff+namlen]
+ for i, c := range name {
+ if c == 0 {
+ name = name[:i]
+ break
+ }
+ }
+ // Check for useless names before allocating a string.
+ if string(name) == "." || string(name) == ".." {
+ continue
+ }
+ max--
+ count++
+ names = append(names, string(name))
+ }
+ return origlen - len(buf), count, names
+}
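+
+// An illustrative call (buf would typically be filled by ReadDirent on an
+// open directory fd; the variable names are examples only):
+//
+//	consumed, count, names := ParseDirent(buf, -1, nil)
+//	// names now holds up to count entry names ("." and ".." are skipped),
+//	// and consumed reports how many bytes of buf were parsed.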
diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go
new file mode 100644
index 0000000000..5e9269063f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/endian_big.go
@@ -0,0 +1,9 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// +build ppc64 s390x mips mips64
+
+package unix
+
+const isBigEndian = true
diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go
new file mode 100644
index 0000000000..085df2d8dd
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/endian_little.go
@@ -0,0 +1,9 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// +build 386 amd64 amd64p32 arm arm64 ppc64le mipsle mips64le
+
+package unix
+
+const isBigEndian = false
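These two files pin isBigEndian per GOARCH at compile time, so the byte-order branch in readInt folds away. The equivalent runtime probe, shown only to illustrate what the build tags avoid, would look roughly like this:

```go
package main

import (
	"fmt"
	"unsafe"
)

// runtimeIsBigEndian detects host byte order at runtime; the build-tagged
// constants let the compiler decide this once, at build time, instead.
func runtimeIsBigEndian() bool {
	var x uint16 = 1
	return *(*byte)(unsafe.Pointer(&x)) == 0
}

func main() {
	fmt.Println("big endian:", runtimeIsBigEndian())
}
```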
diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go
index 45e281a047..706b3cd1dd 100644
--- a/vendor/golang.org/x/sys/unix/env_unix.go
+++ b/vendor/golang.org/x/sys/unix/env_unix.go
@@ -1,4 +1,4 @@
-// Copyright 2010 The Go Authors. All rights reserved.
+// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -25,3 +25,7 @@ func Clearenv() {
func Environ() []string {
return syscall.Environ()
}
+
+func Unsetenv(key string) error {
+ return syscall.Unsetenv(key)
+}
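The new Unsetenv wrapper simply forwards to the standard library now that Go 1.4+ can be assumed; a small usage sketch (the variable name is arbitrary):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// EXAMPLE_VAR is an arbitrary name used only for this sketch.
	if err := unix.Setenv("EXAMPLE_VAR", "1"); err != nil {
		panic(err)
	}
	if err := unix.Unsetenv("EXAMPLE_VAR"); err != nil {
		fmt.Println("unsetenv failed:", err)
	}
}
```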
diff --git a/vendor/golang.org/x/sys/unix/env_unset.go b/vendor/golang.org/x/sys/unix/env_unset.go
deleted file mode 100644
index 9222262559..0000000000
--- a/vendor/golang.org/x/sys/unix/env_unset.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.4
-
-package unix
-
-import "syscall"
-
-func Unsetenv(key string) error {
- // This was added in Go 1.4.
- return syscall.Unsetenv(key)
-}
diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go
new file mode 100644
index 0000000000..c56bc8b05e
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go
@@ -0,0 +1,227 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep
+// them here for backwards compatibility.
+
+package unix
+
+const (
+ IFF_SMART = 0x20
+ IFT_1822 = 0x2
+ IFT_A12MPPSWITCH = 0x82
+ IFT_AAL2 = 0xbb
+ IFT_AAL5 = 0x31
+ IFT_ADSL = 0x5e
+ IFT_AFLANE8023 = 0x3b
+ IFT_AFLANE8025 = 0x3c
+ IFT_ARAP = 0x58
+ IFT_ARCNET = 0x23
+ IFT_ARCNETPLUS = 0x24
+ IFT_ASYNC = 0x54
+ IFT_ATM = 0x25
+ IFT_ATMDXI = 0x69
+ IFT_ATMFUNI = 0x6a
+ IFT_ATMIMA = 0x6b
+ IFT_ATMLOGICAL = 0x50
+ IFT_ATMRADIO = 0xbd
+ IFT_ATMSUBINTERFACE = 0x86
+ IFT_ATMVCIENDPT = 0xc2
+ IFT_ATMVIRTUAL = 0x95
+ IFT_BGPPOLICYACCOUNTING = 0xa2
+ IFT_BSC = 0x53
+ IFT_CCTEMUL = 0x3d
+ IFT_CEPT = 0x13
+ IFT_CES = 0x85
+ IFT_CHANNEL = 0x46
+ IFT_CNR = 0x55
+ IFT_COFFEE = 0x84
+ IFT_COMPOSITELINK = 0x9b
+ IFT_DCN = 0x8d
+ IFT_DIGITALPOWERLINE = 0x8a
+ IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
+ IFT_DLSW = 0x4a
+ IFT_DOCSCABLEDOWNSTREAM = 0x80
+ IFT_DOCSCABLEMACLAYER = 0x7f
+ IFT_DOCSCABLEUPSTREAM = 0x81
+ IFT_DS0 = 0x51
+ IFT_DS0BUNDLE = 0x52
+ IFT_DS1FDL = 0xaa
+ IFT_DS3 = 0x1e
+ IFT_DTM = 0x8c
+ IFT_DVBASILN = 0xac
+ IFT_DVBASIOUT = 0xad
+ IFT_DVBRCCDOWNSTREAM = 0x93
+ IFT_DVBRCCMACLAYER = 0x92
+ IFT_DVBRCCUPSTREAM = 0x94
+ IFT_ENC = 0xf4
+ IFT_EON = 0x19
+ IFT_EPLRS = 0x57
+ IFT_ESCON = 0x49
+ IFT_ETHER = 0x6
+ IFT_FAITH = 0xf2
+ IFT_FAST = 0x7d
+ IFT_FASTETHER = 0x3e
+ IFT_FASTETHERFX = 0x45
+ IFT_FDDI = 0xf
+ IFT_FIBRECHANNEL = 0x38
+ IFT_FRAMERELAYINTERCONNECT = 0x3a
+ IFT_FRAMERELAYMPI = 0x5c
+ IFT_FRDLCIENDPT = 0xc1
+ IFT_FRELAY = 0x20
+ IFT_FRELAYDCE = 0x2c
+ IFT_FRF16MFRBUNDLE = 0xa3
+ IFT_FRFORWARD = 0x9e
+ IFT_G703AT2MB = 0x43
+ IFT_G703AT64K = 0x42
+ IFT_GIF = 0xf0
+ IFT_GIGABITETHERNET = 0x75
+ IFT_GR303IDT = 0xb2
+ IFT_GR303RDT = 0xb1
+ IFT_H323GATEKEEPER = 0xa4
+ IFT_H323PROXY = 0xa5
+ IFT_HDH1822 = 0x3
+ IFT_HDLC = 0x76
+ IFT_HDSL2 = 0xa8
+ IFT_HIPERLAN2 = 0xb7
+ IFT_HIPPI = 0x2f
+ IFT_HIPPIINTERFACE = 0x39
+ IFT_HOSTPAD = 0x5a
+ IFT_HSSI = 0x2e
+ IFT_HY = 0xe
+ IFT_IBM370PARCHAN = 0x48
+ IFT_IDSL = 0x9a
+ IFT_IEEE80211 = 0x47
+ IFT_IEEE80212 = 0x37
+ IFT_IEEE8023ADLAG = 0xa1
+ IFT_IFGSN = 0x91
+ IFT_IMT = 0xbe
+ IFT_INTERLEAVE = 0x7c
+ IFT_IP = 0x7e
+ IFT_IPFORWARD = 0x8e
+ IFT_IPOVERATM = 0x72
+ IFT_IPOVERCDLC = 0x6d
+ IFT_IPOVERCLAW = 0x6e
+ IFT_IPSWITCH = 0x4e
+ IFT_IPXIP = 0xf9
+ IFT_ISDN = 0x3f
+ IFT_ISDNBASIC = 0x14
+ IFT_ISDNPRIMARY = 0x15
+ IFT_ISDNS = 0x4b
+ IFT_ISDNU = 0x4c
+ IFT_ISO88022LLC = 0x29
+ IFT_ISO88023 = 0x7
+ IFT_ISO88024 = 0x8
+ IFT_ISO88025 = 0x9
+ IFT_ISO88025CRFPINT = 0x62
+ IFT_ISO88025DTR = 0x56
+ IFT_ISO88025FIBER = 0x73
+ IFT_ISO88026 = 0xa
+ IFT_ISUP = 0xb3
+ IFT_L3IPXVLAN = 0x89
+ IFT_LAPB = 0x10
+ IFT_LAPD = 0x4d
+ IFT_LAPF = 0x77
+ IFT_LOCALTALK = 0x2a
+ IFT_LOOP = 0x18
+ IFT_MEDIAMAILOVERIP = 0x8b
+ IFT_MFSIGLINK = 0xa7
+ IFT_MIOX25 = 0x26
+ IFT_MODEM = 0x30
+ IFT_MPC = 0x71
+ IFT_MPLS = 0xa6
+ IFT_MPLSTUNNEL = 0x96
+ IFT_MSDSL = 0x8f
+ IFT_MVL = 0xbf
+ IFT_MYRINET = 0x63
+ IFT_NFAS = 0xaf
+ IFT_NSIP = 0x1b
+ IFT_OPTICALCHANNEL = 0xc3
+ IFT_OPTICALTRANSPORT = 0xc4
+ IFT_OTHER = 0x1
+ IFT_P10 = 0xc
+ IFT_P80 = 0xd
+ IFT_PARA = 0x22
+ IFT_PFLOG = 0xf6
+ IFT_PFSYNC = 0xf7
+ IFT_PLC = 0xae
+ IFT_POS = 0xab
+ IFT_PPPMULTILINKBUNDLE = 0x6c
+ IFT_PROPBWAP2MP = 0xb8
+ IFT_PROPCNLS = 0x59
+ IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
+ IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
+ IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
+ IFT_PROPMUX = 0x36
+ IFT_PROPWIRELESSP2P = 0x9d
+ IFT_PTPSERIAL = 0x16
+ IFT_PVC = 0xf1
+ IFT_QLLC = 0x44
+ IFT_RADIOMAC = 0xbc
+ IFT_RADSL = 0x5f
+ IFT_REACHDSL = 0xc0
+ IFT_RFC1483 = 0x9f
+ IFT_RS232 = 0x21
+ IFT_RSRB = 0x4f
+ IFT_SDLC = 0x11
+ IFT_SDSL = 0x60
+ IFT_SHDSL = 0xa9
+ IFT_SIP = 0x1f
+ IFT_SLIP = 0x1c
+ IFT_SMDSDXI = 0x2b
+ IFT_SMDSICIP = 0x34
+ IFT_SONET = 0x27
+ IFT_SONETOVERHEADCHANNEL = 0xb9
+ IFT_SONETPATH = 0x32
+ IFT_SONETVT = 0x33
+ IFT_SRP = 0x97
+ IFT_SS7SIGLINK = 0x9c
+ IFT_STACKTOSTACK = 0x6f
+ IFT_STARLAN = 0xb
+ IFT_STF = 0xd7
+ IFT_T1 = 0x12
+ IFT_TDLC = 0x74
+ IFT_TERMPAD = 0x5b
+ IFT_TR008 = 0xb0
+ IFT_TRANSPHDLC = 0x7b
+ IFT_TUNNEL = 0x83
+ IFT_ULTRA = 0x1d
+ IFT_USB = 0xa0
+ IFT_V11 = 0x40
+ IFT_V35 = 0x2d
+ IFT_V36 = 0x41
+ IFT_V37 = 0x78
+ IFT_VDSL = 0x61
+ IFT_VIRTUALIPADDRESS = 0x70
+ IFT_VOICEEM = 0x64
+ IFT_VOICEENCAP = 0x67
+ IFT_VOICEFXO = 0x65
+ IFT_VOICEFXS = 0x66
+ IFT_VOICEOVERATM = 0x98
+ IFT_VOICEOVERFRAMERELAY = 0x99
+ IFT_VOICEOVERIP = 0x68
+ IFT_X213 = 0x5d
+ IFT_X25 = 0x5
+ IFT_X25DDN = 0x4
+ IFT_X25HUNTGROUP = 0x7a
+ IFT_X25MLP = 0x79
+ IFT_X25PLE = 0x28
+ IFT_XETHER = 0x1a
+ IPPROTO_MAXID = 0x34
+ IPV6_FAITH = 0x1d
+ IP_FAITH = 0x16
+ MAP_NORESERVE = 0x40
+ MAP_RENAME = 0x20
+ NET_RT_MAXID = 0x6
+ RTF_PRCLONING = 0x10000
+ RTM_OLDADD = 0x9
+ RTM_OLDDEL = 0xa
+ SIOCADDRT = 0x8030720a
+ SIOCALIFADDR = 0x8118691b
+ SIOCDELRT = 0x8030720b
+ SIOCDLIFADDR = 0x8118691d
+ SIOCGLIFADDR = 0xc118691c
+ SIOCGLIFPHYADDR = 0xc118694b
+ SIOCSLIFPHYADDR = 0x8118694a
+)
diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go
new file mode 100644
index 0000000000..3e9771175a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go
@@ -0,0 +1,227 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep
+// them here for backwards compatibility.
+
+package unix
+
+const (
+ IFF_SMART = 0x20
+ IFT_1822 = 0x2
+ IFT_A12MPPSWITCH = 0x82
+ IFT_AAL2 = 0xbb
+ IFT_AAL5 = 0x31
+ IFT_ADSL = 0x5e
+ IFT_AFLANE8023 = 0x3b
+ IFT_AFLANE8025 = 0x3c
+ IFT_ARAP = 0x58
+ IFT_ARCNET = 0x23
+ IFT_ARCNETPLUS = 0x24
+ IFT_ASYNC = 0x54
+ IFT_ATM = 0x25
+ IFT_ATMDXI = 0x69
+ IFT_ATMFUNI = 0x6a
+ IFT_ATMIMA = 0x6b
+ IFT_ATMLOGICAL = 0x50
+ IFT_ATMRADIO = 0xbd
+ IFT_ATMSUBINTERFACE = 0x86
+ IFT_ATMVCIENDPT = 0xc2
+ IFT_ATMVIRTUAL = 0x95
+ IFT_BGPPOLICYACCOUNTING = 0xa2
+ IFT_BSC = 0x53
+ IFT_CCTEMUL = 0x3d
+ IFT_CEPT = 0x13
+ IFT_CES = 0x85
+ IFT_CHANNEL = 0x46
+ IFT_CNR = 0x55
+ IFT_COFFEE = 0x84
+ IFT_COMPOSITELINK = 0x9b
+ IFT_DCN = 0x8d
+ IFT_DIGITALPOWERLINE = 0x8a
+ IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
+ IFT_DLSW = 0x4a
+ IFT_DOCSCABLEDOWNSTREAM = 0x80
+ IFT_DOCSCABLEMACLAYER = 0x7f
+ IFT_DOCSCABLEUPSTREAM = 0x81
+ IFT_DS0 = 0x51
+ IFT_DS0BUNDLE = 0x52
+ IFT_DS1FDL = 0xaa
+ IFT_DS3 = 0x1e
+ IFT_DTM = 0x8c
+ IFT_DVBASILN = 0xac
+ IFT_DVBASIOUT = 0xad
+ IFT_DVBRCCDOWNSTREAM = 0x93
+ IFT_DVBRCCMACLAYER = 0x92
+ IFT_DVBRCCUPSTREAM = 0x94
+ IFT_ENC = 0xf4
+ IFT_EON = 0x19
+ IFT_EPLRS = 0x57
+ IFT_ESCON = 0x49
+ IFT_ETHER = 0x6
+ IFT_FAITH = 0xf2
+ IFT_FAST = 0x7d
+ IFT_FASTETHER = 0x3e
+ IFT_FASTETHERFX = 0x45
+ IFT_FDDI = 0xf
+ IFT_FIBRECHANNEL = 0x38
+ IFT_FRAMERELAYINTERCONNECT = 0x3a
+ IFT_FRAMERELAYMPI = 0x5c
+ IFT_FRDLCIENDPT = 0xc1
+ IFT_FRELAY = 0x20
+ IFT_FRELAYDCE = 0x2c
+ IFT_FRF16MFRBUNDLE = 0xa3
+ IFT_FRFORWARD = 0x9e
+ IFT_G703AT2MB = 0x43
+ IFT_G703AT64K = 0x42
+ IFT_GIF = 0xf0
+ IFT_GIGABITETHERNET = 0x75
+ IFT_GR303IDT = 0xb2
+ IFT_GR303RDT = 0xb1
+ IFT_H323GATEKEEPER = 0xa4
+ IFT_H323PROXY = 0xa5
+ IFT_HDH1822 = 0x3
+ IFT_HDLC = 0x76
+ IFT_HDSL2 = 0xa8
+ IFT_HIPERLAN2 = 0xb7
+ IFT_HIPPI = 0x2f
+ IFT_HIPPIINTERFACE = 0x39
+ IFT_HOSTPAD = 0x5a
+ IFT_HSSI = 0x2e
+ IFT_HY = 0xe
+ IFT_IBM370PARCHAN = 0x48
+ IFT_IDSL = 0x9a
+ IFT_IEEE80211 = 0x47
+ IFT_IEEE80212 = 0x37
+ IFT_IEEE8023ADLAG = 0xa1
+ IFT_IFGSN = 0x91
+ IFT_IMT = 0xbe
+ IFT_INTERLEAVE = 0x7c
+ IFT_IP = 0x7e
+ IFT_IPFORWARD = 0x8e
+ IFT_IPOVERATM = 0x72
+ IFT_IPOVERCDLC = 0x6d
+ IFT_IPOVERCLAW = 0x6e
+ IFT_IPSWITCH = 0x4e
+ IFT_IPXIP = 0xf9
+ IFT_ISDN = 0x3f
+ IFT_ISDNBASIC = 0x14
+ IFT_ISDNPRIMARY = 0x15
+ IFT_ISDNS = 0x4b
+ IFT_ISDNU = 0x4c
+ IFT_ISO88022LLC = 0x29
+ IFT_ISO88023 = 0x7
+ IFT_ISO88024 = 0x8
+ IFT_ISO88025 = 0x9
+ IFT_ISO88025CRFPINT = 0x62
+ IFT_ISO88025DTR = 0x56
+ IFT_ISO88025FIBER = 0x73
+ IFT_ISO88026 = 0xa
+ IFT_ISUP = 0xb3
+ IFT_L3IPXVLAN = 0x89
+ IFT_LAPB = 0x10
+ IFT_LAPD = 0x4d
+ IFT_LAPF = 0x77
+ IFT_LOCALTALK = 0x2a
+ IFT_LOOP = 0x18
+ IFT_MEDIAMAILOVERIP = 0x8b
+ IFT_MFSIGLINK = 0xa7
+ IFT_MIOX25 = 0x26
+ IFT_MODEM = 0x30
+ IFT_MPC = 0x71
+ IFT_MPLS = 0xa6
+ IFT_MPLSTUNNEL = 0x96
+ IFT_MSDSL = 0x8f
+ IFT_MVL = 0xbf
+ IFT_MYRINET = 0x63
+ IFT_NFAS = 0xaf
+ IFT_NSIP = 0x1b
+ IFT_OPTICALCHANNEL = 0xc3
+ IFT_OPTICALTRANSPORT = 0xc4
+ IFT_OTHER = 0x1
+ IFT_P10 = 0xc
+ IFT_P80 = 0xd
+ IFT_PARA = 0x22
+ IFT_PFLOG = 0xf6
+ IFT_PFSYNC = 0xf7
+ IFT_PLC = 0xae
+ IFT_POS = 0xab
+ IFT_PPPMULTILINKBUNDLE = 0x6c
+ IFT_PROPBWAP2MP = 0xb8
+ IFT_PROPCNLS = 0x59
+ IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
+ IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
+ IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
+ IFT_PROPMUX = 0x36
+ IFT_PROPWIRELESSP2P = 0x9d
+ IFT_PTPSERIAL = 0x16
+ IFT_PVC = 0xf1
+ IFT_QLLC = 0x44
+ IFT_RADIOMAC = 0xbc
+ IFT_RADSL = 0x5f
+ IFT_REACHDSL = 0xc0
+ IFT_RFC1483 = 0x9f
+ IFT_RS232 = 0x21
+ IFT_RSRB = 0x4f
+ IFT_SDLC = 0x11
+ IFT_SDSL = 0x60
+ IFT_SHDSL = 0xa9
+ IFT_SIP = 0x1f
+ IFT_SLIP = 0x1c
+ IFT_SMDSDXI = 0x2b
+ IFT_SMDSICIP = 0x34
+ IFT_SONET = 0x27
+ IFT_SONETOVERHEADCHANNEL = 0xb9
+ IFT_SONETPATH = 0x32
+ IFT_SONETVT = 0x33
+ IFT_SRP = 0x97
+ IFT_SS7SIGLINK = 0x9c
+ IFT_STACKTOSTACK = 0x6f
+ IFT_STARLAN = 0xb
+ IFT_STF = 0xd7
+ IFT_T1 = 0x12
+ IFT_TDLC = 0x74
+ IFT_TERMPAD = 0x5b
+ IFT_TR008 = 0xb0
+ IFT_TRANSPHDLC = 0x7b
+ IFT_TUNNEL = 0x83
+ IFT_ULTRA = 0x1d
+ IFT_USB = 0xa0
+ IFT_V11 = 0x40
+ IFT_V35 = 0x2d
+ IFT_V36 = 0x41
+ IFT_V37 = 0x78
+ IFT_VDSL = 0x61
+ IFT_VIRTUALIPADDRESS = 0x70
+ IFT_VOICEEM = 0x64
+ IFT_VOICEENCAP = 0x67
+ IFT_VOICEFXO = 0x65
+ IFT_VOICEFXS = 0x66
+ IFT_VOICEOVERATM = 0x98
+ IFT_VOICEOVERFRAMERELAY = 0x99
+ IFT_VOICEOVERIP = 0x68
+ IFT_X213 = 0x5d
+ IFT_X25 = 0x5
+ IFT_X25DDN = 0x4
+ IFT_X25HUNTGROUP = 0x7a
+ IFT_X25MLP = 0x79
+ IFT_X25PLE = 0x28
+ IFT_XETHER = 0x1a
+ IPPROTO_MAXID = 0x34
+ IPV6_FAITH = 0x1d
+ IP_FAITH = 0x16
+ MAP_NORESERVE = 0x40
+ MAP_RENAME = 0x20
+ NET_RT_MAXID = 0x6
+ RTF_PRCLONING = 0x10000
+ RTM_OLDADD = 0x9
+ RTM_OLDDEL = 0xa
+ SIOCADDRT = 0x8040720a
+ SIOCALIFADDR = 0x8118691b
+ SIOCDELRT = 0x8040720b
+ SIOCDLIFADDR = 0x8118691d
+ SIOCGLIFADDR = 0xc118691c
+ SIOCGLIFPHYADDR = 0xc118694b
+ SIOCSLIFPHYADDR = 0x8118694a
+)
diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go
new file mode 100644
index 0000000000..856dca3254
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go
@@ -0,0 +1,226 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+const (
+ IFT_1822 = 0x2
+ IFT_A12MPPSWITCH = 0x82
+ IFT_AAL2 = 0xbb
+ IFT_AAL5 = 0x31
+ IFT_ADSL = 0x5e
+ IFT_AFLANE8023 = 0x3b
+ IFT_AFLANE8025 = 0x3c
+ IFT_ARAP = 0x58
+ IFT_ARCNET = 0x23
+ IFT_ARCNETPLUS = 0x24
+ IFT_ASYNC = 0x54
+ IFT_ATM = 0x25
+ IFT_ATMDXI = 0x69
+ IFT_ATMFUNI = 0x6a
+ IFT_ATMIMA = 0x6b
+ IFT_ATMLOGICAL = 0x50
+ IFT_ATMRADIO = 0xbd
+ IFT_ATMSUBINTERFACE = 0x86
+ IFT_ATMVCIENDPT = 0xc2
+ IFT_ATMVIRTUAL = 0x95
+ IFT_BGPPOLICYACCOUNTING = 0xa2
+ IFT_BSC = 0x53
+ IFT_CCTEMUL = 0x3d
+ IFT_CEPT = 0x13
+ IFT_CES = 0x85
+ IFT_CHANNEL = 0x46
+ IFT_CNR = 0x55
+ IFT_COFFEE = 0x84
+ IFT_COMPOSITELINK = 0x9b
+ IFT_DCN = 0x8d
+ IFT_DIGITALPOWERLINE = 0x8a
+ IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
+ IFT_DLSW = 0x4a
+ IFT_DOCSCABLEDOWNSTREAM = 0x80
+ IFT_DOCSCABLEMACLAYER = 0x7f
+ IFT_DOCSCABLEUPSTREAM = 0x81
+ IFT_DS0 = 0x51
+ IFT_DS0BUNDLE = 0x52
+ IFT_DS1FDL = 0xaa
+ IFT_DS3 = 0x1e
+ IFT_DTM = 0x8c
+ IFT_DVBASILN = 0xac
+ IFT_DVBASIOUT = 0xad
+ IFT_DVBRCCDOWNSTREAM = 0x93
+ IFT_DVBRCCMACLAYER = 0x92
+ IFT_DVBRCCUPSTREAM = 0x94
+ IFT_ENC = 0xf4
+ IFT_EON = 0x19
+ IFT_EPLRS = 0x57
+ IFT_ESCON = 0x49
+ IFT_ETHER = 0x6
+ IFT_FAST = 0x7d
+ IFT_FASTETHER = 0x3e
+ IFT_FASTETHERFX = 0x45
+ IFT_FDDI = 0xf
+ IFT_FIBRECHANNEL = 0x38
+ IFT_FRAMERELAYINTERCONNECT = 0x3a
+ IFT_FRAMERELAYMPI = 0x5c
+ IFT_FRDLCIENDPT = 0xc1
+ IFT_FRELAY = 0x20
+ IFT_FRELAYDCE = 0x2c
+ IFT_FRF16MFRBUNDLE = 0xa3
+ IFT_FRFORWARD = 0x9e
+ IFT_G703AT2MB = 0x43
+ IFT_G703AT64K = 0x42
+ IFT_GIF = 0xf0
+ IFT_GIGABITETHERNET = 0x75
+ IFT_GR303IDT = 0xb2
+ IFT_GR303RDT = 0xb1
+ IFT_H323GATEKEEPER = 0xa4
+ IFT_H323PROXY = 0xa5
+ IFT_HDH1822 = 0x3
+ IFT_HDLC = 0x76
+ IFT_HDSL2 = 0xa8
+ IFT_HIPERLAN2 = 0xb7
+ IFT_HIPPI = 0x2f
+ IFT_HIPPIINTERFACE = 0x39
+ IFT_HOSTPAD = 0x5a
+ IFT_HSSI = 0x2e
+ IFT_HY = 0xe
+ IFT_IBM370PARCHAN = 0x48
+ IFT_IDSL = 0x9a
+ IFT_IEEE80211 = 0x47
+ IFT_IEEE80212 = 0x37
+ IFT_IEEE8023ADLAG = 0xa1
+ IFT_IFGSN = 0x91
+ IFT_IMT = 0xbe
+ IFT_INTERLEAVE = 0x7c
+ IFT_IP = 0x7e
+ IFT_IPFORWARD = 0x8e
+ IFT_IPOVERATM = 0x72
+ IFT_IPOVERCDLC = 0x6d
+ IFT_IPOVERCLAW = 0x6e
+ IFT_IPSWITCH = 0x4e
+ IFT_ISDN = 0x3f
+ IFT_ISDNBASIC = 0x14
+ IFT_ISDNPRIMARY = 0x15
+ IFT_ISDNS = 0x4b
+ IFT_ISDNU = 0x4c
+ IFT_ISO88022LLC = 0x29
+ IFT_ISO88023 = 0x7
+ IFT_ISO88024 = 0x8
+ IFT_ISO88025 = 0x9
+ IFT_ISO88025CRFPINT = 0x62
+ IFT_ISO88025DTR = 0x56
+ IFT_ISO88025FIBER = 0x73
+ IFT_ISO88026 = 0xa
+ IFT_ISUP = 0xb3
+ IFT_L3IPXVLAN = 0x89
+ IFT_LAPB = 0x10
+ IFT_LAPD = 0x4d
+ IFT_LAPF = 0x77
+ IFT_LOCALTALK = 0x2a
+ IFT_LOOP = 0x18
+ IFT_MEDIAMAILOVERIP = 0x8b
+ IFT_MFSIGLINK = 0xa7
+ IFT_MIOX25 = 0x26
+ IFT_MODEM = 0x30
+ IFT_MPC = 0x71
+ IFT_MPLS = 0xa6
+ IFT_MPLSTUNNEL = 0x96
+ IFT_MSDSL = 0x8f
+ IFT_MVL = 0xbf
+ IFT_MYRINET = 0x63
+ IFT_NFAS = 0xaf
+ IFT_NSIP = 0x1b
+ IFT_OPTICALCHANNEL = 0xc3
+ IFT_OPTICALTRANSPORT = 0xc4
+ IFT_OTHER = 0x1
+ IFT_P10 = 0xc
+ IFT_P80 = 0xd
+ IFT_PARA = 0x22
+ IFT_PFLOG = 0xf6
+ IFT_PFSYNC = 0xf7
+ IFT_PLC = 0xae
+ IFT_POS = 0xab
+ IFT_PPPMULTILINKBUNDLE = 0x6c
+ IFT_PROPBWAP2MP = 0xb8
+ IFT_PROPCNLS = 0x59
+ IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
+ IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
+ IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
+ IFT_PROPMUX = 0x36
+ IFT_PROPWIRELESSP2P = 0x9d
+ IFT_PTPSERIAL = 0x16
+ IFT_PVC = 0xf1
+ IFT_QLLC = 0x44
+ IFT_RADIOMAC = 0xbc
+ IFT_RADSL = 0x5f
+ IFT_REACHDSL = 0xc0
+ IFT_RFC1483 = 0x9f
+ IFT_RS232 = 0x21
+ IFT_RSRB = 0x4f
+ IFT_SDLC = 0x11
+ IFT_SDSL = 0x60
+ IFT_SHDSL = 0xa9
+ IFT_SIP = 0x1f
+ IFT_SLIP = 0x1c
+ IFT_SMDSDXI = 0x2b
+ IFT_SMDSICIP = 0x34
+ IFT_SONET = 0x27
+ IFT_SONETOVERHEADCHANNEL = 0xb9
+ IFT_SONETPATH = 0x32
+ IFT_SONETVT = 0x33
+ IFT_SRP = 0x97
+ IFT_SS7SIGLINK = 0x9c
+ IFT_STACKTOSTACK = 0x6f
+ IFT_STARLAN = 0xb
+ IFT_STF = 0xd7
+ IFT_T1 = 0x12
+ IFT_TDLC = 0x74
+ IFT_TERMPAD = 0x5b
+ IFT_TR008 = 0xb0
+ IFT_TRANSPHDLC = 0x7b
+ IFT_TUNNEL = 0x83
+ IFT_ULTRA = 0x1d
+ IFT_USB = 0xa0
+ IFT_V11 = 0x40
+ IFT_V35 = 0x2d
+ IFT_V36 = 0x41
+ IFT_V37 = 0x78
+ IFT_VDSL = 0x61
+ IFT_VIRTUALIPADDRESS = 0x70
+ IFT_VOICEEM = 0x64
+ IFT_VOICEENCAP = 0x67
+ IFT_VOICEFXO = 0x65
+ IFT_VOICEFXS = 0x66
+ IFT_VOICEOVERATM = 0x98
+ IFT_VOICEOVERFRAMERELAY = 0x99
+ IFT_VOICEOVERIP = 0x68
+ IFT_X213 = 0x5d
+ IFT_X25 = 0x5
+ IFT_X25DDN = 0x4
+ IFT_X25HUNTGROUP = 0x7a
+ IFT_X25MLP = 0x79
+ IFT_X25PLE = 0x28
+ IFT_XETHER = 0x1a
+
+ // missing constants on FreeBSD-11.1-RELEASE, copied from old values in ztypes_freebsd_arm.go
+ IFF_SMART = 0x20
+ IFT_FAITH = 0xf2
+ IFT_IPXIP = 0xf9
+ IPPROTO_MAXID = 0x34
+ IPV6_FAITH = 0x1d
+ IP_FAITH = 0x16
+ MAP_NORESERVE = 0x40
+ MAP_RENAME = 0x20
+ NET_RT_MAXID = 0x6
+ RTF_PRCLONING = 0x10000
+ RTM_OLDADD = 0x9
+ RTM_OLDDEL = 0xa
+ SIOCADDRT = 0x8030720a
+ SIOCALIFADDR = 0x8118691b
+ SIOCDELRT = 0x8030720b
+ SIOCDLIFADDR = 0x8118691d
+ SIOCGLIFADDR = 0xc118691c
+ SIOCGLIFPHYADDR = 0xc118694b
+ SIOCSLIFPHYADDR = 0x8118694a
+)
diff --git a/vendor/golang.org/x/sys/unix/export_test.go b/vendor/golang.org/x/sys/unix/export_test.go
index b4fdd970b6..e8024690df 100644
--- a/vendor/golang.org/x/sys/unix/export_test.go
+++ b/vendor/golang.org/x/sys/unix/export_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/vendor/golang.org/x/sys/unix/flock.go b/vendor/golang.org/x/sys/unix/flock.go
index ce67a59528..2994ce75f2 100644
--- a/vendor/golang.org/x/sys/unix/flock.go
+++ b/vendor/golang.org/x/sys/unix/flock.go
@@ -1,5 +1,3 @@
-// +build linux darwin freebsd openbsd netbsd dragonfly
-
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/vendor/golang.org/x/sys/unix/flock_linux_32bit.go b/vendor/golang.org/x/sys/unix/flock_linux_32bit.go
index 362831c3f7..fc0e50e037 100644
--- a/vendor/golang.org/x/sys/unix/flock_linux_32bit.go
+++ b/vendor/golang.org/x/sys/unix/flock_linux_32bit.go
@@ -1,4 +1,4 @@
-// +build linux,386 linux,arm
+// +build linux,386 linux,arm linux,mips linux,mipsle
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go
index 94c8232124..50062e3c74 100644
--- a/vendor/golang.org/x/sys/unix/gccgo.go
+++ b/vendor/golang.org/x/sys/unix/gccgo.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -8,12 +8,22 @@ package unix
import "syscall"
-// We can't use the gc-syntax .s files for gccgo. On the plus side
+// We can't use the gc-syntax .s files for gccgo. On the plus side
// much of the functionality can be written directly in Go.
+//extern gccgoRealSyscallNoError
+func realSyscallNoError(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r uintptr)
+
//extern gccgoRealSyscall
func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr)
+func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
+ syscall.Entersyscall()
+ r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ syscall.Exitsyscall()
+ return r, 0
+}
+
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
syscall.Entersyscall()
r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
@@ -35,6 +45,11 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
return r, 0, syscall.Errno(errno)
}
+func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
+ r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ return r, 0
+}
+
func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
return r, 0, syscall.Errno(errno)
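These NoError entry points let generated wrappers skip errno handling for syscalls that cannot fail. A hedged sketch of how such a wrapper inside the package could use them on the gccgo path — getpid is chosen only as an illustration and is not part of this hunk:

```go
// Sketch: a wrapper for a never-failing syscall, roughly as the generated
// zsyscall files would emit it for the gccgo path (illustrative only).
func Getpid() (pid int) {
	r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0)
	pid = int(r0)
	return
}
```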
diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c
index 07f6be0392..24e96b1198 100644
--- a/vendor/golang.org/x/sys/unix/gccgo_c.c
+++ b/vendor/golang.org/x/sys/unix/gccgo_c.c
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -31,6 +31,12 @@ gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintp
return r;
}
+uintptr_t
+gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+{
+ return syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+}
+
// Define the use function in C so that it is not inlined.
extern void use(void *) __asm__ (GOSYM_PREFIX GOPKGPATH ".use") __attribute__((noinline));
diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
index bffe1a77db..251a977a81 100644
--- a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/vendor/golang.org/x/sys/unix/linux/Dockerfile b/vendor/golang.org/x/sys/unix/linux/Dockerfile
new file mode 100644
index 0000000000..7fe5fc3c4f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/linux/Dockerfile
@@ -0,0 +1,51 @@
+FROM ubuntu:16.04
+
+# Use the most recent ubuntu sources
+RUN echo 'deb http://en.archive.ubuntu.com/ubuntu/ artful main universe' >> /etc/apt/sources.list
+
+# Dependencies to get the git sources and go binaries
+RUN apt-get update && apt-get install -y \
+ curl \
+ git \
+ && rm -rf /var/lib/apt/lists/*
+
+# Get the git sources. If not cached, this takes O(5 minutes).
+WORKDIR /git
+RUN git config --global advice.detachedHead false
+# Linux Kernel: Released 03 Sep 2017
+RUN git clone --branch v4.13 --depth 1 https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux
+# GNU C library: Released 02 Aug 2017 (we should try to get a secure way to clone this)
+RUN git clone --branch glibc-2.26 --depth 1 git://sourceware.org/git/glibc.git
+
+# Get Go 1.9.2
+ENV GOLANG_VERSION 1.9.2
+ENV GOLANG_DOWNLOAD_URL https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz
+ENV GOLANG_DOWNLOAD_SHA256 de874549d9a8d8d8062be05808509c09a88a248e77ec14eb77453530829ac02b
+
+RUN curl -fsSL "$GOLANG_DOWNLOAD_URL" -o golang.tar.gz \
+ && echo "$GOLANG_DOWNLOAD_SHA256 golang.tar.gz" | sha256sum -c - \
+ && tar -C /usr/local -xzf golang.tar.gz \
+ && rm golang.tar.gz
+
+ENV PATH /usr/local/go/bin:$PATH
+
+# Linux and Glibc build dependencies
+RUN apt-get update && apt-get install -y \
+ gawk make python \
+ gcc gcc-multilib \
+ gettext texinfo \
+ && rm -rf /var/lib/apt/lists/*
+# Emulator and cross compilers
+RUN apt-get update && apt-get install -y \
+ qemu \
+ gcc-aarch64-linux-gnu gcc-arm-linux-gnueabi \
+ gcc-mips-linux-gnu gcc-mips64-linux-gnuabi64 \
+ gcc-mips64el-linux-gnuabi64 gcc-mipsel-linux-gnu \
+ gcc-powerpc64-linux-gnu gcc-powerpc64le-linux-gnu \
+ gcc-s390x-linux-gnu gcc-sparc64-linux-gnu \
+ && rm -rf /var/lib/apt/lists/*
+
+# Let the scripts know they are in the docker environment
+ENV GOLANG_SYS_BUILD docker
+WORKDIR /build
+ENTRYPOINT ["go", "run", "linux/mkall.go", "/git/linux", "/git/glibc"]
diff --git a/vendor/golang.org/x/sys/unix/linux/mkall.go b/vendor/golang.org/x/sys/unix/linux/mkall.go
new file mode 100644
index 0000000000..89b2fe8864
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/linux/mkall.go
@@ -0,0 +1,482 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// linux/mkall.go - Generates all Linux zsysnum, zsyscall, zerror, and ztype
+// files for all 11 linux architectures supported by the go compiler. See
+// README.md for more information about the build system.
+
+// To run it you must have a git checkout of the Linux kernel and glibc. Once
+// the appropriate sources are ready, the program is run as:
+// go run linux/mkall.go
+
+// +build ignore
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "unicode"
+)
+
+// These will be paths to the appropriate source directories.
+var LinuxDir string
+var GlibcDir string
+
+const TempDir = "/tmp"
+const IncludeDir = TempDir + "/include" // To hold our C headers
+const BuildDir = TempDir + "/build" // To hold intermediate build files
+
+const GOOS = "linux" // Only for Linux targets
+const BuildArch = "amd64" // Must be built on this architecture
+const MinKernel = "2.6.23" // https://golang.org/doc/install#requirements
+
+type target struct {
+ GoArch string // Architecture name according to Go
+ LinuxArch string // Architecture name according to the Linux Kernel
+ GNUArch string // Architecture name according to GNU tools (https://wiki.debian.org/Multiarch/Tuples)
+ BigEndian bool // Default Little Endian
+ SignedChar bool // Is -fsigned-char needed (default no)
+ Bits int
+}
+
+// List of the 11 Linux targets supported by the go compiler. sparc64 is not
+// currently supported, though a port is in progress.
+var targets = []target{
+ {
+ GoArch: "386",
+ LinuxArch: "x86",
+ GNUArch: "i686-linux-gnu", // Note "i686" not "i386"
+ Bits: 32,
+ },
+ {
+ GoArch: "amd64",
+ LinuxArch: "x86",
+ GNUArch: "x86_64-linux-gnu",
+ Bits: 64,
+ },
+ {
+ GoArch: "arm64",
+ LinuxArch: "arm64",
+ GNUArch: "aarch64-linux-gnu",
+ SignedChar: true,
+ Bits: 64,
+ },
+ {
+ GoArch: "arm",
+ LinuxArch: "arm",
+ GNUArch: "arm-linux-gnueabi",
+ Bits: 32,
+ },
+ {
+ GoArch: "mips",
+ LinuxArch: "mips",
+ GNUArch: "mips-linux-gnu",
+ BigEndian: true,
+ Bits: 32,
+ },
+ {
+ GoArch: "mipsle",
+ LinuxArch: "mips",
+ GNUArch: "mipsel-linux-gnu",
+ Bits: 32,
+ },
+ {
+ GoArch: "mips64",
+ LinuxArch: "mips",
+ GNUArch: "mips64-linux-gnuabi64",
+ BigEndian: true,
+ Bits: 64,
+ },
+ {
+ GoArch: "mips64le",
+ LinuxArch: "mips",
+ GNUArch: "mips64el-linux-gnuabi64",
+ Bits: 64,
+ },
+ {
+ GoArch: "ppc64",
+ LinuxArch: "powerpc",
+ GNUArch: "powerpc64-linux-gnu",
+ BigEndian: true,
+ Bits: 64,
+ },
+ {
+ GoArch: "ppc64le",
+ LinuxArch: "powerpc",
+ GNUArch: "powerpc64le-linux-gnu",
+ Bits: 64,
+ },
+ {
+ GoArch: "s390x",
+ LinuxArch: "s390",
+ GNUArch: "s390x-linux-gnu",
+ BigEndian: true,
+ SignedChar: true,
+ Bits: 64,
+ },
+ // {
+ // GoArch: "sparc64",
+ // LinuxArch: "sparc",
+ // GNUArch: "sparc64-linux-gnu",
+ // BigEndian: true,
+ // Bits: 64,
+ // },
+}
+
+// ptracePairs is a list of pairs of targets that can, in some cases,
+// run each other's binaries.
+var ptracePairs = []struct{ a1, a2 string }{
+ {"386", "amd64"},
+ {"arm", "arm64"},
+ {"mips", "mips64"},
+ {"mipsle", "mips64le"},
+}
+
+func main() {
+ if runtime.GOOS != GOOS || runtime.GOARCH != BuildArch {
+ fmt.Printf("Build system has GOOS_GOARCH = %s_%s, need %s_%s\n",
+ runtime.GOOS, runtime.GOARCH, GOOS, BuildArch)
+ return
+ }
+
+ // Check that we are using the new build system if we should
+ if os.Getenv("GOLANG_SYS_BUILD") != "docker" {
+ fmt.Println("In the new build system, mkall.go should not be called directly.")
+ fmt.Println("See README.md")
+ return
+ }
+
+ // Parse the command line options
+ if len(os.Args) != 3 {
+		fmt.Println("USAGE: go run linux/mkall.go <linux_dir> <glibc_dir>")
+ return
+ }
+ LinuxDir = os.Args[1]
+ GlibcDir = os.Args[2]
+
+ for _, t := range targets {
+ fmt.Printf("----- GENERATING: %s -----\n", t.GoArch)
+ if err := t.generateFiles(); err != nil {
+ fmt.Printf("%v\n***** FAILURE: %s *****\n\n", err, t.GoArch)
+ } else {
+ fmt.Printf("----- SUCCESS: %s -----\n\n", t.GoArch)
+ }
+ }
+
+ fmt.Printf("----- GENERATING ptrace pairs -----\n")
+ ok := true
+ for _, p := range ptracePairs {
+ if err := generatePtracePair(p.a1, p.a2); err != nil {
+ fmt.Printf("%v\n***** FAILURE: %s/%s *****\n\n", err, p.a1, p.a2)
+ ok = false
+ }
+ }
+ if ok {
+ fmt.Printf("----- SUCCESS ptrace pairs -----\n\n")
+ }
+}
+
+// Makes an exec.Cmd with Stderr attached to os.Stderr
+func makeCommand(name string, args ...string) *exec.Cmd {
+ cmd := exec.Command(name, args...)
+ cmd.Stderr = os.Stderr
+ return cmd
+}
+
+// Runs the command, pipes output to a formatter, pipes that to an output file.
+func (t *target) commandFormatOutput(formatter string, outputFile string,
+ name string, args ...string) (err error) {
+ mainCmd := makeCommand(name, args...)
+
+ fmtCmd := makeCommand(formatter)
+ if formatter == "mkpost" {
+ fmtCmd = makeCommand("go", "run", "mkpost.go")
+ // Set GOARCH_TARGET so mkpost knows what GOARCH is..
+ fmtCmd.Env = append(os.Environ(), "GOARCH_TARGET="+t.GoArch)
+ // Set GOARCH to host arch for mkpost, so it can run natively.
+ for i, s := range fmtCmd.Env {
+ if strings.HasPrefix(s, "GOARCH=") {
+ fmtCmd.Env[i] = "GOARCH=" + BuildArch
+ }
+ }
+ }
+
+ // mainCmd | fmtCmd > outputFile
+ if fmtCmd.Stdin, err = mainCmd.StdoutPipe(); err != nil {
+ return
+ }
+ if fmtCmd.Stdout, err = os.Create(outputFile); err != nil {
+ return
+ }
+
+ // Make sure the formatter eventually closes
+ if err = fmtCmd.Start(); err != nil {
+ return
+ }
+ defer func() {
+ fmtErr := fmtCmd.Wait()
+ if err == nil {
+ err = fmtErr
+ }
+ }()
+
+ return mainCmd.Run()
+}
+
+// Generates all the files for a Linux target
+func (t *target) generateFiles() error {
+ // Setup environment variables
+ os.Setenv("GOOS", GOOS)
+ os.Setenv("GOARCH", t.GoArch)
+
+ // Get appropriate compiler and emulator (unless on x86)
+ if t.LinuxArch != "x86" {
+ // Check/Setup cross compiler
+ compiler := t.GNUArch + "-gcc"
+ if _, err := exec.LookPath(compiler); err != nil {
+ return err
+ }
+ os.Setenv("CC", compiler)
+
+ // Check/Setup emulator (usually first component of GNUArch)
+ qemuArchName := t.GNUArch[:strings.Index(t.GNUArch, "-")]
+ if t.LinuxArch == "powerpc" {
+ qemuArchName = t.GoArch
+ }
+ os.Setenv("GORUN", "qemu-"+qemuArchName)
+ } else {
+ os.Setenv("CC", "gcc")
+ }
+
+ // Make the include directory and fill it with headers
+ if err := os.MkdirAll(IncludeDir, os.ModePerm); err != nil {
+ return err
+ }
+ defer os.RemoveAll(IncludeDir)
+ if err := t.makeHeaders(); err != nil {
+ return fmt.Errorf("could not make header files: %v", err)
+ }
+ fmt.Println("header files generated")
+
+ // Make each of the four files
+ if err := t.makeZSysnumFile(); err != nil {
+ return fmt.Errorf("could not make zsysnum file: %v", err)
+ }
+ fmt.Println("zsysnum file generated")
+
+ if err := t.makeZSyscallFile(); err != nil {
+ return fmt.Errorf("could not make zsyscall file: %v", err)
+ }
+ fmt.Println("zsyscall file generated")
+
+ if err := t.makeZTypesFile(); err != nil {
+ return fmt.Errorf("could not make ztypes file: %v", err)
+ }
+ fmt.Println("ztypes file generated")
+
+ if err := t.makeZErrorsFile(); err != nil {
+ return fmt.Errorf("could not make zerrors file: %v", err)
+ }
+ fmt.Println("zerrors file generated")
+
+ return nil
+}
+
+// Create the Linux and glibc headers in the include directory.
+func (t *target) makeHeaders() error {
+ // Make the Linux headers we need for this architecture
+ linuxMake := makeCommand("make", "headers_install", "ARCH="+t.LinuxArch, "INSTALL_HDR_PATH="+TempDir)
+ linuxMake.Dir = LinuxDir
+ if err := linuxMake.Run(); err != nil {
+ return err
+ }
+
+ // A Temporary build directory for glibc
+ if err := os.MkdirAll(BuildDir, os.ModePerm); err != nil {
+ return err
+ }
+ defer os.RemoveAll(BuildDir)
+
+ // Make the glibc headers we need for this architecture
+ confScript := filepath.Join(GlibcDir, "configure")
+ glibcConf := makeCommand(confScript, "--prefix="+TempDir, "--host="+t.GNUArch, "--enable-kernel="+MinKernel)
+ glibcConf.Dir = BuildDir
+ if err := glibcConf.Run(); err != nil {
+ return err
+ }
+ glibcMake := makeCommand("make", "install-headers")
+ glibcMake.Dir = BuildDir
+ if err := glibcMake.Run(); err != nil {
+ return err
+ }
+ // We only need an empty stubs file
+ stubsFile := filepath.Join(IncludeDir, "gnu/stubs.h")
+ if file, err := os.Create(stubsFile); err != nil {
+ return err
+ } else {
+ file.Close()
+ }
+
+ return nil
+}
+
+// makes the zsysnum_linux_$GOARCH.go file
+func (t *target) makeZSysnumFile() error {
+ zsysnumFile := fmt.Sprintf("zsysnum_linux_%s.go", t.GoArch)
+ unistdFile := filepath.Join(IncludeDir, "asm/unistd.h")
+
+ args := append(t.cFlags(), unistdFile)
+ return t.commandFormatOutput("gofmt", zsysnumFile, "linux/mksysnum.pl", args...)
+}
+
+// makes the zsyscall_linux_$GOARCH.go file
+func (t *target) makeZSyscallFile() error {
+ zsyscallFile := fmt.Sprintf("zsyscall_linux_%s.go", t.GoArch)
+ // Find the correct architecture syscall file (might end with x.go)
+ archSyscallFile := fmt.Sprintf("syscall_linux_%s.go", t.GoArch)
+ if _, err := os.Stat(archSyscallFile); os.IsNotExist(err) {
+ shortArch := strings.TrimSuffix(t.GoArch, "le")
+ archSyscallFile = fmt.Sprintf("syscall_linux_%sx.go", shortArch)
+ }
+
+ args := append(t.mksyscallFlags(), "-tags", "linux,"+t.GoArch,
+ "syscall_linux.go", archSyscallFile)
+ return t.commandFormatOutput("gofmt", zsyscallFile, "./mksyscall.pl", args...)
+}
+
+// makes the zerrors_linux_$GOARCH.go file
+func (t *target) makeZErrorsFile() error {
+ zerrorsFile := fmt.Sprintf("zerrors_linux_%s.go", t.GoArch)
+
+ return t.commandFormatOutput("gofmt", zerrorsFile, "./mkerrors.sh", t.cFlags()...)
+}
+
+// makes the ztypes_linux_$GOARCH.go file
+func (t *target) makeZTypesFile() error {
+ ztypesFile := fmt.Sprintf("ztypes_linux_%s.go", t.GoArch)
+
+ args := []string{"tool", "cgo", "-godefs", "--"}
+ args = append(args, t.cFlags()...)
+ args = append(args, "linux/types.go")
+ return t.commandFormatOutput("mkpost", ztypesFile, "go", args...)
+}
+
+// Flags that should be given to gcc and cgo for this target
+func (t *target) cFlags() []string {
+ // Compile statically to avoid cross-architecture dynamic linking.
+ flags := []string{"-Wall", "-Werror", "-static", "-I" + IncludeDir}
+
+ // Architecture-specific flags
+ if t.SignedChar {
+ flags = append(flags, "-fsigned-char")
+ }
+ if t.LinuxArch == "x86" {
+ flags = append(flags, fmt.Sprintf("-m%d", t.Bits))
+ }
+
+ return flags
+}
+
+// Flags that should be given to mksyscall for this target
+func (t *target) mksyscallFlags() (flags []string) {
+ if t.Bits == 32 {
+ if t.BigEndian {
+ flags = append(flags, "-b32")
+ } else {
+ flags = append(flags, "-l32")
+ }
+ }
+
+	// This flag means a 64-bit value should use an (even, odd) register pair.
+ if t.GoArch == "arm" || (t.LinuxArch == "mips" && t.Bits == 32) {
+ flags = append(flags, "-arm")
+ }
+ return
+}
+
+// generatePtracePair takes a pair of GOARCH values that can run each
+// other's binaries, such as 386 and amd64. It extracts the PtraceRegs
+// type for each one. It writes a new file defining the types
+// PtraceRegsArch1 and PtraceRegsArch2 and the corresponding functions
+// Ptrace{Get,Set}Regs{arch1,arch2}. This permits debugging the other
+// binary on a native system.
+func generatePtracePair(arch1, arch2 string) error {
+ def1, err := ptraceDef(arch1)
+ if err != nil {
+ return err
+ }
+ def2, err := ptraceDef(arch2)
+ if err != nil {
+ return err
+ }
+ f, err := os.Create(fmt.Sprintf("zptrace%s_linux.go", arch1))
+ if err != nil {
+ return err
+ }
+ buf := bufio.NewWriter(f)
+ fmt.Fprintf(buf, "// Code generated by linux/mkall.go generatePtracePair(%s, %s). DO NOT EDIT.\n", arch1, arch2)
+ fmt.Fprintf(buf, "\n")
+ fmt.Fprintf(buf, "// +build linux\n")
+ fmt.Fprintf(buf, "// +build %s %s\n", arch1, arch2)
+ fmt.Fprintf(buf, "\n")
+ fmt.Fprintf(buf, "package unix\n")
+ fmt.Fprintf(buf, "\n")
+ fmt.Fprintf(buf, "%s\n", `import "unsafe"`)
+ fmt.Fprintf(buf, "\n")
+ writeOnePtrace(buf, arch1, def1)
+ fmt.Fprintf(buf, "\n")
+ writeOnePtrace(buf, arch2, def2)
+ if err := buf.Flush(); err != nil {
+ return err
+ }
+ if err := f.Close(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ptraceDef returns the definition of PtraceRegs for arch.
+func ptraceDef(arch string) (string, error) {
+ filename := fmt.Sprintf("ztypes_linux_%s.go", arch)
+ data, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return "", fmt.Errorf("reading %s: %v", filename, err)
+ }
+ start := bytes.Index(data, []byte("type PtraceRegs struct"))
+ if start < 0 {
+ return "", fmt.Errorf("%s: no definition of PtraceRegs", filename)
+ }
+ data = data[start:]
+ end := bytes.Index(data, []byte("\n}\n"))
+ if end < 0 {
+ return "", fmt.Errorf("%s: can't find end of PtraceRegs definition", filename)
+ }
+ return string(data[:end+2]), nil
+}
+
+// writeOnePtrace writes out the ptrace definitions for arch.
+func writeOnePtrace(w io.Writer, arch, def string) {
+ uarch := string(unicode.ToUpper(rune(arch[0]))) + arch[1:]
+ fmt.Fprintf(w, "// PtraceRegs%s is the registers used by %s binaries.\n", uarch, arch)
+ fmt.Fprintf(w, "%s\n", strings.Replace(def, "PtraceRegs", "PtraceRegs"+uarch, 1))
+ fmt.Fprintf(w, "\n")
+ fmt.Fprintf(w, "// PtraceGetRegs%s fetches the registers used by %s binaries.\n", uarch, arch)
+ fmt.Fprintf(w, "func PtraceGetRegs%s(pid int, regsout *PtraceRegs%s) error {\n", uarch, uarch)
+ fmt.Fprintf(w, "\treturn ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))\n")
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, "\n")
+ fmt.Fprintf(w, "// PtraceSetRegs%s sets the registers used by %s binaries.\n", uarch, arch)
+ fmt.Fprintf(w, "func PtraceSetRegs%s(pid int, regs *PtraceRegs%s) error {\n", uarch, uarch)
+ fmt.Fprintf(w, "\treturn ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))\n")
+ fmt.Fprintf(w, "}\n")
+}
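The generated zptrace files export, for example, PtraceRegs386 and PtraceGetRegs386 when building on amd64. A hedged caller-side sketch of why that is useful; the pid is hypothetical and the target process is assumed to already be ptrace-attached and stopped:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	const pid = 1234 // hypothetical, already-traced 32-bit child

	// From an amd64 tracer, read the 386 register set of the child.
	var regs unix.PtraceRegs386
	if err := unix.PtraceGetRegs386(pid, &regs); err != nil {
		fmt.Println("ptrace:", err)
		return
	}
	fmt.Printf("%+v\n", regs)
}
```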
diff --git a/vendor/golang.org/x/sys/unix/linux/mksysnum.pl b/vendor/golang.org/x/sys/unix/linux/mksysnum.pl
new file mode 100755
index 0000000000..63fd800bcb
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/linux/mksysnum.pl
@@ -0,0 +1,85 @@
+#!/usr/bin/env perl
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+use strict;
+
+if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
+ print STDERR "GOARCH or GOOS not defined in environment\n";
+ exit 1;
+}
+
+# Check that we are using the new build system if we should
+if($ENV{'GOLANG_SYS_BUILD'} ne "docker") {
+ print STDERR "In the new build system, mksysnum should not be called directly.\n";
+ print STDERR "See README.md\n";
+ exit 1;
+}
+
+my $command = "$0 ". join(' ', @ARGV);
+
+print <<EOF;
+// $command
+// Code generated by the command above; DO NOT EDIT.
+
+// +build $ENV{'GOARCH'},$ENV{'GOOS'}
+
+package unix
+
+const(
+EOF
+
+my $offset = 0;
+
+sub fmt {
+	my ($name, $num) = @_;
+	if($num > 999){
+ # ignore deprecated syscalls that are no longer implemented
+ # https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/include/uapi/asm-generic/unistd.h?id=refs/heads/master#n716
+ return;
+ }
+ $name =~ y/a-z/A-Z/;
+ $num = $num + $offset;
+ print " SYS_$name = $num;\n";
+}
+
+my $prev;
+open(CC, "$ENV{'CC'} -E -dD @ARGV |") || die "can't run $ENV{'CC'}";
+while(<CC>){
+ if(/^#define __NR_Linux\s+([0-9]+)/){
+ # mips/mips64: extract offset
+ $offset = $1;
+ }
+ elsif(/^#define __NR(\w*)_SYSCALL_BASE\s+([0-9]+)/){
+ # arm: extract offset
+ $offset = $1;
+ }
+ elsif(/^#define __NR_syscalls\s+/) {
+ # ignore redefinitions of __NR_syscalls
+ }
+ elsif(/^#define __NR_(\w*)Linux_syscalls\s+/) {
+ # mips/mips64: ignore definitions about the number of syscalls
+ }
+ elsif(/^#define __NR_(\w+)\s+([0-9]+)/){
+ $prev = $2;
+ fmt($1, $2);
+ }
+ elsif(/^#define __NR3264_(\w+)\s+([0-9]+)/){
+ $prev = $2;
+ fmt($1, $2);
+ }
+ elsif(/^#define __NR_(\w+)\s+\(\w+\+\s*([0-9]+)\)/){
+ fmt($1, $prev+$2)
+ }
+ elsif(/^#define __NR_(\w+)\s+\(__NR_Linux \+ ([0-9]+)/){
+ fmt($1, $2);
+ }
+ elsif(/^#define __NR_(\w+)\s+\(__NR_SYSCALL_BASE \+ ([0-9]+)/){
+ fmt($1, $2);
+ }
+}
+
+print <<EOF;
+)
+EOF
diff --git a/vendor/golang.org/x/sys/unix/linux/types.go b/vendor/golang.org/x/sys/unix/linux/types.go
new file mode 100644
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/linux/types.go
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package unix
+
+/*
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+// On mips64, the glibc stat and kernel stat do not agree
+#if (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI64)
+
+// Use the stat defined by the kernel with a few modifications. These are:
+// * The time fields (like st_atime and st_atimensec) use the timespec
+//   struct (like st_atim) for consistency with the glibc fields.
+// * The padding fields get different names to not break compatibility.
+// * st_blocks is signed, again for compatibility.
+struct stat {
+ unsigned int st_dev;
+ unsigned int st_pad1[3]; // Reserved for st_dev expansion
+
+ unsigned long st_ino;
+
+ mode_t st_mode;
+ __u32 st_nlink;
+
+ uid_t st_uid;
+ gid_t st_gid;
+
+ unsigned int st_rdev;
+ unsigned int st_pad2[3]; // Reserved for st_rdev expansion
+
+ off_t st_size;
+
+	// These are declared as separate fields in the kernel. Here we use
+	// the timespec struct for consistency with the other stat structs.
+ struct timespec st_atim;
+ struct timespec st_mtim;
+ struct timespec st_ctim;
+
+ unsigned int st_blksize;
+ unsigned int st_pad4;
+
+ long st_blocks;
+};
+
+// These are needed because we do not include fcntl.h or sys/types.h
+#include <fcntl.h>
+#include <sys/types.h>
+
+#else
+
+// Use the stat defined by glibc
+#include
+#include
+
+#endif
+
+#ifdef TCSETS2
+// On systems that have "struct termios2" use this as type Termios.
+typedef struct termios2 termios_t;
+#else
+typedef struct termios termios_t;
+#endif
+
+enum {
+ sizeofPtr = sizeof(void*),
+};
+
+union sockaddr_all {
+ struct sockaddr s1; // this one gets used for fields
+ struct sockaddr_in s2; // these pad it out
+ struct sockaddr_in6 s3;
+ struct sockaddr_un s4;
+ struct sockaddr_ll s5;
+ struct sockaddr_nl s6;
+};
+
+struct sockaddr_any {
+ struct sockaddr addr;
+ char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
+};
+
+// copied from /usr/include/bluetooth/hci.h
+struct sockaddr_hci {
+ sa_family_t hci_family;
+ unsigned short hci_dev;
+ unsigned short hci_channel;
+};;
+
+// copied from /usr/include/linux/un.h
+struct my_sockaddr_un {
+ sa_family_t sun_family;
+#if defined(__ARM_EABI__) || defined(__powerpc64__)
+ // on ARM char is by default unsigned
+ signed char sun_path[108];
+#else
+ char sun_path[108];
+#endif
+};
+
+#ifdef __ARM_EABI__
+typedef struct user_regs PtraceRegs;
+#elif defined(__aarch64__)
+typedef struct user_pt_regs PtraceRegs;
+#elif defined(__mips__) || defined(__powerpc64__)
+typedef struct pt_regs PtraceRegs;
+#elif defined(__s390x__)
+typedef struct _user_regs_struct PtraceRegs;
+#elif defined(__sparc__)
+#include <asm/ptrace.h>
+typedef struct pt_regs PtraceRegs;
+#else
+typedef struct user_regs_struct PtraceRegs;
+#endif
+
+#if defined(__s390x__)
+typedef struct _user_psw_struct ptracePsw;
+typedef struct _user_fpregs_struct ptraceFpregs;
+typedef struct _user_per_struct ptracePer;
+#else
+typedef struct {} ptracePsw;
+typedef struct {} ptraceFpregs;
+typedef struct {} ptracePer;
+#endif
+
+// The real epoll_event is a union, and godefs doesn't handle it well.
+struct my_epoll_event {
+ uint32_t events;
+#if defined(__ARM_EABI__) || defined(__aarch64__) || (defined(__mips__) && _MIPS_SIM == _ABIO32)
+ // padding is not specified in linux/eventpoll.h but added to conform to the
+ // alignment requirements of EABI
+ int32_t padFd;
+#elif defined(__powerpc64__) || defined(__s390x__) || defined(__sparc__)
+ int32_t _padFd;
+#endif
+ int32_t fd;
+ int32_t pad;
+};
+
+*/
+import "C"
+
+// Machine characteristics; for internal use.
+
+const (
+ sizeofPtr = C.sizeofPtr
+ sizeofShort = C.sizeof_short
+ sizeofInt = C.sizeof_int
+ sizeofLong = C.sizeof_long
+ sizeofLongLong = C.sizeof_longlong
+ PathMax = C.PATH_MAX
+)
+
+// Basic types
+
+type (
+ _C_short C.short
+ _C_int C.int
+ _C_long C.long
+ _C_long_long C.longlong
+)
+
+// Time
+
+type Timespec C.struct_timespec
+
+type Timeval C.struct_timeval
+
+type Timex C.struct_timex
+
+type Time_t C.time_t
+
+type Tms C.struct_tms
+
+type Utimbuf C.struct_utimbuf
+
+// Processes
+
+type Rusage C.struct_rusage
+
+type Rlimit C.struct_rlimit
+
+type _Gid_t C.gid_t
+
+// Files
+
+type Stat_t C.struct_stat
+
+type Statfs_t C.struct_statfs
+
+type Dirent C.struct_dirent
+
+type Fsid C.fsid_t
+
+type Flock_t C.struct_flock
+
+// Filesystem Encryption
+
+type FscryptPolicy C.struct_fscrypt_policy
+
+type FscryptKey C.struct_fscrypt_key
+
+// Structure for Keyctl
+
+type KeyctlDHParams C.struct_keyctl_dh_params
+
+// Advice to Fadvise
+
+const (
+ FADV_NORMAL = C.POSIX_FADV_NORMAL
+ FADV_RANDOM = C.POSIX_FADV_RANDOM
+ FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL
+ FADV_WILLNEED = C.POSIX_FADV_WILLNEED
+ FADV_DONTNEED = C.POSIX_FADV_DONTNEED
+ FADV_NOREUSE = C.POSIX_FADV_NOREUSE
+)
+
+// Sockets
+
+type RawSockaddrInet4 C.struct_sockaddr_in
+
+type RawSockaddrInet6 C.struct_sockaddr_in6
+
+type RawSockaddrUnix C.struct_my_sockaddr_un
+
+type RawSockaddrLinklayer C.struct_sockaddr_ll
+
+type RawSockaddrNetlink C.struct_sockaddr_nl
+
+type RawSockaddrHCI C.struct_sockaddr_hci
+
+type RawSockaddrCAN C.struct_sockaddr_can
+
+type RawSockaddrALG C.struct_sockaddr_alg
+
+type RawSockaddrVM C.struct_sockaddr_vm
+
+type RawSockaddr C.struct_sockaddr
+
+type RawSockaddrAny C.struct_sockaddr_any
+
+type _Socklen C.socklen_t
+
+type Linger C.struct_linger
+
+type Iovec C.struct_iovec
+
+type IPMreq C.struct_ip_mreq
+
+type IPMreqn C.struct_ip_mreqn
+
+type IPv6Mreq C.struct_ipv6_mreq
+
+type PacketMreq C.struct_packet_mreq
+
+type Msghdr C.struct_msghdr
+
+type Cmsghdr C.struct_cmsghdr
+
+type Inet4Pktinfo C.struct_in_pktinfo
+
+type Inet6Pktinfo C.struct_in6_pktinfo
+
+type IPv6MTUInfo C.struct_ip6_mtuinfo
+
+type ICMPv6Filter C.struct_icmp6_filter
+
+type Ucred C.struct_ucred
+
+type TCPInfo C.struct_tcp_info
+
+const (
+ SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
+ SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
+ SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
+ SizeofSockaddrLinklayer = C.sizeof_struct_sockaddr_ll
+ SizeofSockaddrNetlink = C.sizeof_struct_sockaddr_nl
+ SizeofSockaddrHCI = C.sizeof_struct_sockaddr_hci
+ SizeofSockaddrCAN = C.sizeof_struct_sockaddr_can
+ SizeofSockaddrALG = C.sizeof_struct_sockaddr_alg
+ SizeofSockaddrVM = C.sizeof_struct_sockaddr_vm
+ SizeofLinger = C.sizeof_struct_linger
+ SizeofIovec = C.sizeof_struct_iovec
+ SizeofIPMreq = C.sizeof_struct_ip_mreq
+ SizeofIPMreqn = C.sizeof_struct_ip_mreqn
+ SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+ SizeofPacketMreq = C.sizeof_struct_packet_mreq
+ SizeofMsghdr = C.sizeof_struct_msghdr
+ SizeofCmsghdr = C.sizeof_struct_cmsghdr
+ SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo
+ SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+ SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
+ SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+ SizeofUcred = C.sizeof_struct_ucred
+ SizeofTCPInfo = C.sizeof_struct_tcp_info
+)
+
+// Netlink routing and interface messages
+
+const (
+ IFA_UNSPEC = C.IFA_UNSPEC
+ IFA_ADDRESS = C.IFA_ADDRESS
+ IFA_LOCAL = C.IFA_LOCAL
+ IFA_LABEL = C.IFA_LABEL
+ IFA_BROADCAST = C.IFA_BROADCAST
+ IFA_ANYCAST = C.IFA_ANYCAST
+ IFA_CACHEINFO = C.IFA_CACHEINFO
+ IFA_MULTICAST = C.IFA_MULTICAST
+ IFLA_UNSPEC = C.IFLA_UNSPEC
+ IFLA_ADDRESS = C.IFLA_ADDRESS
+ IFLA_BROADCAST = C.IFLA_BROADCAST
+ IFLA_IFNAME = C.IFLA_IFNAME
+ IFLA_MTU = C.IFLA_MTU
+ IFLA_LINK = C.IFLA_LINK
+ IFLA_QDISC = C.IFLA_QDISC
+ IFLA_STATS = C.IFLA_STATS
+ IFLA_COST = C.IFLA_COST
+ IFLA_PRIORITY = C.IFLA_PRIORITY
+ IFLA_MASTER = C.IFLA_MASTER
+ IFLA_WIRELESS = C.IFLA_WIRELESS
+ IFLA_PROTINFO = C.IFLA_PROTINFO
+ IFLA_TXQLEN = C.IFLA_TXQLEN
+ IFLA_MAP = C.IFLA_MAP
+ IFLA_WEIGHT = C.IFLA_WEIGHT
+ IFLA_OPERSTATE = C.IFLA_OPERSTATE
+ IFLA_LINKMODE = C.IFLA_LINKMODE
+ IFLA_LINKINFO = C.IFLA_LINKINFO
+ IFLA_NET_NS_PID = C.IFLA_NET_NS_PID
+ IFLA_IFALIAS = C.IFLA_IFALIAS
+ IFLA_MAX = C.IFLA_MAX
+ RT_SCOPE_UNIVERSE = C.RT_SCOPE_UNIVERSE
+ RT_SCOPE_SITE = C.RT_SCOPE_SITE
+ RT_SCOPE_LINK = C.RT_SCOPE_LINK
+ RT_SCOPE_HOST = C.RT_SCOPE_HOST
+ RT_SCOPE_NOWHERE = C.RT_SCOPE_NOWHERE
+ RT_TABLE_UNSPEC = C.RT_TABLE_UNSPEC
+ RT_TABLE_COMPAT = C.RT_TABLE_COMPAT
+ RT_TABLE_DEFAULT = C.RT_TABLE_DEFAULT
+ RT_TABLE_MAIN = C.RT_TABLE_MAIN
+ RT_TABLE_LOCAL = C.RT_TABLE_LOCAL
+ RT_TABLE_MAX = C.RT_TABLE_MAX
+ RTA_UNSPEC = C.RTA_UNSPEC
+ RTA_DST = C.RTA_DST
+ RTA_SRC = C.RTA_SRC
+ RTA_IIF = C.RTA_IIF
+ RTA_OIF = C.RTA_OIF
+ RTA_GATEWAY = C.RTA_GATEWAY
+ RTA_PRIORITY = C.RTA_PRIORITY
+ RTA_PREFSRC = C.RTA_PREFSRC
+ RTA_METRICS = C.RTA_METRICS
+ RTA_MULTIPATH = C.RTA_MULTIPATH
+ RTA_FLOW = C.RTA_FLOW
+ RTA_CACHEINFO = C.RTA_CACHEINFO
+ RTA_TABLE = C.RTA_TABLE
+ RTN_UNSPEC = C.RTN_UNSPEC
+ RTN_UNICAST = C.RTN_UNICAST
+ RTN_LOCAL = C.RTN_LOCAL
+ RTN_BROADCAST = C.RTN_BROADCAST
+ RTN_ANYCAST = C.RTN_ANYCAST
+ RTN_MULTICAST = C.RTN_MULTICAST
+ RTN_BLACKHOLE = C.RTN_BLACKHOLE
+ RTN_UNREACHABLE = C.RTN_UNREACHABLE
+ RTN_PROHIBIT = C.RTN_PROHIBIT
+ RTN_THROW = C.RTN_THROW
+ RTN_NAT = C.RTN_NAT
+ RTN_XRESOLVE = C.RTN_XRESOLVE
+ RTNLGRP_NONE = C.RTNLGRP_NONE
+ RTNLGRP_LINK = C.RTNLGRP_LINK
+ RTNLGRP_NOTIFY = C.RTNLGRP_NOTIFY
+ RTNLGRP_NEIGH = C.RTNLGRP_NEIGH
+ RTNLGRP_TC = C.RTNLGRP_TC
+ RTNLGRP_IPV4_IFADDR = C.RTNLGRP_IPV4_IFADDR
+ RTNLGRP_IPV4_MROUTE = C.RTNLGRP_IPV4_MROUTE
+ RTNLGRP_IPV4_ROUTE = C.RTNLGRP_IPV4_ROUTE
+ RTNLGRP_IPV4_RULE = C.RTNLGRP_IPV4_RULE
+ RTNLGRP_IPV6_IFADDR = C.RTNLGRP_IPV6_IFADDR
+ RTNLGRP_IPV6_MROUTE = C.RTNLGRP_IPV6_MROUTE
+ RTNLGRP_IPV6_ROUTE = C.RTNLGRP_IPV6_ROUTE
+ RTNLGRP_IPV6_IFINFO = C.RTNLGRP_IPV6_IFINFO
+ RTNLGRP_IPV6_PREFIX = C.RTNLGRP_IPV6_PREFIX
+ RTNLGRP_IPV6_RULE = C.RTNLGRP_IPV6_RULE
+ RTNLGRP_ND_USEROPT = C.RTNLGRP_ND_USEROPT
+ SizeofNlMsghdr = C.sizeof_struct_nlmsghdr
+ SizeofNlMsgerr = C.sizeof_struct_nlmsgerr
+ SizeofRtGenmsg = C.sizeof_struct_rtgenmsg
+ SizeofNlAttr = C.sizeof_struct_nlattr
+ SizeofRtAttr = C.sizeof_struct_rtattr
+ SizeofIfInfomsg = C.sizeof_struct_ifinfomsg
+ SizeofIfAddrmsg = C.sizeof_struct_ifaddrmsg
+ SizeofRtMsg = C.sizeof_struct_rtmsg
+ SizeofRtNexthop = C.sizeof_struct_rtnexthop
+)
+
+type NlMsghdr C.struct_nlmsghdr
+
+type NlMsgerr C.struct_nlmsgerr
+
+type RtGenmsg C.struct_rtgenmsg
+
+type NlAttr C.struct_nlattr
+
+type RtAttr C.struct_rtattr
+
+type IfInfomsg C.struct_ifinfomsg
+
+type IfAddrmsg C.struct_ifaddrmsg
+
+type RtMsg C.struct_rtmsg
+
+type RtNexthop C.struct_rtnexthop
+
+// Linux socket filter
+
+const (
+ SizeofSockFilter = C.sizeof_struct_sock_filter
+ SizeofSockFprog = C.sizeof_struct_sock_fprog
+)
+
+type SockFilter C.struct_sock_filter
+
+type SockFprog C.struct_sock_fprog
+
+// Inotify
+
+type InotifyEvent C.struct_inotify_event
+
+const SizeofInotifyEvent = C.sizeof_struct_inotify_event
+
+// Ptrace
+
+// Register structures
+type PtraceRegs C.PtraceRegs
+
+// Structures contained in PtraceRegs on s390x (exported by mkpost.go)
+type PtracePsw C.ptracePsw
+
+type PtraceFpregs C.ptraceFpregs
+
+type PtracePer C.ptracePer
+
+// Misc
+
+type FdSet C.fd_set
+
+type Sysinfo_t C.struct_sysinfo
+
+type Utsname C.struct_utsname
+
+type Ustat_t C.struct_ustat
+
+type EpollEvent C.struct_my_epoll_event
+
+const (
+ AT_FDCWD = C.AT_FDCWD
+ AT_NO_AUTOMOUNT = C.AT_NO_AUTOMOUNT
+ AT_REMOVEDIR = C.AT_REMOVEDIR
+ AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW
+ AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
+)
+
+type PollFd C.struct_pollfd
+
+const (
+ POLLIN = C.POLLIN
+ POLLPRI = C.POLLPRI
+ POLLOUT = C.POLLOUT
+ POLLRDHUP = C.POLLRDHUP
+ POLLERR = C.POLLERR
+ POLLHUP = C.POLLHUP
+ POLLNVAL = C.POLLNVAL
+)
+
+type Sigset_t C.sigset_t
+
+const RNDGETENTCNT = C.RNDGETENTCNT
+
+const PERF_IOC_FLAG_GROUP = C.PERF_IOC_FLAG_GROUP
+
+// Terminal handling
+
+type Termios C.termios_t
+
+type Winsize C.struct_winsize
+
+// Taskstats and cgroup stats.
+
+type Taskstats C.struct_taskstats
+
+const (
+ TASKSTATS_CMD_UNSPEC = C.TASKSTATS_CMD_UNSPEC
+ TASKSTATS_CMD_GET = C.TASKSTATS_CMD_GET
+ TASKSTATS_CMD_NEW = C.TASKSTATS_CMD_NEW
+ TASKSTATS_TYPE_UNSPEC = C.TASKSTATS_TYPE_UNSPEC
+ TASKSTATS_TYPE_PID = C.TASKSTATS_TYPE_PID
+ TASKSTATS_TYPE_TGID = C.TASKSTATS_TYPE_TGID
+ TASKSTATS_TYPE_STATS = C.TASKSTATS_TYPE_STATS
+ TASKSTATS_TYPE_AGGR_PID = C.TASKSTATS_TYPE_AGGR_PID
+ TASKSTATS_TYPE_AGGR_TGID = C.TASKSTATS_TYPE_AGGR_TGID
+ TASKSTATS_TYPE_NULL = C.TASKSTATS_TYPE_NULL
+ TASKSTATS_CMD_ATTR_UNSPEC = C.TASKSTATS_CMD_ATTR_UNSPEC
+ TASKSTATS_CMD_ATTR_PID = C.TASKSTATS_CMD_ATTR_PID
+ TASKSTATS_CMD_ATTR_TGID = C.TASKSTATS_CMD_ATTR_TGID
+ TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = C.TASKSTATS_CMD_ATTR_REGISTER_CPUMASK
+ TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = C.TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK
+)
+
+type CGroupStats C.struct_cgroupstats
+
+const (
+ CGROUPSTATS_CMD_UNSPEC = C.__TASKSTATS_CMD_MAX
+ CGROUPSTATS_CMD_GET = C.CGROUPSTATS_CMD_GET
+ CGROUPSTATS_CMD_NEW = C.CGROUPSTATS_CMD_NEW
+ CGROUPSTATS_TYPE_UNSPEC = C.CGROUPSTATS_TYPE_UNSPEC
+ CGROUPSTATS_TYPE_CGROUP_STATS = C.CGROUPSTATS_TYPE_CGROUP_STATS
+ CGROUPSTATS_CMD_ATTR_UNSPEC = C.CGROUPSTATS_CMD_ATTR_UNSPEC
+ CGROUPSTATS_CMD_ATTR_FD = C.CGROUPSTATS_CMD_ATTR_FD
+)
+
+// Generic netlink
+
+type Genlmsghdr C.struct_genlmsghdr
+
+const (
+ CTRL_CMD_UNSPEC = C.CTRL_CMD_UNSPEC
+ CTRL_CMD_NEWFAMILY = C.CTRL_CMD_NEWFAMILY
+ CTRL_CMD_DELFAMILY = C.CTRL_CMD_DELFAMILY
+ CTRL_CMD_GETFAMILY = C.CTRL_CMD_GETFAMILY
+ CTRL_CMD_NEWOPS = C.CTRL_CMD_NEWOPS
+ CTRL_CMD_DELOPS = C.CTRL_CMD_DELOPS
+ CTRL_CMD_GETOPS = C.CTRL_CMD_GETOPS
+ CTRL_CMD_NEWMCAST_GRP = C.CTRL_CMD_NEWMCAST_GRP
+ CTRL_CMD_DELMCAST_GRP = C.CTRL_CMD_DELMCAST_GRP
+ CTRL_CMD_GETMCAST_GRP = C.CTRL_CMD_GETMCAST_GRP
+ CTRL_ATTR_UNSPEC = C.CTRL_ATTR_UNSPEC
+ CTRL_ATTR_FAMILY_ID = C.CTRL_ATTR_FAMILY_ID
+ CTRL_ATTR_FAMILY_NAME = C.CTRL_ATTR_FAMILY_NAME
+ CTRL_ATTR_VERSION = C.CTRL_ATTR_VERSION
+ CTRL_ATTR_HDRSIZE = C.CTRL_ATTR_HDRSIZE
+ CTRL_ATTR_MAXATTR = C.CTRL_ATTR_MAXATTR
+ CTRL_ATTR_OPS = C.CTRL_ATTR_OPS
+ CTRL_ATTR_MCAST_GROUPS = C.CTRL_ATTR_MCAST_GROUPS
+ CTRL_ATTR_OP_UNSPEC = C.CTRL_ATTR_OP_UNSPEC
+ CTRL_ATTR_OP_ID = C.CTRL_ATTR_OP_ID
+ CTRL_ATTR_OP_FLAGS = C.CTRL_ATTR_OP_FLAGS
+ CTRL_ATTR_MCAST_GRP_UNSPEC = C.CTRL_ATTR_MCAST_GRP_UNSPEC
+ CTRL_ATTR_MCAST_GRP_NAME = C.CTRL_ATTR_MCAST_GRP_NAME
+ CTRL_ATTR_MCAST_GRP_ID = C.CTRL_ATTR_MCAST_GRP_ID
+)
+
+// CPU affinity
+
+type cpuMask C.__cpu_mask
+
+const (
+ _CPU_SETSIZE = C.__CPU_SETSIZE
+ _NCPUBITS = C.__NCPUBITS
+)
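types.go is only the input to cgo -godefs; for orientation, a hedged sketch of what a mapping like `type Timespec C.struct_timespec` typically turns into in the generated ztypes_linux_amd64.go. The field layout is assumed from the 64-bit ABI, not copied from the real generated file.

```go
// Sketch of godefs output for linux/amd64 (assumed):
package unix

type Timespec struct {
	Sec  int64
	Nsec int64
}

type Timeval struct {
	Sec  int64
	Usec int64
}
```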
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
index 3e224c57e2..1715122bd4 100755
--- a/vendor/golang.org/x/sys/unix/mkall.sh
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -3,75 +3,9 @@
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
-# The unix package provides access to the raw system call
-# interface of the underlying operating system. Porting Go to
-# a new architecture/operating system combination requires
-# some manual effort, though there are tools that automate
-# much of the process. The auto-generated files have names
-# beginning with z.
-#
-# This script runs or (given -n) prints suggested commands to generate z files
-# for the current system. Running those commands is not automatic.
-# This script is documentation more than anything else.
-#
-# * asm_${GOOS}_${GOARCH}.s
-#
-# This hand-written assembly file implements system call dispatch.
-# There are three entry points:
-#
-# func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
-# func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr);
-# func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
-#
-# The first and second are the standard ones; they differ only in
-# how many arguments can be passed to the kernel.
-# The third is for low-level use by the ForkExec wrapper;
-# unlike the first two, it does not call into the scheduler to
-# let it know that a system call is running.
-#
-# * syscall_${GOOS}.go
-#
-# This hand-written Go file implements system calls that need
-# special handling and lists "//sys" comments giving prototypes
-# for ones that can be auto-generated. Mksyscall reads those
-# comments to generate the stubs.
-#
-# * syscall_${GOOS}_${GOARCH}.go
-#
-# Same as syscall_${GOOS}.go except that it contains code specific
-# to ${GOOS} on one particular architecture.
-#
-# * types_${GOOS}.c
-#
-# This hand-written C file includes standard C headers and then
-# creates typedef or enum names beginning with a dollar sign
-# (use of $ in variable names is a gcc extension). The hardest
-# part about preparing this file is figuring out which headers to
-# include and which symbols need to be #defined to get the
-# actual data structures that pass through to the kernel system calls.
-# Some C libraries present alternate versions for binary compatibility
-# and translate them on the way in and out of system calls, but
-# there is almost always a #define that can get the real ones.
-# See types_darwin.c and types_linux.c for examples.
-#
-# * zerror_${GOOS}_${GOARCH}.go
-#
-# This machine-generated file defines the system's error numbers,
-# error strings, and signal numbers. The generator is "mkerrors.sh".
-# Usually no arguments are needed, but mkerrors.sh will pass its
-# arguments on to godefs.
-#
-# * zsyscall_${GOOS}_${GOARCH}.go
-#
-# Generated by mksyscall.pl; see syscall_${GOOS}.go above.
-#
-# * zsysnum_${GOOS}_${GOARCH}.go
-#
-# Generated by mksysnum_${GOOS}.
-#
-# * ztypes_${GOOS}_${GOARCH}.go
-#
-# Generated by godefs; see types_${GOOS}.c above.
+# This script runs or (given -n) prints suggested commands to generate files for
+# the Architecture/OS specified by the GOARCH and GOOS environment variables.
+# See README.md for more information about how the build system works.
GOOSARCH="${GOOS}_${GOARCH}"
@@ -84,11 +18,14 @@ zsysctl="zsysctl_$GOOSARCH.go"
mksysnum=
mktypes=
run="sh"
+cmd=""
case "$1" in
-syscalls)
for i in zsyscall*go
do
+ # Run the command line that appears in the first line
+ # of the generated file to regenerate it.
sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
rm _$i
done
@@ -96,6 +33,7 @@ case "$1" in
;;
-n)
run="cat"
+ cmd="echo"
shift
esac
@@ -107,6 +45,14 @@ case "$#" in
exit 2
esac
+if [[ "$GOOS" = "linux" ]] && [[ "$GOARCH" != "sparc64" ]]; then
+	# Use the new build system
+	# Files generated through docker (use $cmd so you can Ctrl-C the build or run)
+ $cmd docker build --tag generate:$GOOS $GOOS
+ $cmd docker run --interactive --tty --volume $(dirname "$(readlink -f "$0")"):/build generate:$GOOS
+ exit
+fi
+
GOOSARCH_in=syscall_$GOOSARCH.go
case "$GOOSARCH" in
_* | *_ | _)
@@ -126,7 +72,7 @@ darwin_amd64)
;;
darwin_arm)
mkerrors="$mkerrors"
- mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h"
+ mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
darwin_arm64)
@@ -134,12 +80,6 @@ darwin_arm64)
mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
-dragonfly_386)
- mkerrors="$mkerrors -m32"
- mksyscall="./mksyscall.pl -l32 -dragonfly"
- mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
dragonfly_amd64)
mkerrors="$mkerrors -m64"
mksyscall="./mksyscall.pl -dragonfly"
@@ -162,67 +102,16 @@ freebsd_arm)
mksyscall="./mksyscall.pl -l32 -arm"
mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
# Let the type of C char be signed for making the bare syscall
- # API consistent across over platforms.
+ # API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
-linux_386)
- mkerrors="$mkerrors -m32"
- mksyscall="./mksyscall.pl -l32"
- mksysnum="./mksysnum_linux.pl /usr/include/asm/unistd_32.h"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-linux_amd64)
- unistd_h=$(ls -1 /usr/include/asm/unistd_64.h /usr/include/x86_64-linux-gnu/asm/unistd_64.h 2>/dev/null | head -1)
- if [ "$unistd_h" = "" ]; then
- echo >&2 cannot find unistd_64.h
- exit 1
- fi
+linux_sparc64)
+ GOOSARCH_in=syscall_linux_sparc64.go
+ unistd_h=/usr/include/sparc64-linux-gnu/asm/unistd.h
mkerrors="$mkerrors -m64"
mksysnum="./mksysnum_linux.pl $unistd_h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
-linux_arm)
- mkerrors="$mkerrors"
- mksyscall="./mksyscall.pl -l32 -arm"
- mksysnum="curl -s 'http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/plain/arch/arm/include/uapi/asm/unistd.h' | ./mksysnum_linux.pl -"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-linux_arm64)
- unistd_h=$(ls -1 /usr/include/asm/unistd.h /usr/include/asm-generic/unistd.h 2>/dev/null | head -1)
- if [ "$unistd_h" = "" ]; then
- echo >&2 cannot find unistd_64.h
- exit 1
- fi
- mksysnum="./mksysnum_linux.pl $unistd_h"
- # Let the type of C char be signed for making the bare syscall
- # API consistent across over platforms.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-linux_ppc64)
- GOOSARCH_in=syscall_linux_ppc64x.go
- unistd_h=/usr/include/asm/unistd.h
- mkerrors="$mkerrors -m64"
- mksysnum="./mksysnum_linux.pl $unistd_h"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-linux_ppc64le)
- GOOSARCH_in=syscall_linux_ppc64x.go
- unistd_h=/usr/include/powerpc64le-linux-gnu/asm/unistd.h
- mkerrors="$mkerrors -m64"
- mksysnum="./mksysnum_linux.pl $unistd_h"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-linux_s390x)
- GOOSARCH_in=syscall_linux_s390x.go
- unistd_h=/usr/include/asm/unistd.h
- mkerrors="$mkerrors -m64"
- mksysnum="./mksysnum_linux.pl $unistd_h"
- # Let the type of C char be signed to make the bare sys
- # API more consistent between platforms.
- # This is a deliberate departure from the way the syscall
- # package generates its version of the types file.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
netbsd_386)
mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32 -netbsd"
@@ -235,11 +124,18 @@ netbsd_amd64)
mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
+netbsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="./mksyscall.pl -l32 -netbsd -arm"
+ mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
openbsd_386)
mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32 -openbsd"
mksysctl="./mksysctl_openbsd.pl"
- zsysctl="zsysctl_openbsd.go"
mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
@@ -247,10 +143,18 @@ openbsd_amd64)
mkerrors="$mkerrors -m64"
mksyscall="./mksyscall.pl -openbsd"
mksysctl="./mksysctl_openbsd.pl"
- zsysctl="zsysctl_openbsd.go"
mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
+openbsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="./mksyscall.pl -l32 -openbsd -arm"
+ mksysctl="./mksysctl_openbsd.pl"
+ mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
solaris_amd64)
mksyscall="./mksyscall_solaris.pl"
mkerrors="$mkerrors -m64"
@@ -273,13 +177,12 @@ esac
syscall_goos="syscall_bsd.go $syscall_goos"
;;
esac
- if [ -n "$mksyscall" ]; then echo "$mksyscall $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi
+ if [ -n "$mksyscall" ]; then echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi
;;
esac
if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
if [ -n "$mktypes" ]; then
- echo "echo // +build $GOARCH,$GOOS > ztypes_$GOOSARCH.go";
- echo "$mktypes types_$GOOS.go | go run mkpost.go >>ztypes_$GOOSARCH.go";
+ echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go";
fi
) | $run
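
(Editorial note, not part of the patch: after this change mkall.sh is driven entirely by GOOS/GOARCH, and -n now sets both run=cat and cmd=echo so nothing is executed. A rough usage sketch follows; the exact echoed output depends on the checked-out tree and is only illustrative.)

# Dry run for a Linux target: the script only echoes the docker invocations
# introduced above, e.g. "docker build --tag generate:linux linux" followed by
# "docker run ... --volume <repo>/unix:/build generate:linux", then exits.
GOOS=linux GOARCH=amd64 ./mkall.sh -n

# Non-Linux targets (and linux/sparc64) still print the classic
# mkerrors/mksyscall/mksysnum/mktypes pipelines for manual execution.
GOOS=openbsd GOARCH=arm ./mkall.sh -n
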
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index c40d788c4a..a452554bce 100755
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -16,9 +16,18 @@ if test -z "$GOARCH" -o -z "$GOOS"; then
exit 1
fi
+# Check that we are using the new build system if we should
+if [[ "$GOOS" = "linux" ]] && [[ "$GOARCH" != "sparc64" ]]; then
+ if [[ "$GOLANG_SYS_BUILD" != "docker" ]]; then
+ echo 1>&2 "In the new build system, mkerrors should not be called directly."
+ echo 1>&2 "See README.md"
+ exit 1
+ fi
+fi
+
CC=${CC:-cc}
-if [[ "$GOOS" -eq "solaris" ]]; then
+if [[ "$GOOS" = "solaris" ]]; then
# Assumes GNU versions of utilities in PATH.
export PATH=/usr/gnu/bin:$PATH
fi
@@ -29,6 +38,8 @@ includes_Darwin='
#define _DARWIN_C_SOURCE
#define KERNEL
#define _DARWIN_USE_64_BIT_INODE
+#include
+#include
#include
#include
#include
@@ -36,6 +47,8 @@ includes_Darwin='
#include
#include
#include
+#include
+#include
#include
#include
#include
@@ -66,6 +79,7 @@ includes_DragonFly='
'
includes_FreeBSD='
+#include
#include
#include
#include
@@ -73,6 +87,7 @@ includes_FreeBSD='
#include
#include
#include
+#include
#include
#include
#include
@@ -102,8 +117,39 @@ includes_Linux='
#endif
#define _GNU_SOURCE
+// is broken on powerpc64, as it fails to include definitions of
+// these structures. We just include them copied from