diff --git a/capture/listener.go b/capture/listener.go index f888e43e..7a11a0bc 100644 --- a/capture/listener.go +++ b/capture/listener.go @@ -10,6 +10,7 @@ package capture import ( "bytes" "encoding/binary" + "fmt" "io" "log" "net" @@ -324,10 +325,7 @@ func (t *Listener) readPcap() { log.Fatal(err) } - bpfSupported := true - if runtime.GOOS == "darwin" { - bpfSupported = false - } + const BPFSupported = runtime.GOOS != "darwin" var wg sync.WaitGroup wg.Add(len(devices)) @@ -402,17 +400,17 @@ func (t *Listener) readPcap() { } } - if bpfSupported { + if BPFSupported { var bpf string - if t.trackResponse { - bpf = "(tcp dst port " + strconv.Itoa(int(t.port)) + " and (" + bpfDstHost + ")) or (" + "tcp src port " + strconv.Itoa(int(t.port)) + " and (" + bpfSrcHost + "))" - } else { - bpf = "tcp dst port " + strconv.Itoa(int(t.port)) + " and (" + bpfDstHost + ")" - } - if t.bpfFilter != "" { bpf = t.bpfFilter + } else { + if t.trackResponse { + bpf = fmt.Sprintf("(tcp dst port %d and (%s)) or (tcp src port %d and (%s))", t.port, bpfDstHost, t.port, bpfSrcHost) + } else { + bpf = fmt.Sprintf("(tcp dst port %d and (%s))", t.port, bpfDstHost) + } } if err := handle.SetBPFFilter(bpf); err != nil { @@ -470,7 +468,6 @@ func (t *Listener) readPcap() { log.Println("Unknown packet layer", decoder, packet) break } - data = packet.Data()[of:] version := uint8(data[0]) >> 4 @@ -528,7 +525,7 @@ func (t *Listener) readPcap() { // We need only packets with data inside // Check that the buffer is larger than the size of the TCP header if len(data) > int(dataOffset*4) || isFIN { - if !bpfSupported { + if !BPFSupported { destPort := binary.BigEndian.Uint16(data[2:4]) srcPort := binary.BigEndian.Uint16(data[0:2]) diff --git a/capture/tcp_message.go b/capture/tcp_message.go index 54885b34..8ddcfb6b 100644 --- a/capture/tcp_message.go +++ b/capture/tcp_message.go @@ -5,7 +5,6 @@ import ( "crypto/sha1" "encoding/binary" "encoding/hex" - "log" "net" "strconv" "strings" @@ -14,8 +13,6 @@ import ( "github.com/buger/goreplay/proto" ) -var _ = log.Println - // TCPProtocol is a number to indicate type of protocol type TCPProtocol uint8 diff --git a/go.mod b/go.mod index ba2fbf1e..78b9a426 100644 --- a/go.mod +++ b/go.mod @@ -3,25 +3,17 @@ module github.com/buger/goreplay go 1.14 require ( - github.com/Shopify/sarama v1.11.0 - github.com/Shopify/toxiproxy v2.1.4+incompatible // indirect + github.com/Shopify/sarama v1.26.4 github.com/araddon/gou v0.0.0-20190110011759-c797efecbb61 // indirect - github.com/aws/aws-sdk-go v1.19.28 - github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 // indirect + github.com/aws/aws-sdk-go v1.32.7 + github.com/bitly/go-hostpool v0.1.0 // indirect github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect - github.com/eapache/go-resiliency v0.0.0-20160104191539-b86b1ec0dd42 // indirect - github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934 // indirect - github.com/eapache/queue v1.1.0 // indirect - github.com/golang/snappy v0.0.0-20160529050041-d9eb7a3d35ec // indirect - github.com/google/gopacket v0.0.0-20190211013929-f86faeb88894 - github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 // indirect - github.com/kr/pretty v0.0.0-20130510082846-bc9499caa0f4 // indirect - github.com/kr/text v0.0.0-20130911015532-6807e777504f // indirect + github.com/google/gopacket v1.1.17 + 
github.com/klauspost/compress v1.10.10 // indirect github.com/mattbaird/elastigo v0.0.0-20170123220020-2fe47fd29e4b - github.com/mdlayher/raw v0.0.0-20191009151244-50f2db8cc065 // indirect - github.com/pierrec/lz4 v0.0.0-20161206202305-5c9560bfa9ac // indirect - github.com/pierrec/xxHash v0.0.0-20160112165351-5a004441f897 // indirect - github.com/rcrowley/go-metrics v0.0.0-20161128210544-1f30fe9094a5 // indirect + github.com/pierrec/lz4 v2.5.2+incompatible // indirect + github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect github.com/smartystreets/goconvey v1.6.4 // indirect - github.com/stretchr/testify v1.5.1 // indirect + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 // indirect + golang.org/x/net v0.0.0-20200602114024-627f9648deb9 // indirect ) diff --git a/go.sum b/go.sum index 053f2115..e0b17641 100644 --- a/go.sum +++ b/go.sum @@ -1,71 +1,117 @@ -github.com/Shopify/sarama v1.11.0 h1:JD701r42hP1ms1uytUMa34jZgoddt0XhiElhJt+2ERE= -github.com/Shopify/sarama v1.11.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/sarama v1.26.4 h1:+17TxUq/PJEAfZAll0T7XJjSgQWCpaQSoki/x5yN8o8= +github.com/Shopify/sarama v1.26.4/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/araddon/gou v0.0.0-20190110011759-c797efecbb61 h1:Xz25cuW4REGC5W5UtpMU3QItMIImag615HiQcRbxqKQ= github.com/araddon/gou v0.0.0-20190110011759-c797efecbb61/go.mod h1:ikc1XA58M+Rx7SEbf0bLJCfBkwayZ8T5jBo5FXK8Uz8= -github.com/aws/aws-sdk-go v1.19.28 h1:u0KMC+Qv0YVyz8YR6mREEtslSPkdUMzXgDJFD5196O8= -github.com/aws/aws-sdk-go v1.19.28/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY= -github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/aws/aws-sdk-go v1.32.7 h1:H4VgdCSF1cHw0VD8zGc98T1bGdACoLkh/vK2L6wgOUU= +github.com/aws/aws-sdk-go v1.32.7/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= +github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/eapache/go-resiliency v0.0.0-20160104191539-b86b1ec0dd42 h1:f8ERmXYuaC+kCSv2w+y3rBK/oVu6If4DEm3jywJJ0hc= -github.com/eapache/go-resiliency v0.0.0-20160104191539-b86b1ec0dd42/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934 h1:oGLoaVIefp3tiOgi7+KInR/nNPvEpPM6GFo+El7fd14= -github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934/go.mod 
h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/golang/snappy v0.0.0-20160529050041-d9eb7a3d35ec h1:ZaSUjYC8aWT/om43c8YVz0SqjT8ABtqw7REbZGsCroE= -github.com/golang/snappy v0.0.0-20160529050041-d9eb7a3d35ec/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/gopacket v0.0.0-20190211013929-f86faeb88894 h1:uLlIRilC25T6VC/ePgHc2Oo1+AWlsrbmbMYUeEHfZAs= -github.com/google/gopacket v0.0.0-20190211013929-f86faeb88894/go.mod h1:UCLx9mCmAwsVbn6qQl1WIEt2SO7Nd2fD0th1TBAsqBw= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk= +github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gopacket v1.1.17 h1:rMrlX2ZY2UbvT+sdz3+6J+pp2z+msCq9MxTU6ymxbBY= +github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v1.0.0/go.mod 
h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg= -github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= -github.com/kr/pretty v0.0.0-20130510082846-bc9499caa0f4 h1:VOF8pfnliVudEzEOQLybN6eDhP5Q/LmwMnpDCBe9C4I= -github.com/kr/pretty v0.0.0-20130510082846-bc9499caa0f4/go.mod h1:Bvhd+E3laJ0AVkG0c9rmtZcnhV0HQ3+c3YxxqTvc/gA= -github.com/kr/text v0.0.0-20130911015532-6807e777504f h1:JaNmHIV9Eby6srQVWuiQ6n8ko2o/lG6udSRCbFZe1fs= -github.com/kr/text v0.0.0-20130911015532-6807e777504f/go.mod h1:sjUstKUATFIcff4qlB53Kml0wQPtJVc/3fWrmuUmcfA= +github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA= +github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.10.10 h1:a/y8CglcM7gLGYmlbP/stPE5sR3hbhFRUjCBfd/0B3I= +github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/mattbaird/elastigo v0.0.0-20170123220020-2fe47fd29e4b h1:v29yPGHhOqw7VHEnTeQFAth3SsBrmwc8JfuhNY0G34k= github.com/mattbaird/elastigo v0.0.0-20170123220020-2fe47fd29e4b/go.mod h1:5MWrJXKRQyhQdUCF+vu6U5c4nQpg70vW3eHaU0/AYbU= -github.com/mdlayher/raw v0.0.0-20191009151244-50f2db8cc065 h1:aFkJ6lx4FPip+S+Uw4aTegFMct9shDvP+79PsSxpm3w= -github.com/mdlayher/raw v0.0.0-20191009151244-50f2db8cc065/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg= -github.com/pierrec/lz4 v0.0.0-20161206202305-5c9560bfa9ac h1:tKcxwAA5OHUQjL6sWsuCIcP9OnzN+RwKfvomtIOsfy8= -github.com/pierrec/lz4 v0.0.0-20161206202305-5c9560bfa9ac/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/xxHash v0.0.0-20160112165351-5a004441f897 h1:jp3jc/PyyTrTKjJJ6rWnhTbmo7tGgBFyG9AL5FIrO1I= -github.com/pierrec/xxHash v0.0.0-20160112165351-5a004441f897/go.mod h1:w2waW5Zoa/Wc4Yqe0wgrIYAGKqRMf7czn2HNKXmuL+I= +github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rcrowley/go-metrics v0.0.0-20161128210544-1f30fe9094a5 h1:gwcdIpH6NU2iF8CmcqD+CP6+1CkRBOhHaPR+iu6raBY= -github.com/rcrowley/go-metrics v0.0.0-20161128210544-1f30fe9094a5/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 h1:+ELyKg6m8UBf0nPFSqD0mi7zUfwPyXo23HNjMnXPz7w= +golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190419010253-1f3472d942ba h1:h0zCzEL5UW1mERvwTN6AXcc75PpLkY6OcReia6Dq1BM= -golang.org/x/net v0.0.0-20190419010253-1f3472d942ba/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190418153312-f0ce4c0180be h1:mI+jhqkn68ybP0ORJqunXn+fq+Eeb4hHKqLQcFICjAc= -golang.org/x/sys v0.0.0-20190418153312-f0ce4c0180be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= +gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= +gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= +gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= +gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/settings.go b/settings.go index 37594a2a..11c0a8ff 100644 --- a/settings.go +++ b/settings.go @@ -296,6 +296,16 @@ func Debug(args ...interface{}) { } } +// the following regexes follow Go semantics https://golang.org/ref/spec#Letters_and_digits +var ( + rB = regexp.MustCompile(`(?i)^(?:0b|0x|0o)?[\da-f_]+$`) + rKB = regexp.MustCompile(`(?i)^(?:0b|0x|0o)?[\da-f_]+kb$`) + rMB = regexp.MustCompile(`(?i)^(?:0b|0x|0o)?[\da-f_]+mb$`) + rGB = regexp.MustCompile(`(?i)^(?:0b|0x|0o)?[\da-f_]+gb$`) + rTB = regexp.MustCompile(`(?i)^(?:0b|0x|0o)?[\da-f_]+tb$`) + empt = regexp.MustCompile(`^[\n\t\r 0.\f\a]*$`) +) + // bufferParser parses buffer to bytes from different bases and data units // size is the buffer in string, rpl act as a replacement for empty buffer. 
// e.g: (--output-file-size-limit "") may override default 32mb with empty buffer, @@ -310,14 +320,6 @@ func bufferParser(size, rpl string) (buffer int64, err error) { ) var ( - // the following regexes follow Go semantics https://golang.org/ref/spec#Letters_and_digits - rB = regexp.MustCompile(`(?i)^(?:0b|0x|0o)?[\da-f_]+$`) - rKB = regexp.MustCompile(`(?i)^(?:0b|0x|0o)?[\da-f_]+kb$`) - rMB = regexp.MustCompile(`(?i)^(?:0b|0x|0o)?[\da-f_]+mb$`) - rGB = regexp.MustCompile(`(?i)^(?:0b|0x|0o)?[\da-f_]+gb$`) - rTB = regexp.MustCompile(`(?i)^(?:0b|0x|0o)?[\da-f_]+tb$`) - empt = regexp.MustCompile(`^[\n\t\r 0.\f\a]*$`) - lmt = len(size) - 2 s = []byte(size) ) diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore index 3591f9ff..6e362e4f 100644 --- a/vendor/github.com/Shopify/sarama/.gitignore +++ b/vendor/github.com/Shopify/sarama/.gitignore @@ -22,3 +22,6 @@ _cgo_export.* _testmain.go *.exe + +coverage.txt +profile.out diff --git a/vendor/github.com/Shopify/sarama/.golangci.yml b/vendor/github.com/Shopify/sarama/.golangci.yml new file mode 100644 index 00000000..47624f3d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/.golangci.yml @@ -0,0 +1,74 @@ +run: + timeout: 5m + deadline: 10m + +linters-settings: + govet: + check-shadowing: false + golint: + min-confidence: 0 + gocyclo: + min-complexity: 99 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 3 + misspell: + locale: US + goimports: + local-prefixes: github.com/Shopify/sarama + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - wrapperFunc + - ifElseChain + funlen: + lines: 300 + statements: 300 + +linters: + disable-all: true + enable: + - bodyclose + - deadcode + - depguard + - dogsled + # - dupl + - errcheck + - funlen + # - gocritic + - gocyclo + - gofmt + - goimports + # - golint + - gosec + # - gosimple + - govet + # - ineffassign + - interfacer + # - misspell + # - nakedret + # - scopelint + # - staticcheck + - structcheck + # - stylecheck + - typecheck + - unconvert + - unused + - varcheck + - whitespace + # - goconst + # - gochecknoinits + +issues: + exclude: + - consider giving a name to these results + - include an explanation for nolint directive diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml deleted file mode 100644 index cd5d3c49..00000000 --- a/vendor/github.com/Shopify/sarama/.travis.yml +++ /dev/null @@ -1,33 +0,0 @@ -language: go -go: -- 1.6.3 -- 1.7.3 - -env: - global: - - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095 - - TOXIPROXY_ADDR=http://localhost:8474 - - KAFKA_INSTALL_ROOT=/home/travis/kafka - - KAFKA_HOSTNAME=localhost - - DEBUG=true - matrix: - - KAFKA_VERSION=0.9.0.1 - - KAFKA_VERSION=0.10.0.1 - - KAFKA_VERSION=0.10.1.0 - -before_install: -- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR} -- vagrant/install_cluster.sh -- vagrant/boot_cluster.sh -- vagrant/create_topics.sh - -install: -- make install_dependencies - -script: -- make test -- make vet -- make errcheck -- make fmt - -sudo: false diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md index aad986fe..2bebeb10 100644 --- a/vendor/github.com/Shopify/sarama/CHANGELOG.md +++ 
b/vendor/github.com/Shopify/sarama/CHANGELOG.md @@ -1,7 +1,569 @@ # Changelog +#### Unreleased + +#### Version 1.26.1 (2020-02-04) + +Improvements: +- Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539)) +- Fix misleading example for cluster admin ([1595](https://github.com/Shopify/sarama/pull/1595)) +- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/Shopify/sarama/pull/1573)) +- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/Shopify/sarama/pull/1592)) + +Bug Fixes: +- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590)) +- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589)) + +#### Version 1.26.0 (2020-01-24) + +New Features: +- Enable zstd compression + ([1574](https://github.com/Shopify/sarama/pull/1574), + [1582](https://github.com/Shopify/sarama/pull/1582)) +- Support headers in tools kafka-console-producer + ([1549](https://github.com/Shopify/sarama/pull/1549)) + +Improvements: +- Add SASL AuthIdentity to SASL frames (authzid) + ([1585](https://github.com/Shopify/sarama/pull/1585)). + +Bug Fixes: +- Sending messages with ZStd compression enabled fails in multiple ways + ([1252](https://github.com/Shopify/sarama/issues/1252)). +- Use the broker for any admin on BrokerConfig + ([1571](https://github.com/Shopify/sarama/pull/1571)). +- Set DescribeConfigRequest Version field + ([1576](https://github.com/Shopify/sarama/pull/1576)). +- ConsumerGroup flooding logs with client/metadata update req + ([1578](https://github.com/Shopify/sarama/pull/1578)). +- MetadataRequest version in DescribeCluster + ([1580](https://github.com/Shopify/sarama/pull/1580)). +- Fix deadlock in consumer group handleError + ([1581](https://github.com/Shopify/sarama/pull/1581)) +- Fill in the Fetch{Request,Response} protocol + ([1582](https://github.com/Shopify/sarama/pull/1582)). +- Retry topic request on ControllerNotAvailable + ([1586](https://github.com/Shopify/sarama/pull/1586)). + +#### Version 1.25.0 (2020-01-13) + +New Features: +- Support TLS protocol in kafka-producer-performance + ([1538](https://github.com/Shopify/sarama/pull/1538)). +- Add support for kafka 2.4.0 + ([1552](https://github.com/Shopify/sarama/pull/1552)). + +Improvements: +- Allow the Consumer to disable auto-commit offsets + ([1164](https://github.com/Shopify/sarama/pull/1164)). +- Produce records with consistent timestamps + ([1455](https://github.com/Shopify/sarama/pull/1455)). + +Bug Fixes: +- Fix incorrect SetTopicMetadata name mentions + ([1534](https://github.com/Shopify/sarama/pull/1534)). +- Fix client.tryRefreshMetadata Println + ([1535](https://github.com/Shopify/sarama/pull/1535)). +- Fix panic on calling updateMetadata on closed client + ([1531](https://github.com/Shopify/sarama/pull/1531)). +- Fix possible faulty metrics in TestFuncProducing + ([1545](https://github.com/Shopify/sarama/pull/1545)). + +#### Version 1.24.1 (2019-10-31) + +New Features: +- Add DescribeLogDirs Request/Response pair + ([1520](https://github.com/Shopify/sarama/pull/1520)). + +Bug Fixes: +- Fix ClusterAdmin returning invalid controller ID on DescribeCluster + ([1518](https://github.com/Shopify/sarama/pull/1518)). 
+- Fix issue with consumergroup not rebalancing when new partition is added + ([1525](https://github.com/Shopify/sarama/pull/1525)). +- Ensure consistent use of read/write deadlines + ([1529](https://github.com/Shopify/sarama/pull/1529)). + +#### Version 1.24.0 (2019-10-09) + +New Features: +- Add sticky partition assignor + ([1416](https://github.com/Shopify/sarama/pull/1416)). +- Switch from cgo zstd package to pure Go implementation + ([1477](https://github.com/Shopify/sarama/pull/1477)). + +Improvements: +- Allow creating ClusterAdmin from client + ([1415](https://github.com/Shopify/sarama/pull/1415)). +- Set KafkaVersion in ListAcls method + ([1452](https://github.com/Shopify/sarama/pull/1452)). +- Set request version in CreateACL ClusterAdmin method + ([1458](https://github.com/Shopify/sarama/pull/1458)). +- Set request version in DeleteACL ClusterAdmin method + ([1461](https://github.com/Shopify/sarama/pull/1461)). +- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest + ([1464](https://github.com/Shopify/sarama/pull/1464)). +- Remove direct usage of gofork + ([1465](https://github.com/Shopify/sarama/pull/1465)). +- Add support for Go 1.13 + ([1478](https://github.com/Shopify/sarama/pull/1478)). +- Improve behavior of NewMockListAclsResponse + ([1481](https://github.com/Shopify/sarama/pull/1481)). + +Bug Fixes: +- Fix race condition in consumergroup example + ([1434](https://github.com/Shopify/sarama/pull/1434)). +- Fix brokerProducer goroutine leak + ([1442](https://github.com/Shopify/sarama/pull/1442)). +- Use released version of lz4 library + ([1469](https://github.com/Shopify/sarama/pull/1469)). +- Set correct version in MockDeleteTopicsResponse + ([1484](https://github.com/Shopify/sarama/pull/1484)). +- Fix CLI help message typo + ([1494](https://github.com/Shopify/sarama/pull/1494)). + +Known Issues: +- Please **don't** use Zstd, as it doesn't work right now. + See https://github.com/Shopify/sarama/issues/1252 + +#### Version 1.23.1 (2019-07-22) + +Bug Fixes: +- Fix fetch delete bug record + ([1425](https://github.com/Shopify/sarama/pull/1425)). +- Handle SASL/OAUTHBEARER token rejection + ([1428](https://github.com/Shopify/sarama/pull/1428)). + +#### Version 1.23.0 (2019-07-02) + +New Features: +- Add support for Kafka 2.3.0 + ([1418](https://github.com/Shopify/sarama/pull/1418)). +- Add support for ListConsumerGroupOffsets v2 + ([1374](https://github.com/Shopify/sarama/pull/1374)). +- Add support for DeleteConsumerGroup + ([1417](https://github.com/Shopify/sarama/pull/1417)). +- Add support for SASLVersion configuration + ([1410](https://github.com/Shopify/sarama/pull/1410)). +- Add kerberos support + ([1366](https://github.com/Shopify/sarama/pull/1366)). + +Improvements: +- Improve sasl_scram_client example + ([1406](https://github.com/Shopify/sarama/pull/1406)). +- Fix shutdown and race-condition in consumer-group example + ([1404](https://github.com/Shopify/sarama/pull/1404)). +- Add support for error codes 77—81 + ([1397](https://github.com/Shopify/sarama/pull/1397)). +- Pool internal objects allocated per message + ([1385](https://github.com/Shopify/sarama/pull/1385)). +- Reduce packet decoder allocations + ([1373](https://github.com/Shopify/sarama/pull/1373)). 
+- Support timeout when fetching metadata + ([1359](https://github.com/Shopify/sarama/pull/1359)). + +Bug Fixes: +- Fix fetch size integer overflow + ([1376](https://github.com/Shopify/sarama/pull/1376)). +- Handle and log throttled FetchResponses + ([1383](https://github.com/Shopify/sarama/pull/1383)). +- Refactor misspelled word Resouce to Resource + ([1368](https://github.com/Shopify/sarama/pull/1368)). + +#### Version 1.22.1 (2019-04-29) + +Improvements: +- Use zstd 1.3.8 + ([1350](https://github.com/Shopify/sarama/pull/1350)). +- Add support for SaslHandshakeRequest v1 + ([1354](https://github.com/Shopify/sarama/pull/1354)). + +Bug Fixes: +- Fix V5 MetadataRequest nullable topics array + ([1353](https://github.com/Shopify/sarama/pull/1353)). +- Use a different SCRAM client for each broker connection + ([1349](https://github.com/Shopify/sarama/pull/1349)). +- Fix AllowAutoTopicCreation for MetadataRequest greater than v3 + ([1344](https://github.com/Shopify/sarama/pull/1344)). + +#### Version 1.22.0 (2019-04-09) + +New Features: +- Add Offline Replicas Operation to Client + ([1318](https://github.com/Shopify/sarama/pull/1318)). +- Allow using proxy when connecting to broker + ([1326](https://github.com/Shopify/sarama/pull/1326)). +- Implement ReadCommitted + ([1307](https://github.com/Shopify/sarama/pull/1307)). +- Add support for Kafka 2.2.0 + ([1331](https://github.com/Shopify/sarama/pull/1331)). +- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanismes + ([1331](https://github.com/Shopify/sarama/pull/1295)). + +Improvements: +- Unregister all broker metrics on broker stop + ([1232](https://github.com/Shopify/sarama/pull/1232)). +- Add SCRAM authentication example + ([1303](https://github.com/Shopify/sarama/pull/1303)). +- Add consumergroup examples + ([1304](https://github.com/Shopify/sarama/pull/1304)). +- Expose consumer batch size metric + ([1296](https://github.com/Shopify/sarama/pull/1296)). +- Add TLS options to console producer and consumer + ([1300](https://github.com/Shopify/sarama/pull/1300)). +- Reduce client close bookkeeping + ([1297](https://github.com/Shopify/sarama/pull/1297)). +- Satisfy error interface in create responses + ([1154](https://github.com/Shopify/sarama/pull/1154)). +- Please lint gods + ([1346](https://github.com/Shopify/sarama/pull/1346)). + +Bug Fixes: +- Fix multi consumer group instance crash + ([1338](https://github.com/Shopify/sarama/pull/1338)). +- Update lz4 to latest version + ([1347](https://github.com/Shopify/sarama/pull/1347)). +- Retry ErrNotCoordinatorForConsumer in new consumergroup session + ([1231](https://github.com/Shopify/sarama/pull/1231)). +- Fix cleanup error handler + ([1332](https://github.com/Shopify/sarama/pull/1332)). +- Fix rate condition in PartitionConsumer + ([1156](https://github.com/Shopify/sarama/pull/1156)). + +#### Version 1.21.0 (2019-02-24) + +New Features: +- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest + ([1236](https://github.com/Shopify/sarama/pull/1236)). +- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests + ([1178](https://github.com/Shopify/sarama/pull/1178)). +- Implement SASL/OAUTHBEARER + ([1240](https://github.com/Shopify/sarama/pull/1240)). 
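The 1.21.0 entries above (ACL request/response pairs plus the DescribeTopic, DescribeConsumerGroup, ListConsumerGroups and ListConsumerGroupOffsets admin requests) surface through sarama's `ClusterAdmin` API. A minimal, hedged sketch of listing consumer groups with it — the broker address is a placeholder, and the exact method signatures should be checked against the vendored sarama sources:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0 // assumption: the cluster speaks Kafka 2.1+

	// "localhost:9092" is a placeholder broker address.
	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	// ListConsumerGroups returns a map of group name -> protocol type.
	groups, err := admin.ListConsumerGroups()
	if err != nil {
		log.Fatal(err)
	}
	for name, protoType := range groups {
		fmt.Println(name, protoType)
	}
}
```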
+ +Improvements: +- Add Go mod support + ([1282](https://github.com/Shopify/sarama/pull/1282)). +- Add error codes 73—76 + ([1239](https://github.com/Shopify/sarama/pull/1239)). +- Add retry backoff function + ([1160](https://github.com/Shopify/sarama/pull/1160)). +- Maintain metadata in the producer even when retries are disabled + ([1189](https://github.com/Shopify/sarama/pull/1189)). +- Include ReplicaAssignment in ListTopics + ([1274](https://github.com/Shopify/sarama/pull/1274)). +- Add producer performance tool + ([1222](https://github.com/Shopify/sarama/pull/1222)). +- Add support LogAppend timestamps + ([1258](https://github.com/Shopify/sarama/pull/1258)). + +Bug Fixes: +- Fix potential deadlock when a heartbeat request fails + ([1286](https://github.com/Shopify/sarama/pull/1286)). +- Fix consuming compacted topic + ([1227](https://github.com/Shopify/sarama/pull/1227)). +- Set correct Kafka version for DescribeConfigsRequest v1 + ([1277](https://github.com/Shopify/sarama/pull/1277)). +- Update kafka test version + ([1273](https://github.com/Shopify/sarama/pull/1273)). + +#### Version 1.20.1 (2019-01-10) + +New Features: +- Add optional replica id in offset request + ([1100](https://github.com/Shopify/sarama/pull/1100)). + +Improvements: +- Implement DescribeConfigs Request + Response v1 & v2 + ([1230](https://github.com/Shopify/sarama/pull/1230)). +- Reuse compression objects + ([1185](https://github.com/Shopify/sarama/pull/1185)). +- Switch from png to svg for GoDoc link in README + ([1243](https://github.com/Shopify/sarama/pull/1243)). +- Fix typo in deprecation notice for FetchResponseBlock.Records + ([1242](https://github.com/Shopify/sarama/pull/1242)). +- Fix typos in consumer metadata response file + ([1244](https://github.com/Shopify/sarama/pull/1244)). + +Bug Fixes: +- Revert to individual msg retries for non-idempotent + ([1203](https://github.com/Shopify/sarama/pull/1203)). +- Respect MaxMessageBytes limit for uncompressed messages + ([1141](https://github.com/Shopify/sarama/pull/1141)). + +#### Version 1.20.0 (2018-12-10) + +New Features: + - Add support for zstd compression + ([#1170](https://github.com/Shopify/sarama/pull/1170)). + - Add support for Idempotent Producer + ([#1152](https://github.com/Shopify/sarama/pull/1152)). + - Add support support for Kafka 2.1.0 + ([#1229](https://github.com/Shopify/sarama/pull/1229)). + - Add support support for OffsetCommit request/response pairs versions v1 to v5 + ([#1201](https://github.com/Shopify/sarama/pull/1201)). + - Add support support for OffsetFetch request/response pair up to version v5 + ([#1198](https://github.com/Shopify/sarama/pull/1198)). + +Improvements: + - Export broker's Rack setting + ([#1173](https://github.com/Shopify/sarama/pull/1173)). + - Always use latest patch version of Go on CI + ([#1202](https://github.com/Shopify/sarama/pull/1202)). + - Add error codes 61 to 72 + ([#1195](https://github.com/Shopify/sarama/pull/1195)). + +Bug Fixes: + - Fix build without cgo + ([#1182](https://github.com/Shopify/sarama/pull/1182)). + - Fix go vet suggestion in consumer group file + ([#1209](https://github.com/Shopify/sarama/pull/1209)). + - Fix typos in code and comments + ([#1228](https://github.com/Shopify/sarama/pull/1228)). 
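The zstd and idempotent-producer entries in 1.20.0 above come down to producer configuration. A hedged sketch of a config that enables both: the field names reflect the sarama version vendored here, the broker address and topic are placeholders, and the idempotent producer is generally expected to run with acks=all and a single in-flight request per connection (treat those constraints as assumptions here):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0                     // zstd needs Kafka 2.1+
	cfg.Producer.Compression = sarama.CompressionZSTD // enable zstd compression
	cfg.Producer.Idempotent = true
	cfg.Producer.RequiredAcks = sarama.WaitForAll // assumption: acks=all for idempotence
	cfg.Net.MaxOpenRequests = 1                   // assumption: one in-flight request
	cfg.Producer.Return.Successes = true          // required for SyncProducer (see the 1.11 note further down)

	// "localhost:9092" and "example" are placeholders.
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	msg := &sarama.ProducerMessage{Topic: "example", Value: sarama.StringEncoder("hello")}
	if _, _, err := producer.SendMessage(msg); err != nil {
		log.Fatal(err)
	}
}
```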
+ +#### Version 1.19.0 (2018-09-27) + +New Features: + - Implement a higher-level consumer group + ([#1099](https://github.com/Shopify/sarama/pull/1099)). + +Improvements: + - Add support for Go 1.11 + ([#1176](https://github.com/Shopify/sarama/pull/1176)). + +Bug Fixes: + - Fix encoding of `MetadataResponse` with version 2 and higher + ([#1174](https://github.com/Shopify/sarama/pull/1174)). + - Fix race condition in mock async producer + ([#1174](https://github.com/Shopify/sarama/pull/1174)). + +#### Version 1.18.0 (2018-09-07) + +New Features: + - Make `Partitioner.RequiresConsistency` vary per-message + ([#1112](https://github.com/Shopify/sarama/pull/1112)). + - Add customizable partitioner + ([#1118](https://github.com/Shopify/sarama/pull/1118)). + - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`, + `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL` + ([#1055](https://github.com/Shopify/sarama/pull/1055)). + +Improvements: + - Add support for Kafka 2.0.0 + ([#1149](https://github.com/Shopify/sarama/pull/1149)). + - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts + ([#1123](https://github.com/Shopify/sarama/pull/1123)). + - Simpler offset management + ([#1127](https://github.com/Shopify/sarama/pull/1127)). + +Bug Fixes: + - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka + ([#1110](https://github.com/Shopify/sarama/pull/1110)). + - Fix consumer block when response did not contain all the + expected topic/partition blocks + ([#1086](https://github.com/Shopify/sarama/pull/1086)). + - Fix consumer block when response contains only constrol messages + ([#1115](https://github.com/Shopify/sarama/pull/1115)). + - Add timeout config for ClusterAdmin requests + ([#1142](https://github.com/Shopify/sarama/pull/1142)). + - Add version check when producing message with headers + ([#1117](https://github.com/Shopify/sarama/pull/1117)). + - Fix `MetadataRequest` for empty list of topics + ([#1132](https://github.com/Shopify/sarama/pull/1132)). + - Fix producer topic metadata on-demand fetch when topic error happens in metadata response + ([#1125](https://github.com/Shopify/sarama/pull/1125)). + +#### Version 1.17.0 (2018-05-30) + +New Features: + - Add support for gzip compression levels + ([#1044](https://github.com/Shopify/sarama/pull/1044)). + - Add support for Metadata request/response pairs versions v1 to v5 + ([#1047](https://github.com/Shopify/sarama/pull/1047), + [#1069](https://github.com/Shopify/sarama/pull/1069)). + - Add versioning to JoinGroup request/response pairs + ([#1098](https://github.com/Shopify/sarama/pull/1098)) + - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs + ([#1065](https://github.com/Shopify/sarama/pull/1065), + [#1096](https://github.com/Shopify/sarama/pull/1096), + [#1027](https://github.com/Shopify/sarama/pull/1027)). + - Add `Controller()` method to Client interface + ([#1063](https://github.com/Shopify/sarama/pull/1063)). + +Improvements: + - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp + ([#1010](https://github.com/Shopify/sarama/pull/1010)). + - Expose missing protocol parts: `msgSet` and `recordBatch` + ([#1049](https://github.com/Shopify/sarama/pull/1049)). 
+ - Add support for v1 DeleteTopics Request + ([#1052](https://github.com/Shopify/sarama/pull/1052)). + - Add support for Go 1.10 + ([#1064](https://github.com/Shopify/sarama/pull/1064)). + - Claim support for Kafka 1.1.0 + ([#1073](https://github.com/Shopify/sarama/pull/1073)). + +Bug Fixes: + - Fix FindCoordinatorResponse.encode to allow nil Coordinator + ([#1050](https://github.com/Shopify/sarama/pull/1050), + [#1051](https://github.com/Shopify/sarama/pull/1051)). + - Clear all metadata when we have the latest topic info + ([#1033](https://github.com/Shopify/sarama/pull/1033)). + - Make `PartitionConsumer.Close` idempotent + ([#1092](https://github.com/Shopify/sarama/pull/1092)). + +#### Version 1.16.0 (2018-02-12) + +New Features: + - Add support for the Create/Delete Topics request/response pairs + ([#1007](https://github.com/Shopify/sarama/pull/1007), + [#1008](https://github.com/Shopify/sarama/pull/1008)). + - Add support for the Describe/Create/Delete ACL request/response pairs + ([#1009](https://github.com/Shopify/sarama/pull/1009)). + - Add support for the five transaction-related request/response pairs + ([#1016](https://github.com/Shopify/sarama/pull/1016)). + +Improvements: + - Permit setting version on mock producer responses + ([#999](https://github.com/Shopify/sarama/pull/999)). + - Add `NewMockBrokerListener` helper for testing TLS connections + ([#1019](https://github.com/Shopify/sarama/pull/1019)). + - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB + which results in much higher throughput in most cases + ([#1024](https://github.com/Shopify/sarama/pull/1024)). + - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to + reduce CPU and memory usage when processing many partitions + ([#1028](https://github.com/Shopify/sarama/pull/1028)). + - Assign relative offsets to messages in the producer to save the brokers a + recompression pass + ([#1002](https://github.com/Shopify/sarama/pull/1002), + [#1015](https://github.com/Shopify/sarama/pull/1015)). + +Bug Fixes: + - Fix producing uncompressed batches with the new protocol format + ([#1032](https://github.com/Shopify/sarama/issues/1032)). + - Fix consuming compacted topics with the new protocol format + ([#1005](https://github.com/Shopify/sarama/issues/1005)). + - Fix consuming topics with a mix of protocol formats + ([#1021](https://github.com/Shopify/sarama/issues/1021)). + - Fix consuming when the broker includes multiple batches in a single response + ([#1022](https://github.com/Shopify/sarama/issues/1022)). + - Fix detection of `PartialTrailingMessage` when the partial message was + truncated before the magic value indicating its version + ([#1030](https://github.com/Shopify/sarama/pull/1030)). + - Fix expectation-checking in the mock of `SyncProducer.SendMessages` + ([#1035](https://github.com/Shopify/sarama/pull/1035)). + +#### Version 1.15.0 (2017-12-08) + +New Features: + - Claim official support for Kafka 1.0, though it did already work + ([#984](https://github.com/Shopify/sarama/pull/984)). + - Helper methods for Kafka version numbers to/from strings + ([#989](https://github.com/Shopify/sarama/pull/989)). + - Implement CreatePartitions request/response + ([#985](https://github.com/Shopify/sarama/pull/985)). 
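The 1.15.0 entry about version helper methods refers to converting Kafka version strings to and from sarama's `KafkaVersion` values. A small sketch, assuming `ParseKafkaVersion` keeps the signature it has in the vendored copy:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Parse a version string into a sarama.KafkaVersion.
	v, err := sarama.ParseKafkaVersion("1.0.0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.String()) // round-trips back to "1.0.0"

	cfg := sarama.NewConfig()
	cfg.Version = v // use the parsed version when building a client config
}
```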
+ +Improvements: + - Add error codes 45-60 + ([#986](https://github.com/Shopify/sarama/issues/986)). + +Bug Fixes: + - Fix slow consuming for certain Kafka 0.11/1.0 configurations + ([#982](https://github.com/Shopify/sarama/pull/982)). + - Correctly determine when a FetchResponse contains the new message format + ([#990](https://github.com/Shopify/sarama/pull/990)). + - Fix producing with multiple headers + ([#996](https://github.com/Shopify/sarama/pull/996)). + - Fix handling of truncated record batches + ([#998](https://github.com/Shopify/sarama/pull/998)). + - Fix leaking metrics when closing brokers + ([#991](https://github.com/Shopify/sarama/pull/991)). + +#### Version 1.14.0 (2017-11-13) + +New Features: + - Add support for the new Kafka 0.11 record-batch format, including the wire + protocol and the necessary behavioural changes in the producer and consumer. + Transactions and idempotency are not yet supported, but producing and + consuming should work with all the existing bells and whistles (batching, + compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta + of Arista Networks for this work. Part of + ([#901](https://github.com/Shopify/sarama/issues/901)). + +Bug Fixes: + - Fix encoding of ProduceResponse versions in test + ([#970](https://github.com/Shopify/sarama/pull/970)). + - Return partial replicas list when we have it + ([#975](https://github.com/Shopify/sarama/pull/975)). + +#### Version 1.13.0 (2017-10-04) + +New Features: + - Support for FetchRequest version 3 + ([#905](https://github.com/Shopify/sarama/pull/905)). + - Permit setting version on mock FetchResponses + ([#939](https://github.com/Shopify/sarama/pull/939)). + - Add a configuration option to support storing only minimal metadata for + extremely large clusters + ([#937](https://github.com/Shopify/sarama/pull/937)). + - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets + ([#932](https://github.com/Shopify/sarama/pull/932)). + +Improvements: + - Provide the block-level timestamp when consuming compressed messages + ([#885](https://github.com/Shopify/sarama/issues/885)). + - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned + by the broker, which can be meaningful + ([#930](https://github.com/Shopify/sarama/pull/930)). + - Use a `Ticker` to reduce consumer timer overhead at the cost of higher + variance in the actual timeout + ([#933](https://github.com/Shopify/sarama/pull/933)). + +Bug Fixes: + - Gracefully handle messages with negative timestamps + ([#907](https://github.com/Shopify/sarama/pull/907)). + - Raise a proper error when encountering an unknown message version + ([#940](https://github.com/Shopify/sarama/pull/940)). + +#### Version 1.12.0 (2017-05-08) + +New Features: + - Added support for the `ApiVersions` request and response pair, and Kafka + version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note + that you still need to specify the Kafka version in the Sarama configuration + for the time being. + - Added a `Brokers` method to the Client which returns the complete set of + active brokers ([#813](https://github.com/Shopify/sarama/pull/813)). 
+ - Added an `InSyncReplicas` method to the Client which returns the set of all + in-sync broker IDs for the given partition, now that the Kafka versions for + which this was misleading are no longer in our supported set + ([#872](https://github.com/Shopify/sarama/pull/872)). + - Added a `NewCustomHashPartitioner` method which allows constructing a hash + partitioner with a custom hash method in case the default (FNV-1a) is not + suitable + ([#837](https://github.com/Shopify/sarama/pull/837), + [#841](https://github.com/Shopify/sarama/pull/841)). + +Improvements: + - Recognize more Kafka error codes + ([#859](https://github.com/Shopify/sarama/pull/859)). + +Bug Fixes: + - Fix an issue where decoding a malformed FetchRequest would not return the + correct error ([#818](https://github.com/Shopify/sarama/pull/818)). + - Respect ordering of group protocols in JoinGroupRequests. This fix is + transparent if you're using the `AddGroupProtocol` or + `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from + the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols` + ([#812](https://github.com/Shopify/sarama/issues/812)). + - Fix an alignment-related issue with atomics on 32-bit architectures + ([#859](https://github.com/Shopify/sarama/pull/859)). + #### Version 1.11.0 (2016-12-20) +_Important:_ As of Sarama 1.11 it is necessary to set the config value of +`Producer.Return.Successes` to true in order to use the SyncProducer. Previous +versions would silently override this value when instantiating a SyncProducer +which led to unexpected values and data races. + New Features: - Metrics! Thanks to Sébastien Launay for all his work on this feature ([#701](https://github.com/Shopify/sarama/pull/701), diff --git a/vendor/github.com/Shopify/sarama/MIT-LICENSE b/vendor/github.com/Shopify/sarama/LICENSE similarity index 97% rename from vendor/github.com/Shopify/sarama/MIT-LICENSE rename to vendor/github.com/Shopify/sarama/LICENSE index 8121b63b..d2bf4352 100644 --- a/vendor/github.com/Shopify/sarama/MIT-LICENSE +++ b/vendor/github.com/Shopify/sarama/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2013 Evan Huus +Copyright (c) 2013 Shopify Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile index 626b09a5..c3b431a5 100644 --- a/vendor/github.com/Shopify/sarama/Makefile +++ b/vendor/github.com/Shopify/sarama/Makefile @@ -1,21 +1,27 @@ -default: fmt vet errcheck test +default: fmt get update test lint -test: - go test -v -timeout 60s -race ./... +GO := GO111MODULE=on GOPRIVATE=github.com/linkedin GOSUMDB=off go +GOBUILD := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG) +GOTEST := $(GO) test -gcflags='-l' -p 3 -v -race -timeout 6m -coverprofile=profile.out -covermode=atomic -vet: - go vet ./... +FILES := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go') +TESTS := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go') -errcheck: - errcheck github.com/Shopify/sarama/... +get: + $(GO) get ./... + $(GO) mod verify + $(GO) mod tidy -fmt: - @if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' 
&& exit 1; fi +update: + $(GO) get -u -v all + $(GO) mod verify + $(GO) mod tidy -install_dependencies: install_errcheck get +fmt: + gofmt -s -l -w $(FILES) $(TESTS) -install_errcheck: - go get github.com/kisielk/errcheck +lint: + golangci-lint run -get: - go get -t +test: + $(GOTEST) ./... diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md index c2968b92..9b7478d7 100644 --- a/vendor/github.com/Shopify/sarama/README.md +++ b/vendor/github.com/Shopify/sarama/README.md @@ -1,36 +1,36 @@ -sarama -====== +# sarama -[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama) +[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.svg)](https://godoc.org/github.com/Shopify/sarama) [![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama) +[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama) Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later). -### Getting started +## Getting started - API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama). - Mocks for testing are available in the [mocks](./mocks) subpackage. - The [examples](./examples) directory contains more elaborate example applications. - The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation. -### Compatibility and API stability +You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions). + +## Compatibility and API stability Sarama provides a "2 releases + 2 months" compatibility guarantee: we support the two latest stable releases of Kafka and Go, and we provide a two month grace period for older releases. This means we currently officially support -Go 1.7 and 1.6, and Kafka 0.10.0 and 0.9.0, although older releases are +Go 1.12 through 1.14, and Kafka 2.1 through 2.4, although older releases are still likely to work. Sarama follows semantic versioning and provides API stability via the gopkg.in service. You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. A changelog is available [here](CHANGELOG.md). -### Contributing +## Contributing -* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/CONTRIBUTING.md). -* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more - technical and design details. -* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) - contains a wealth of useful information. -* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. -* If you have any questions, just ask! +- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md). +- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details. +- The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information. 
+- For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers. +- If you have any questions, just ask! diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile index f4b848a3..07d7ffb8 100644 --- a/vendor/github.com/Shopify/sarama/Vagrantfile +++ b/vendor/github.com/Shopify/sarama/Vagrantfile @@ -1,14 +1,8 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! -VAGRANTFILE_API_VERSION = "2" - # We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB MEMORY = 3072 -Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - config.vm.box = "ubuntu/trusty64" +Vagrant.configure("2") do |config| + config.vm.box = "ubuntu/bionic64" config.vm.provision :shell, path: "vagrant/provision.sh" diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/Shopify/sarama/acl_bindings.go new file mode 100644 index 00000000..50b689d1 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_bindings.go @@ -0,0 +1,138 @@ +package sarama + +//Resource holds information about acl resource type +type Resource struct { + ResourceType AclResourceType + ResourceName string + ResourcePatternType AclResourcePatternType +} + +func (r *Resource) encode(pe packetEncoder, version int16) error { + pe.putInt8(int8(r.ResourceType)) + + if err := pe.putString(r.ResourceName); err != nil { + return err + } + + if version == 1 { + if r.ResourcePatternType == AclPatternUnknown { + Logger.Print("Cannot encode an unknown resource pattern type, using Literal instead") + r.ResourcePatternType = AclPatternLiteral + } + pe.putInt8(int8(r.ResourcePatternType)) + } + + return nil +} + +func (r *Resource) decode(pd packetDecoder, version int16) (err error) { + resourceType, err := pd.getInt8() + if err != nil { + return err + } + r.ResourceType = AclResourceType(resourceType) + + if r.ResourceName, err = pd.getString(); err != nil { + return err + } + if version == 1 { + pattern, err := pd.getInt8() + if err != nil { + return err + } + r.ResourcePatternType = AclResourcePatternType(pattern) + } + + return nil +} + +//Acl holds information about acl type +type Acl struct { + Principal string + Host string + Operation AclOperation + PermissionType AclPermissionType +} + +func (a *Acl) encode(pe packetEncoder) error { + if err := pe.putString(a.Principal); err != nil { + return err + } + + if err := pe.putString(a.Host); err != nil { + return err + } + + pe.putInt8(int8(a.Operation)) + pe.putInt8(int8(a.PermissionType)) + + return nil +} + +func (a *Acl) decode(pd packetDecoder, version int16) (err error) { + if a.Principal, err = pd.getString(); err != nil { + return err + } + + if a.Host, err = pd.getString(); err != nil { + return err + } + + operation, err := pd.getInt8() + if err != nil { + return err + } + a.Operation = AclOperation(operation) + + permissionType, err := pd.getInt8() + if err != nil { + return err + } + a.PermissionType = AclPermissionType(permissionType) + + return nil +} + +//ResourceAcls is an acl resource type +type ResourceAcls struct { + Resource + Acls []*Acl +} + +func (r *ResourceAcls) encode(pe packetEncoder, version int16) error { + if err := r.Resource.encode(pe, version); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.Acls)); err != nil { + return err + } + for _, acl := range r.Acls { + if err := 
acl.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *ResourceAcls) decode(pd packetDecoder, version int16) error { + if err := r.Resource.decode(pd, version); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Acls = make([]*Acl, n) + for i := 0; i < n; i++ { + r.Acls[i] = new(Acl) + if err := r.Acls[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go new file mode 100644 index 00000000..6d8a70e1 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_create_request.go @@ -0,0 +1,89 @@ +package sarama + +//CreateAclsRequest is an acl creation request +type CreateAclsRequest struct { + Version int16 + AclCreations []*AclCreation +} + +func (c *CreateAclsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(c.AclCreations)); err != nil { + return err + } + + for _, aclCreation := range c.AclCreations { + if err := aclCreation.encode(pe, c.Version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) { + c.Version = version + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.AclCreations = make([]*AclCreation, n) + + for i := 0; i < n; i++ { + c.AclCreations[i] = new(AclCreation) + if err := c.AclCreations[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateAclsRequest) key() int16 { + return 30 +} + +func (c *CreateAclsRequest) version() int16 { + return c.Version +} + +func (c *CreateAclsRequest) headerVersion() int16 { + return 1 +} + +func (c *CreateAclsRequest) requiredVersion() KafkaVersion { + switch c.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} + +//AclCreation is a wrapper around Resource and Acl type +type AclCreation struct { + Resource + Acl +} + +func (a *AclCreation) encode(pe packetEncoder, version int16) error { + if err := a.Resource.encode(pe, version); err != nil { + return err + } + if err := a.Acl.encode(pe); err != nil { + return err + } + + return nil +} + +func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) { + if err := a.Resource.decode(pd, version); err != nil { + return err + } + if err := a.Acl.decode(pd, version); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go new file mode 100644 index 00000000..14b1b9e1 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_create_response.go @@ -0,0 +1,94 @@ +package sarama + +import "time" + +//CreateAclsResponse is a an acl response creation type +type CreateAclsResponse struct { + ThrottleTime time.Duration + AclCreationResponses []*AclCreationResponse +} + +func (c *CreateAclsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil { + return err + } + + for _, aclCreationResponse := range c.AclCreationResponses { + if err := aclCreationResponse.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + c.ThrottleTime = 
time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.AclCreationResponses = make([]*AclCreationResponse, n) + for i := 0; i < n; i++ { + c.AclCreationResponses[i] = new(AclCreationResponse) + if err := c.AclCreationResponses[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateAclsResponse) key() int16 { + return 30 +} + +func (c *CreateAclsResponse) version() int16 { + return 0 +} + +func (c *CreateAclsResponse) headerVersion() int16 { + return 0 +} + +func (c *CreateAclsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +//AclCreationResponse is an acl creation response type +type AclCreationResponse struct { + Err KError + ErrMsg *string +} + +func (a *AclCreationResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(a.Err)) + + if err := pe.putNullableString(a.ErrMsg); err != nil { + return err + } + + return nil +} + +func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + a.Err = KError(kerr) + + if a.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go new file mode 100644 index 00000000..41525225 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_delete_request.go @@ -0,0 +1,62 @@ +package sarama + +//DeleteAclsRequest is a delete acl request +type DeleteAclsRequest struct { + Version int + Filters []*AclFilter +} + +func (d *DeleteAclsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(d.Filters)); err != nil { + return err + } + + for _, filter := range d.Filters { + filter.Version = d.Version + if err := filter.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) { + d.Version = int(version) + n, err := pd.getArrayLength() + if err != nil { + return err + } + + d.Filters = make([]*AclFilter, n) + for i := 0; i < n; i++ { + d.Filters[i] = new(AclFilter) + d.Filters[i].Version = int(version) + if err := d.Filters[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (d *DeleteAclsRequest) key() int16 { + return 31 +} + +func (d *DeleteAclsRequest) version() int16 { + return int16(d.Version) +} + +func (c *DeleteAclsRequest) headerVersion() int16 { + return 1 +} + +func (d *DeleteAclsRequest) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go new file mode 100644 index 00000000..cb630882 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_delete_response.go @@ -0,0 +1,163 @@ +package sarama + +import "time" + +//DeleteAclsResponse is a delete acl response +type DeleteAclsResponse struct { + Version int16 + ThrottleTime time.Duration + FilterResponses []*FilterResponse +} + +func (d *DeleteAclsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(d.FilterResponses)); err != nil { + return err + } + + for _, filterResponse := range d.FilterResponses { + if err := filterResponse.encode(pe, d.Version); err != nil { + return err + } + 
} + + return nil +} + +func (d *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + d.FilterResponses = make([]*FilterResponse, n) + + for i := 0; i < n; i++ { + d.FilterResponses[i] = new(FilterResponse) + if err := d.FilterResponses[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (d *DeleteAclsResponse) key() int16 { + return 31 +} + +func (d *DeleteAclsResponse) version() int16 { + return d.Version +} + +func (d *DeleteAclsResponse) headerVersion() int16 { + return 0 +} + +func (d *DeleteAclsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +//FilterResponse is a filter response type +type FilterResponse struct { + Err KError + ErrMsg *string + MatchingAcls []*MatchingAcl +} + +func (f *FilterResponse) encode(pe packetEncoder, version int16) error { + pe.putInt16(int16(f.Err)) + if err := pe.putNullableString(f.ErrMsg); err != nil { + return err + } + + if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil { + return err + } + for _, matchingAcl := range f.MatchingAcls { + if err := matchingAcl.encode(pe, version); err != nil { + return err + } + } + + return nil +} + +func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + f.Err = KError(kerr) + + if f.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + f.MatchingAcls = make([]*MatchingAcl, n) + for i := 0; i < n; i++ { + f.MatchingAcls[i] = new(MatchingAcl) + if err := f.MatchingAcls[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +//MatchingAcl is a matching acl type +type MatchingAcl struct { + Err KError + ErrMsg *string + Resource + Acl +} + +func (m *MatchingAcl) encode(pe packetEncoder, version int16) error { + pe.putInt16(int16(m.Err)) + if err := pe.putNullableString(m.ErrMsg); err != nil { + return err + } + + if err := m.Resource.encode(pe, version); err != nil { + return err + } + + if err := m.Acl.encode(pe); err != nil { + return err + } + + return nil +} + +func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + m.Err = KError(kerr) + + if m.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + if err := m.Resource.decode(pd, version); err != nil { + return err + } + + if err := m.Acl.decode(pd, version); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go new file mode 100644 index 00000000..29841a5c --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_describe_request.go @@ -0,0 +1,39 @@ +package sarama + +//DescribeAclsRequest is a secribe acl request type +type DescribeAclsRequest struct { + Version int + AclFilter +} + +func (d *DescribeAclsRequest) encode(pe packetEncoder) error { + d.AclFilter.Version = d.Version + return d.AclFilter.encode(pe) +} + +func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) { + d.Version = int(version) + d.AclFilter.Version = int(version) + return d.AclFilter.decode(pd, version) +} + +func (d *DescribeAclsRequest) 
key() int16 { + return 29 +} + +func (d *DescribeAclsRequest) version() int16 { + return int16(d.Version) +} + +func (d *DescribeAclsRequest) headerVersion() int16 { + return 1 +} + +func (d *DescribeAclsRequest) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go new file mode 100644 index 00000000..c43408b2 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_describe_response.go @@ -0,0 +1,91 @@ +package sarama + +import "time" + +//DescribeAclsResponse is a describe acl response type +type DescribeAclsResponse struct { + Version int16 + ThrottleTime time.Duration + Err KError + ErrMsg *string + ResourceAcls []*ResourceAcls +} + +func (d *DescribeAclsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(d.Err)) + + if err := pe.putNullableString(d.ErrMsg); err != nil { + return err + } + + if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil { + return err + } + + for _, resourceAcl := range d.ResourceAcls { + if err := resourceAcl.encode(pe, d.Version); err != nil { + return err + } + } + + return nil +} + +func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + d.Err = KError(kerr) + + errmsg, err := pd.getString() + if err != nil { + return err + } + if errmsg != "" { + d.ErrMsg = &errmsg + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + d.ResourceAcls = make([]*ResourceAcls, n) + + for i := 0; i < n; i++ { + d.ResourceAcls[i] = new(ResourceAcls) + if err := d.ResourceAcls[i].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (d *DescribeAclsResponse) key() int16 { + return 29 +} + +func (d *DescribeAclsResponse) version() int16 { + return d.Version +} + +func (d *DescribeAclsResponse) headerVersion() int16 { + return 0 +} + +func (d *DescribeAclsResponse) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/Shopify/sarama/acl_filter.go new file mode 100644 index 00000000..fad55587 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_filter.go @@ -0,0 +1,78 @@ +package sarama + +type AclFilter struct { + Version int + ResourceType AclResourceType + ResourceName *string + ResourcePatternTypeFilter AclResourcePatternType + Principal *string + Host *string + Operation AclOperation + PermissionType AclPermissionType +} + +func (a *AclFilter) encode(pe packetEncoder) error { + pe.putInt8(int8(a.ResourceType)) + if err := pe.putNullableString(a.ResourceName); err != nil { + return err + } + + if a.Version == 1 { + pe.putInt8(int8(a.ResourcePatternTypeFilter)) + } + + if err := pe.putNullableString(a.Principal); err != nil { + return err + } + if err := pe.putNullableString(a.Host); err != nil { + return err + } + pe.putInt8(int8(a.Operation)) + pe.putInt8(int8(a.PermissionType)) + + return nil +} + +func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) { + resourceType, err := pd.getInt8() + if err != nil { + return err + } 
+ a.ResourceType = AclResourceType(resourceType) + + if a.ResourceName, err = pd.getNullableString(); err != nil { + return err + } + + if a.Version == 1 { + pattern, err := pd.getInt8() + + if err != nil { + return err + } + + a.ResourcePatternTypeFilter = AclResourcePatternType(pattern) + } + + if a.Principal, err = pd.getNullableString(); err != nil { + return err + } + + if a.Host, err = pd.getNullableString(); err != nil { + return err + } + + operation, err := pd.getInt8() + if err != nil { + return err + } + a.Operation = AclOperation(operation) + + permissionType, err := pd.getInt8() + if err != nil { + return err + } + a.PermissionType = AclPermissionType(permissionType) + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go new file mode 100644 index 00000000..c10ad7b9 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/acl_types.go @@ -0,0 +1,55 @@ +package sarama + +type ( + AclOperation int + + AclPermissionType int + + AclResourceType int + + AclResourcePatternType int +) + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java +const ( + AclOperationUnknown AclOperation = iota + AclOperationAny + AclOperationAll + AclOperationRead + AclOperationWrite + AclOperationCreate + AclOperationDelete + AclOperationAlter + AclOperationDescribe + AclOperationClusterAction + AclOperationDescribeConfigs + AclOperationAlterConfigs + AclOperationIdempotentWrite +) + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java +const ( + AclPermissionUnknown AclPermissionType = iota + AclPermissionAny + AclPermissionDeny + AclPermissionAllow +) + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java +const ( + AclResourceUnknown AclResourceType = iota + AclResourceAny + AclResourceTopic + AclResourceGroup + AclResourceCluster + AclResourceTransactionalID +) + +// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java +const ( + AclPatternUnknown AclResourcePatternType = iota + AclPatternAny + AclPatternMatch + AclPatternLiteral + AclPatternPrefixed +) diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go new file mode 100644 index 00000000..95586f9a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go @@ -0,0 +1,57 @@ +package sarama + +//AddOffsetsToTxnRequest adds offsets to a transaction request +type AddOffsetsToTxnRequest struct { + TransactionalID string + ProducerID int64 + ProducerEpoch int16 + GroupID string +} + +func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error { + if err := pe.putString(a.TransactionalID); err != nil { + return err + } + + pe.putInt64(a.ProducerID) + + pe.putInt16(a.ProducerEpoch) + + if err := pe.putString(a.GroupID); err != nil { + return err + } + + return nil +} + +func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) { + if a.TransactionalID, err = pd.getString(); err != nil { + return err + } + if a.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if a.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + if a.GroupID, err = pd.getString(); err != nil { + 
return err + } + return nil +} + +func (a *AddOffsetsToTxnRequest) key() int16 { + return 25 +} + +func (a *AddOffsetsToTxnRequest) version() int16 { + return 0 +} + +func (a *AddOffsetsToTxnRequest) headerVersion() int16 { + return 1 +} + +func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go new file mode 100644 index 00000000..bdb18441 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go @@ -0,0 +1,49 @@ +package sarama + +import ( + "time" +) + +//AddOffsetsToTxnResponse is a response type for adding offsets to txns +type AddOffsetsToTxnResponse struct { + ThrottleTime time.Duration + Err KError +} + +func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(a.Err)) + return nil +} + +func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + a.Err = KError(kerr) + + return nil +} + +func (a *AddOffsetsToTxnResponse) key() int16 { + return 25 +} + +func (a *AddOffsetsToTxnResponse) version() int16 { + return 0 +} + +func (a *AddOffsetsToTxnResponse) headerVersion() int16 { + return 0 +} + +func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go new file mode 100644 index 00000000..6289f451 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go @@ -0,0 +1,81 @@ +package sarama + +//AddPartitionsToTxnRequest is a add paartition request +type AddPartitionsToTxnRequest struct { + TransactionalID string + ProducerID int64 + ProducerEpoch int16 + TopicPartitions map[string][]int32 +} + +func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error { + if err := pe.putString(a.TransactionalID); err != nil { + return err + } + pe.putInt64(a.ProducerID) + pe.putInt16(a.ProducerEpoch) + + if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil { + return err + } + for topic, partitions := range a.TopicPartitions { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putInt32Array(partitions); err != nil { + return err + } + } + + return nil +} + +func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) { + if a.TransactionalID, err = pd.getString(); err != nil { + return err + } + if a.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if a.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + a.TopicPartitions = make(map[string][]int32) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + partitions, err := pd.getInt32Array() + if err != nil { + return err + } + + a.TopicPartitions[topic] = partitions + } + + return nil +} + +func (a *AddPartitionsToTxnRequest) key() int16 { + return 24 +} + +func (a *AddPartitionsToTxnRequest) version() int16 { + return 0 +} + +func (a *AddPartitionsToTxnRequest) headerVersion() 
int16 { + return 1 +} + +func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go new file mode 100644 index 00000000..73b73b07 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go @@ -0,0 +1,114 @@ +package sarama + +import ( + "time" +) + +//AddPartitionsToTxnResponse is a partition errors to transaction type +type AddPartitionsToTxnResponse struct { + ThrottleTime time.Duration + Errors map[string][]*PartitionError +} + +func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) + if err := pe.putArrayLength(len(a.Errors)); err != nil { + return err + } + + for topic, e := range a.Errors { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(e)); err != nil { + return err + } + for _, partitionError := range e { + if err := partitionError.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + a.Errors = make(map[string][]*PartitionError) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + m, err := pd.getArrayLength() + if err != nil { + return err + } + + a.Errors[topic] = make([]*PartitionError, m) + + for j := 0; j < m; j++ { + a.Errors[topic][j] = new(PartitionError) + if err := a.Errors[topic][j].decode(pd, version); err != nil { + return err + } + } + } + + return nil +} + +func (a *AddPartitionsToTxnResponse) key() int16 { + return 24 +} + +func (a *AddPartitionsToTxnResponse) version() int16 { + return 0 +} + +func (a *AddPartitionsToTxnResponse) headerVersion() int16 { + return 0 +} + +func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +//PartitionError is a partition error type +type PartitionError struct { + Partition int32 + Err KError +} + +func (p *PartitionError) encode(pe packetEncoder) error { + pe.putInt32(p.Partition) + pe.putInt16(int16(p.Err)) + return nil +} + +func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) { + if p.Partition, err = pd.getInt32(); err != nil { + return err + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + p.Err = KError(kerr) + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go new file mode 100644 index 00000000..0430d984 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/admin.go @@ -0,0 +1,934 @@ +package sarama + +import ( + "errors" + "fmt" + "math/rand" + "strconv" + "sync" + "time" +) + +// ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics, +// brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0. +// Methods with stricter requirements will specify the minimum broker version required. +// You MUST call Close() on a client to avoid leaks +type ClusterAdmin interface { + // Creates a new topic. This operation is supported by brokers with version 0.10.1.0 or higher. 
+ // It may take several seconds after CreateTopic returns success for all the brokers + // to become aware that the topic has been created. During this time, listTopics + // may not return information about the new topic.The validateOnly option is supported from version 0.10.2.0. + CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error + + // List the topics available in the cluster with the default options. + ListTopics() (map[string]TopicDetail, error) + + // Describe some topics in the cluster. + DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) + + // Delete a topic. It may take several seconds after the DeleteTopic to returns success + // and for all the brokers to become aware that the topics are gone. + // During this time, listTopics may continue to return information about the deleted topic. + // If delete.topic.enable is false on the brokers, deleteTopic will mark + // the topic for deletion, but not actually delete them. + // This operation is supported by brokers with version 0.10.1.0 or higher. + DeleteTopic(topic string) error + + // Increase the number of partitions of the topics according to the corresponding values. + // If partitions are increased for a topic that has a key, the partition logic or ordering of + // the messages will be affected. It may take several seconds after this method returns + // success for all the brokers to become aware that the partitions have been created. + // During this time, ClusterAdmin#describeTopics may not return information about the + // new partitions. This operation is supported by brokers with version 1.0.0 or higher. + CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error + + // Alter the replica assignment for partitions. + // This operation is supported by brokers with version 2.4.0.0 or higher. + AlterPartitionReassignments(topic string, assignment [][]int32) error + + // Provides info on ongoing partitions replica reassignments. + // This operation is supported by brokers with version 2.4.0.0 or higher. + ListPartitionReassignments(topics string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) + + // Delete records whose offset is smaller than the given offset of the corresponding partition. + // This operation is supported by brokers with version 0.11.0.0 or higher. + DeleteRecords(topic string, partitionOffsets map[int32]int64) error + + // Get the configuration for the specified resources. + // The returned configuration includes default values and the Default is true + // can be used to distinguish them from user supplied values. + // Config entries where ReadOnly is true cannot be updated. + // The value of config entries where Sensitive is true is always nil so + // sensitive information is not disclosed. + // This operation is supported by brokers with version 0.11.0.0 or higher. + DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) + + // Update the configuration for the specified resources with the default options. + // This operation is supported by brokers with version 0.11.0.0 or higher. + // The resources with their configs (topic is the only resource type with configs + // that can be updated currently Updates are not transactional so they may succeed + // for some resources while fail for others. The configs for a particular resource are updated automatically. 
+ AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error + + // Creates access control lists (ACLs) which are bound to specific resources. + // This operation is not transactional so it may succeed for some ACLs while fail for others. + // If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but + // no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher. + CreateACL(resource Resource, acl Acl) error + + // Lists access control lists (ACLs) according to the supplied filter. + // it may take some time for changes made by createAcls or deleteAcls to be reflected in the output of ListAcls + // This operation is supported by brokers with version 0.11.0.0 or higher. + ListAcls(filter AclFilter) ([]ResourceAcls, error) + + // Deletes access control lists (ACLs) according to the supplied filters. + // This operation is not transactional so it may succeed for some ACLs while fail for others. + // This operation is supported by brokers with version 0.11.0.0 or higher. + DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) + + // List the consumer groups available in the cluster. + ListConsumerGroups() (map[string]string, error) + + // Describe the given consumer groups. + DescribeConsumerGroups(groups []string) ([]*GroupDescription, error) + + // List the consumer group offsets available in the cluster. + ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) + + // Delete a consumer group. + DeleteConsumerGroup(group string) error + + // Get information about the nodes in the cluster + DescribeCluster() (brokers []*Broker, controllerID int32, err error) + + // Get information about all log directories on the given set of brokers + DescribeLogDirs(brokers []int32) (map[int32][]DescribeLogDirsResponseDirMetadata, error) + + // Close shuts down the admin and closes underlying client. + Close() error +} + +type clusterAdmin struct { + client Client + conf *Config +} + +// NewClusterAdmin creates a new ClusterAdmin using the given broker addresses and configuration. +func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) { + client, err := NewClient(addrs, conf) + if err != nil { + return nil, err + } + return NewClusterAdminFromClient(client) +} + +// NewClusterAdminFromClient creates a new ClusterAdmin using the given client. +// Note that underlying client will also be closed on admin's Close() call. 
+func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) { + //make sure we can retrieve the controller + _, err := client.Controller() + if err != nil { + return nil, err + } + + ca := &clusterAdmin{ + client: client, + conf: client.Config(), + } + return ca, nil +} + +func (ca *clusterAdmin) Close() error { + return ca.client.Close() +} + +func (ca *clusterAdmin) Controller() (*Broker, error) { + return ca.client.Controller() +} + +func (ca *clusterAdmin) refreshController() (*Broker, error) { + return ca.client.RefreshController() +} + +// isErrNoController returns `true` if the given error type unwraps to an +// `ErrNotController` response from Kafka +func isErrNoController(err error) bool { + switch e := err.(type) { + case *TopicError: + return e.Err == ErrNotController + case *TopicPartitionError: + return e.Err == ErrNotController + case KError: + return e == ErrNotController + } + return false +} + +// retryOnError will repeatedly call the given (error-returning) func in the +// case that its response is non-nil and retriable (as determined by the +// provided retriable func) up to the maximum number of tries permitted by +// the admin client configuration +func (ca *clusterAdmin) retryOnError(retriable func(error) bool, fn func() error) error { + var err error + for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ { + err = fn() + if err == nil || !retriable(err) { + return err + } + Logger.Printf( + "admin/request retrying after %dms... (%d attempts remaining)\n", + ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt) + time.Sleep(ca.conf.Admin.Retry.Backoff) + continue + } + return err +} + +func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error { + if topic == "" { + return ErrInvalidTopic + } + + if detail == nil { + return errors.New("you must specify topic details") + } + + topicDetails := make(map[string]*TopicDetail) + topicDetails[topic] = detail + + request := &CreateTopicsRequest{ + TopicDetails: topicDetails, + ValidateOnly: validateOnly, + Timeout: ca.conf.Admin.Timeout, + } + + if ca.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 1 + } + if ca.conf.Version.IsAtLeast(V1_0_0_0) { + request.Version = 2 + } + + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + + rsp, err := b.CreateTopics(request) + if err != nil { + return err + } + + topicErr, ok := rsp.TopicErrors[topic] + if !ok { + return ErrIncompleteResponse + } + + if topicErr.Err != ErrNoError { + if topicErr.Err == ErrNotController { + _, _ = ca.refreshController() + } + return topicErr + } + + return nil + }) +} + +func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) { + controller, err := ca.Controller() + if err != nil { + return nil, err + } + + request := &MetadataRequest{ + Topics: topics, + AllowAutoTopicCreation: false, + } + + if ca.conf.Version.IsAtLeast(V1_0_0_0) { + request.Version = 5 + } else if ca.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 4 + } + + response, err := controller.GetMetadata(request) + if err != nil { + return nil, err + } + return response.Topics, nil +} + +func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) { + controller, err := ca.Controller() + if err != nil { + return nil, int32(0), err + } + + request := &MetadataRequest{ + Topics: []string{}, + } + + if ca.conf.Version.IsAtLeast(V0_10_0_0) { + request.Version = 1 
+ } + + response, err := controller.GetMetadata(request) + if err != nil { + return nil, int32(0), err + } + + return response.Brokers, response.ControllerID, nil +} + +func (ca *clusterAdmin) findBroker(id int32) (*Broker, error) { + brokers := ca.client.Brokers() + for _, b := range brokers { + if b.ID() == id { + return b, nil + } + } + return nil, fmt.Errorf("could not find broker id %d", id) +} + +func (ca *clusterAdmin) findAnyBroker() (*Broker, error) { + brokers := ca.client.Brokers() + if len(brokers) > 0 { + index := rand.Intn(len(brokers)) + return brokers[index], nil + } + return nil, errors.New("no available broker") +} + +func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) { + // In order to build TopicDetails we need to first get the list of all + // topics using a MetadataRequest and then get their configs using a + // DescribeConfigsRequest request. To avoid sending many requests to the + // broker, we use a single DescribeConfigsRequest. + + // Send the all-topic MetadataRequest + b, err := ca.findAnyBroker() + if err != nil { + return nil, err + } + _ = b.Open(ca.client.Config()) + + metadataReq := &MetadataRequest{} + metadataResp, err := b.GetMetadata(metadataReq) + if err != nil { + return nil, err + } + + topicsDetailsMap := make(map[string]TopicDetail) + + var describeConfigsResources []*ConfigResource + + for _, topic := range metadataResp.Topics { + topicDetails := TopicDetail{ + NumPartitions: int32(len(topic.Partitions)), + } + if len(topic.Partitions) > 0 { + topicDetails.ReplicaAssignment = map[int32][]int32{} + for _, partition := range topic.Partitions { + topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas + } + topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas)) + } + topicsDetailsMap[topic.Name] = topicDetails + + // we populate the resources we want to describe from the MetadataResponse + topicResource := ConfigResource{ + Type: TopicResource, + Name: topic.Name, + } + describeConfigsResources = append(describeConfigsResources, &topicResource) + } + + // Send the DescribeConfigsRequest + describeConfigsReq := &DescribeConfigsRequest{ + Resources: describeConfigsResources, + } + + if ca.conf.Version.IsAtLeast(V1_1_0_0) { + describeConfigsReq.Version = 1 + } + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + describeConfigsReq.Version = 2 + } + + describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq) + if err != nil { + return nil, err + } + + for _, resource := range describeConfigsResp.Resources { + topicDetails := topicsDetailsMap[resource.Name] + topicDetails.ConfigEntries = make(map[string]*string) + + for _, entry := range resource.Configs { + // only include non-default non-sensitive config + // (don't actually think topic config will ever be sensitive) + if entry.Default || entry.Sensitive { + continue + } + topicDetails.ConfigEntries[entry.Name] = &entry.Value + } + + topicsDetailsMap[resource.Name] = topicDetails + } + + return topicsDetailsMap, nil +} + +func (ca *clusterAdmin) DeleteTopic(topic string) error { + if topic == "" { + return ErrInvalidTopic + } + + request := &DeleteTopicsRequest{ + Topics: []string{topic}, + Timeout: ca.conf.Admin.Timeout, + } + + if ca.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 1 + } + + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + + rsp, err := b.DeleteTopics(request) + if err != nil { + return err + } + + topicErr, ok := rsp.TopicErrorCodes[topic] + if !ok { + 
return ErrIncompleteResponse + } + + if topicErr != ErrNoError { + if topicErr == ErrNotController { + _, _ = ca.refreshController() + } + return topicErr + } + + return nil + }) +} + +func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error { + if topic == "" { + return ErrInvalidTopic + } + + topicPartitions := make(map[string]*TopicPartition) + topicPartitions[topic] = &TopicPartition{Count: count, Assignment: assignment} + + request := &CreatePartitionsRequest{ + TopicPartitions: topicPartitions, + Timeout: ca.conf.Admin.Timeout, + } + + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + + rsp, err := b.CreatePartitions(request) + if err != nil { + return err + } + + topicErr, ok := rsp.TopicPartitionErrors[topic] + if !ok { + return ErrIncompleteResponse + } + + if topicErr.Err != ErrNoError { + if topicErr.Err == ErrNotController { + _, _ = ca.refreshController() + } + return topicErr + } + + return nil + }) +} + +func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][]int32) error { + if topic == "" { + return ErrInvalidTopic + } + + request := &AlterPartitionReassignmentsRequest{ + TimeoutMs: int32(60000), + Version: int16(0), + } + + for i := 0; i < len(assignment); i++ { + request.AddBlock(topic, int32(i), assignment[i]) + } + + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + + errs := make([]error, 0) + + rsp, err := b.AlterPartitionReassignments(request) + + if err != nil { + errs = append(errs, err) + } else { + if rsp.ErrorCode > 0 { + errs = append(errs, errors.New(rsp.ErrorCode.Error())) + } + + for topic, topicErrors := range rsp.Errors { + for partition, partitionError := range topicErrors { + if partitionError.errorCode != ErrNoError { + errStr := fmt.Sprintf("[%s-%d]: %s", topic, partition, partitionError.errorCode.Error()) + errs = append(errs, errors.New(errStr)) + } + } + } + } + + if len(errs) > 0 { + return ErrReassignPartitions{MultiError{&errs}} + } + + return nil + }) +} + +func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) { + if topic == "" { + return nil, ErrInvalidTopic + } + + request := &ListPartitionReassignmentsRequest{ + TimeoutMs: int32(60000), + Version: int16(0), + } + + request.AddBlock(topic, partitions) + + b, err := ca.Controller() + if err != nil { + return nil, err + } + _ = b.Open(ca.client.Config()) + + rsp, err := b.ListPartitionReassignments(request) + + if err == nil && rsp != nil { + return rsp.TopicStatus, nil + } else { + return nil, err + } +} + +func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error { + if topic == "" { + return ErrInvalidTopic + } + partitionPerBroker := make(map[*Broker][]int32) + for partition := range partitionOffsets { + broker, err := ca.client.Leader(topic, partition) + if err != nil { + return err + } + if _, ok := partitionPerBroker[broker]; ok { + partitionPerBroker[broker] = append(partitionPerBroker[broker], partition) + } else { + partitionPerBroker[broker] = []int32{partition} + } + } + errs := make([]error, 0) + for broker, partitions := range partitionPerBroker { + topics := make(map[string]*DeleteRecordsRequestTopic) + recordsToDelete := make(map[int32]int64) + for _, p := range partitions { + recordsToDelete[p] = 
partitionOffsets[p] + } + topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: recordsToDelete} + request := &DeleteRecordsRequest{ + Topics: topics, + Timeout: ca.conf.Admin.Timeout, + } + + rsp, err := broker.DeleteRecords(request) + if err != nil { + errs = append(errs, err) + } else { + deleteRecordsResponseTopic, ok := rsp.Topics[topic] + if !ok { + errs = append(errs, ErrIncompleteResponse) + } else { + for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions { + if deleteRecordsResponsePartition.Err != ErrNoError { + errs = append(errs, errors.New(deleteRecordsResponsePartition.Err.Error())) + } + } + } + } + } + if len(errs) > 0 { + return ErrDeleteRecords{MultiError{&errs}} + } + //todo since we are dealing with couple of partitions it would be good if we return slice of errors + //for each partition instead of one error + return nil +} + +// Returns a bool indicating whether the resource request needs to go to a +// specific broker +func dependsOnSpecificNode(resource ConfigResource) bool { + return (resource.Type == BrokerResource && resource.Name != "") || + resource.Type == BrokerLoggerResource +} + +func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) { + var entries []ConfigEntry + var resources []*ConfigResource + resources = append(resources, &resource) + + request := &DescribeConfigsRequest{ + Resources: resources, + } + + if ca.conf.Version.IsAtLeast(V1_1_0_0) { + request.Version = 1 + } + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 2 + } + + var ( + b *Broker + err error + ) + + // DescribeConfig of broker/broker logger must be sent to the broker in question + if dependsOnSpecificNode(resource) { + id, _ := strconv.Atoi(resource.Name) + b, err = ca.findBroker(int32(id)) + } else { + b, err = ca.findAnyBroker() + } + if err != nil { + return nil, err + } + + _ = b.Open(ca.client.Config()) + rsp, err := b.DescribeConfigs(request) + if err != nil { + return nil, err + } + + for _, rspResource := range rsp.Resources { + if rspResource.Name == resource.Name { + if rspResource.ErrorMsg != "" { + return nil, errors.New(rspResource.ErrorMsg) + } + if rspResource.ErrorCode != 0 { + return nil, KError(rspResource.ErrorCode) + } + for _, cfgEntry := range rspResource.Configs { + entries = append(entries, *cfgEntry) + } + } + } + return entries, nil +} + +func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error { + var resources []*AlterConfigsResource + resources = append(resources, &AlterConfigsResource{ + Type: resourceType, + Name: name, + ConfigEntries: entries, + }) + + request := &AlterConfigsRequest{ + Resources: resources, + ValidateOnly: validateOnly, + } + + var ( + b *Broker + err error + ) + + // AlterConfig of broker/broker logger must be sent to the broker in question + if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) { + id, _ := strconv.Atoi(name) + b, err = ca.findBroker(int32(id)) + } else { + b, err = ca.findAnyBroker() + } + if err != nil { + return err + } + + _ = b.Open(ca.client.Config()) + rsp, err := b.AlterConfigs(request) + if err != nil { + return err + } + + for _, rspResource := range rsp.Resources { + if rspResource.Name == name { + if rspResource.ErrorMsg != "" { + return errors.New(rspResource.ErrorMsg) + } + if rspResource.ErrorCode != 0 { + return KError(rspResource.ErrorCode) + } + } + } + return nil +} + +func (ca *clusterAdmin) CreateACL(resource Resource, 
acl Acl) error { + var acls []*AclCreation + acls = append(acls, &AclCreation{resource, acl}) + request := &CreateAclsRequest{AclCreations: acls} + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } + + b, err := ca.Controller() + if err != nil { + return err + } + + _, err = b.CreateAcls(request) + return err +} + +func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) { + request := &DescribeAclsRequest{AclFilter: filter} + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } + + b, err := ca.Controller() + if err != nil { + return nil, err + } + + rsp, err := b.DescribeAcls(request) + if err != nil { + return nil, err + } + + var lAcls []ResourceAcls + for _, rAcl := range rsp.ResourceAcls { + lAcls = append(lAcls, *rAcl) + } + return lAcls, nil +} + +func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) { + var filters []*AclFilter + filters = append(filters, &filter) + request := &DeleteAclsRequest{Filters: filters} + + if ca.conf.Version.IsAtLeast(V2_0_0_0) { + request.Version = 1 + } + + b, err := ca.Controller() + if err != nil { + return nil, err + } + + rsp, err := b.DeleteAcls(request) + if err != nil { + return nil, err + } + + var mAcls []MatchingAcl + for _, fr := range rsp.FilterResponses { + for _, mACL := range fr.MatchingAcls { + mAcls = append(mAcls, *mACL) + } + } + return mAcls, nil +} + +func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) { + groupsPerBroker := make(map[*Broker][]string) + + for _, group := range groups { + controller, err := ca.client.Coordinator(group) + if err != nil { + return nil, err + } + groupsPerBroker[controller] = append(groupsPerBroker[controller], group) + } + + for broker, brokerGroups := range groupsPerBroker { + response, err := broker.DescribeGroups(&DescribeGroupsRequest{ + Groups: brokerGroups, + }) + if err != nil { + return nil, err + } + + result = append(result, response.Groups...) 
+ } + return result, nil +} + +func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) { + allGroups = make(map[string]string) + + // Query brokers in parallel, since we have to query *all* brokers + brokers := ca.client.Brokers() + groupMaps := make(chan map[string]string, len(brokers)) + errors := make(chan error, len(brokers)) + wg := sync.WaitGroup{} + + for _, b := range brokers { + wg.Add(1) + go func(b *Broker, conf *Config) { + defer wg.Done() + _ = b.Open(conf) // Ensure that broker is opened + + response, err := b.ListGroups(&ListGroupsRequest{}) + if err != nil { + errors <- err + return + } + + groups := make(map[string]string) + for group, typ := range response.Groups { + groups[group] = typ + } + + groupMaps <- groups + }(b, ca.conf) + } + + wg.Wait() + close(groupMaps) + close(errors) + + for groupMap := range groupMaps { + for group, protocolType := range groupMap { + allGroups[group] = protocolType + } + } + + // Intentionally return only the first error for simplicity + err = <-errors + return +} + +func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) { + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return nil, err + } + + request := &OffsetFetchRequest{ + ConsumerGroup: group, + partitions: topicPartitions, + } + + if ca.conf.Version.IsAtLeast(V0_10_2_0) { + request.Version = 2 + } else if ca.conf.Version.IsAtLeast(V0_8_2_2) { + request.Version = 1 + } + + return coordinator.FetchOffset(request) +} + +func (ca *clusterAdmin) DeleteConsumerGroup(group string) error { + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return err + } + + request := &DeleteGroupsRequest{ + Groups: []string{group}, + } + + resp, err := coordinator.DeleteGroups(request) + if err != nil { + return err + } + + groupErr, ok := resp.GroupErrorCodes[group] + if !ok { + return ErrIncompleteResponse + } + + if groupErr != ErrNoError { + return groupErr + } + + return nil +} + +func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) { + allLogDirs = make(map[int32][]DescribeLogDirsResponseDirMetadata) + + // Query brokers in parallel, since we may have to query multiple brokers + logDirsMaps := make(chan map[int32][]DescribeLogDirsResponseDirMetadata, len(brokerIds)) + errors := make(chan error, len(brokerIds)) + wg := sync.WaitGroup{} + + for _, b := range brokerIds { + wg.Add(1) + broker, err := ca.findBroker(b) + if err != nil { + Logger.Printf("Unable to find broker with ID = %v\n", b) + continue + } + go func(b *Broker, conf *Config) { + defer wg.Done() + _ = b.Open(conf) // Ensure that broker is opened + + response, err := b.DescribeLogDirs(&DescribeLogDirsRequest{}) + if err != nil { + errors <- err + return + } + logDirs := make(map[int32][]DescribeLogDirsResponseDirMetadata) + logDirs[b.ID()] = response.LogDirs + logDirsMaps <- logDirs + }(broker, ca.conf) + } + + wg.Wait() + close(logDirsMaps) + close(errors) + + for logDirsMap := range logDirsMaps { + for id, logDirs := range logDirsMap { + allLogDirs[id] = logDirs + } + } + + // Intentionally return only the first error for simplicity + err = <-errors + return +} diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go new file mode 100644 index 00000000..c88bb604 --- /dev/null +++ 
b/vendor/github.com/Shopify/sarama/alter_configs_request.go @@ -0,0 +1,126 @@ +package sarama + +//AlterConfigsRequest is an alter config request type +type AlterConfigsRequest struct { + Resources []*AlterConfigsResource + ValidateOnly bool +} + +//AlterConfigsResource is an alter config resource type +type AlterConfigsResource struct { + Type ConfigResourceType + Name string + ConfigEntries map[string]*string +} + +func (a *AlterConfigsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(a.Resources)); err != nil { + return err + } + + for _, r := range a.Resources { + if err := r.encode(pe); err != nil { + return err + } + } + + pe.putBool(a.ValidateOnly) + return nil +} + +func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error { + resourceCount, err := pd.getArrayLength() + if err != nil { + return err + } + + a.Resources = make([]*AlterConfigsResource, resourceCount) + for i := range a.Resources { + r := &AlterConfigsResource{} + err = r.decode(pd, version) + if err != nil { + return err + } + a.Resources[i] = r + } + + validateOnly, err := pd.getBool() + if err != nil { + return err + } + + a.ValidateOnly = validateOnly + + return nil +} + +func (a *AlterConfigsResource) encode(pe packetEncoder) error { + pe.putInt8(int8(a.Type)) + + if err := pe.putString(a.Name); err != nil { + return err + } + + if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil { + return err + } + for configKey, configValue := range a.ConfigEntries { + if err := pe.putString(configKey); err != nil { + return err + } + if err := pe.putNullableString(configValue); err != nil { + return err + } + } + + return nil +} + +func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error { + t, err := pd.getInt8() + if err != nil { + return err + } + a.Type = ConfigResourceType(t) + + name, err := pd.getString() + if err != nil { + return err + } + a.Name = name + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + a.ConfigEntries = make(map[string]*string, n) + for i := 0; i < n; i++ { + configKey, err := pd.getString() + if err != nil { + return err + } + if a.ConfigEntries[configKey], err = pd.getNullableString(); err != nil { + return err + } + } + } + return err +} + +func (a *AlterConfigsRequest) key() int16 { + return 33 +} + +func (a *AlterConfigsRequest) version() int16 { + return 0 +} + +func (a *AlterConfigsRequest) headerVersion() int16 { + return 1 +} + +func (a *AlterConfigsRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go new file mode 100644 index 00000000..3266f927 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_configs_response.go @@ -0,0 +1,101 @@ +package sarama + +import "time" + +//AlterConfigsResponse is a response type for alter config +type AlterConfigsResponse struct { + ThrottleTime time.Duration + Resources []*AlterConfigsResourceResponse +} + +//AlterConfigsResourceResponse is a response type for alter config resource +type AlterConfigsResourceResponse struct { + ErrorCode int16 + ErrorMsg string + Type ConfigResourceType + Name string +} + +func (a *AlterConfigsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(a.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(a.Resources)); err != nil { + return err + } + + for i := range a.Resources { + 
pe.putInt16(a.Resources[i].ErrorCode) + err := pe.putString(a.Resources[i].ErrorMsg) + if err != nil { + return nil + } + pe.putInt8(int8(a.Resources[i].Type)) + err = pe.putString(a.Resources[i].Name) + if err != nil { + return nil + } + } + + return nil +} + +func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) error { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + responseCount, err := pd.getArrayLength() + if err != nil { + return err + } + + a.Resources = make([]*AlterConfigsResourceResponse, responseCount) + + for i := range a.Resources { + a.Resources[i] = new(AlterConfigsResourceResponse) + + errCode, err := pd.getInt16() + if err != nil { + return err + } + a.Resources[i].ErrorCode = errCode + + e, err := pd.getString() + if err != nil { + return err + } + a.Resources[i].ErrorMsg = e + + t, err := pd.getInt8() + if err != nil { + return err + } + a.Resources[i].Type = ConfigResourceType(t) + + name, err := pd.getString() + if err != nil { + return err + } + a.Resources[i].Name = name + } + + return nil +} + +func (a *AlterConfigsResponse) key() int16 { + return 32 +} + +func (a *AlterConfigsResponse) version() int16 { + return 0 +} + +func (a *AlterConfigsResponse) headerVersion() int16 { + return 0 +} + +func (a *AlterConfigsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go new file mode 100644 index 00000000..f0a2f9dd --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go @@ -0,0 +1,130 @@ +package sarama + +type alterPartitionReassignmentsBlock struct { + replicas []int32 +} + +func (b *alterPartitionReassignmentsBlock) encode(pe packetEncoder) error { + if err := pe.putNullableCompactInt32Array(b.replicas); err != nil { + return err + } + + pe.putEmptyTaggedFieldArray() + return nil +} + +func (b *alterPartitionReassignmentsBlock) decode(pd packetDecoder) (err error) { + if b.replicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + return nil +} + +type AlterPartitionReassignmentsRequest struct { + TimeoutMs int32 + blocks map[string]map[int32]*alterPartitionReassignmentsBlock + Version int16 +} + +func (r *AlterPartitionReassignmentsRequest) encode(pe packetEncoder) error { + pe.putInt32(r.TimeoutMs) + + pe.putCompactArrayLength(len(r.blocks)) + + for topic, partitions := range r.blocks { + if err := pe.putCompactString(topic); err != nil { + return err + } + pe.putCompactArrayLength(len(partitions)) + for partition, block := range partitions { + pe.putInt32(partition) + if err := block.encode(pe); err != nil { + return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *AlterPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.TimeoutMs, err = pd.getInt32(); err != nil { + return err + } + + topicCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if topicCount > 0 { + r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock) + for i := 0; i < topicCount; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + partitionCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + r.blocks[topic] = 
make(map[int32]*alterPartitionReassignmentsBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &alterPartitionReassignmentsBlock{} + if err := block.decode(pd); err != nil { + return err + } + r.blocks[topic][partition] = block + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return +} + +func (r *AlterPartitionReassignmentsRequest) key() int16 { + return 45 +} + +func (r *AlterPartitionReassignmentsRequest) version() int16 { + return r.Version +} + +func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 { + return 2 +} + +func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion { + return V2_4_0_0 +} + +func (r *AlterPartitionReassignmentsRequest) AddBlock(topic string, partitionID int32, replicas []int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock) + } + + r.blocks[topic][partitionID] = &alterPartitionReassignmentsBlock{replicas} +} diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go new file mode 100644 index 00000000..b3f9a15f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go @@ -0,0 +1,157 @@ +package sarama + +type alterPartitionReassignmentsErrorBlock struct { + errorCode KError + errorMessage *string +} + +func (b *alterPartitionReassignmentsErrorBlock) encode(pe packetEncoder) error { + pe.putInt16(int16(b.errorCode)) + if err := pe.putNullableCompactString(b.errorMessage); err != nil { + return err + } + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (b *alterPartitionReassignmentsErrorBlock) decode(pd packetDecoder) (err error) { + errorCode, err := pd.getInt16() + if err != nil { + return err + } + b.errorCode = KError(errorCode) + b.errorMessage, err = pd.getCompactNullableString() + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + return err +} + +type AlterPartitionReassignmentsResponse struct { + Version int16 + ThrottleTimeMs int32 + ErrorCode KError + ErrorMessage *string + Errors map[string]map[int32]*alterPartitionReassignmentsErrorBlock +} + +func (r *AlterPartitionReassignmentsResponse) AddError(topic string, partition int32, kerror KError, message *string) { + if r.Errors == nil { + r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock) + } + partitions := r.Errors[topic] + if partitions == nil { + partitions = make(map[int32]*alterPartitionReassignmentsErrorBlock) + r.Errors[topic] = partitions + } + + partitions[partition] = &alterPartitionReassignmentsErrorBlock{errorCode: kerror, errorMessage: message} +} + +func (r *AlterPartitionReassignmentsResponse) encode(pe packetEncoder) error { + pe.putInt32(r.ThrottleTimeMs) + pe.putInt16(int16(r.ErrorCode)) + if err := pe.putNullableCompactString(r.ErrorMessage); err != nil { + return err + } + + pe.putCompactArrayLength(len(r.Errors)) + for topic, partitions := range r.Errors { + if err := pe.putCompactString(topic); err != nil { + return err + } + pe.putCompactArrayLength(len(partitions)) + for partition, block := range 
partitions { + pe.putInt32(partition) + + if err := block.encode(pe); err != nil { + return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + return nil +} + +func (r *AlterPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { + return err + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.ErrorCode = KError(kerr) + + if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil { + return err + } + + numTopics, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + if numTopics > 0 { + r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock, numTopics) + for i := 0; i < numTopics; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + + ongoingPartitionReassignments, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.Errors[topic] = make(map[int32]*alterPartitionReassignmentsErrorBlock, ongoingPartitionReassignments) + + for j := 0; j < ongoingPartitionReassignments; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &alterPartitionReassignmentsErrorBlock{} + if err := block.decode(pd); err != nil { + return err + } + + r.Errors[topic][partition] = block + } + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return nil +} + +func (r *AlterPartitionReassignmentsResponse) key() int16 { + return 45 +} + +func (r *AlterPartitionReassignmentsResponse) version() int16 { + return r.Version +} + +func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 { + return 1 +} + +func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion { + return V2_4_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go index ab65f01c..d67c5e1e 100644 --- a/vendor/github.com/Shopify/sarama/api_versions_request.go +++ b/vendor/github.com/Shopify/sarama/api_versions_request.go @@ -1,24 +1,29 @@ package sarama +//ApiVersionsRequest ... 
type ApiVersionsRequest struct { } -func (r *ApiVersionsRequest) encode(pe packetEncoder) error { +func (a *ApiVersionsRequest) encode(pe packetEncoder) error { return nil } -func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { +func (a *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { return nil } -func (r *ApiVersionsRequest) key() int16 { +func (a *ApiVersionsRequest) key() int16 { return 18 } -func (r *ApiVersionsRequest) version() int16 { +func (a *ApiVersionsRequest) version() int16 { return 0 } -func (r *ApiVersionsRequest) requiredVersion() KafkaVersion { +func (a *ApiVersionsRequest) headerVersion() int16 { + return 1 +} + +func (a *ApiVersionsRequest) requiredVersion() KafkaVersion { return V0_10_0_0 } diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go index 16d62db2..d09e8d9e 100644 --- a/vendor/github.com/Shopify/sarama/api_versions_response.go +++ b/vendor/github.com/Shopify/sarama/api_versions_response.go @@ -1,5 +1,6 @@ package sarama +//ApiVersionsResponseBlock is an api version response block type type ApiVersionsResponseBlock struct { ApiKey int16 MinVersion int16 @@ -31,6 +32,7 @@ func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error { return nil } +//ApiVersionsResponse is an api version response type type ApiVersionsResponse struct { Err KError ApiVersions []*ApiVersionsResponseBlock @@ -50,12 +52,13 @@ func (r *ApiVersionsResponse) encode(pe packetEncoder) error { } func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error { - if kerr, err := pd.getInt16(); err != nil { + kerr, err := pd.getInt16() + if err != nil { return err - } else { - r.Err = KError(kerr) } + r.Err = KError(kerr) + numBlocks, err := pd.getArrayLength() if err != nil { return err @@ -81,6 +84,10 @@ func (r *ApiVersionsResponse) version() int16 { return 0 } +func (a *ApiVersionsResponse) headerVersion() int16 { + return 0 +} + func (r *ApiVersionsResponse) requiredVersion() KafkaVersion { return V0_10_0_0 } diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go index 3af47fdd..d0ce01b6 100644 --- a/vendor/github.com/Shopify/sarama/async_producer.go +++ b/vendor/github.com/Shopify/sarama/async_producer.go @@ -1,6 +1,7 @@ package sarama import ( + "encoding/binary" "fmt" "sync" "time" @@ -17,24 +18,23 @@ import ( // scope. type AsyncProducer interface { - // AsyncClose triggers a shutdown of the producer, flushing any messages it may - // have buffered. The shutdown has completed when both the Errors and Successes - // channels have been closed. When calling AsyncClose, you *must* continue to - // read from those channels in order to drain the results of any messages in - // flight. + // AsyncClose triggers a shutdown of the producer. The shutdown has completed + // when both the Errors and Successes channels have been closed. When calling + // AsyncClose, you *must* continue to read from those channels in order to + // drain the results of any messages in flight. AsyncClose() - // Close shuts down the producer and flushes any messages it may have buffered. - // You must call this function before a producer object passes out of scope, as - // it may otherwise leak memory. You must call this before calling Close on the - // underlying client. 
+ // Close shuts down the producer and waits for any buffered messages to be + // flushed. You must call this function before a producer object passes out of + // scope, as it may otherwise leak memory. You must call this before calling + // Close on the underlying client. Close() error // Input is the input channel for the user to write messages to that they // wish to send. Input() chan<- *ProducerMessage - // Successes is the success output channel back to the user when AckSuccesses is + // Successes is the success output channel back to the user when Return.Successes is // enabled. If Return.Successes is true, you MUST read from this channel or the // Producer will deadlock. It is suggested that you send and read messages // together in a single select statement. @@ -47,18 +47,78 @@ type AsyncProducer interface { Errors() <-chan *ProducerError } +// transactionManager keeps the state necessary to ensure idempotent production +type transactionManager struct { + producerID int64 + producerEpoch int16 + sequenceNumbers map[string]int32 + mutex sync.Mutex +} + +const ( + noProducerID = -1 + noProducerEpoch = -1 +) + +func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) { + key := fmt.Sprintf("%s-%d", topic, partition) + t.mutex.Lock() + defer t.mutex.Unlock() + sequence := t.sequenceNumbers[key] + t.sequenceNumbers[key] = sequence + 1 + return sequence, t.producerEpoch +} + +func (t *transactionManager) bumpEpoch() { + t.mutex.Lock() + defer t.mutex.Unlock() + t.producerEpoch++ + for k := range t.sequenceNumbers { + t.sequenceNumbers[k] = 0 + } +} + +func (t *transactionManager) getProducerID() (int64, int16) { + t.mutex.Lock() + defer t.mutex.Unlock() + return t.producerID, t.producerEpoch +} + +func newTransactionManager(conf *Config, client Client) (*transactionManager, error) { + txnmgr := &transactionManager{ + producerID: noProducerID, + producerEpoch: noProducerEpoch, + } + + if conf.Producer.Idempotent { + initProducerIDResponse, err := client.InitProducerID() + if err != nil { + return nil, err + } + txnmgr.producerID = initProducerIDResponse.ProducerID + txnmgr.producerEpoch = initProducerIDResponse.ProducerEpoch + txnmgr.sequenceNumbers = make(map[string]int32) + txnmgr.mutex = sync.Mutex{} + + Logger.Printf("Obtained a ProducerId: %d and ProducerEpoch: %d\n", txnmgr.producerID, txnmgr.producerEpoch) + } + + return txnmgr, nil +} + type asyncProducer struct { - client Client - conf *Config - ownClient bool + client Client + conf *Config errors chan *ProducerError input, successes, retries chan *ProducerMessage inFlight sync.WaitGroup - brokers map[*Broker]chan<- *ProducerMessage - brokerRefs map[chan<- *ProducerMessage]int + brokers map[*Broker]*brokerProducer + brokerRefs map[*brokerProducer]int brokerLock sync.Mutex + + txnmgr *transactionManager } // NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration. @@ -67,23 +127,29 @@ func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) { if err != nil { return nil, err } - - p, err := NewAsyncProducerFromClient(client) - if err != nil { - return nil, err - } - p.(*asyncProducer).ownClient = true - return p, nil + return newAsyncProducer(client) } // NewAsyncProducerFromClient creates a new Producer using the given client. It is still // necessary to call Close() on the underlying client when shutting down this producer. 
func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) { + // For clients passed in by the client, ensure we don't + // call Close() on it. + cli := &nopCloserClient{client} + return newAsyncProducer(cli) +} + +func newAsyncProducer(client Client) (AsyncProducer, error) { // Check that we are not dealing with a closed Client before processing any other arguments if client.Closed() { return nil, ErrClosedClient } + txnmgr, err := newTransactionManager(client.Config(), client) + if err != nil { + return nil, err + } + p := &asyncProducer{ client: client, conf: client.Config(), @@ -91,8 +157,9 @@ func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) { input: make(chan *ProducerMessage), successes: make(chan *ProducerMessage), retries: make(chan *ProducerMessage), - brokers: make(map[*Broker]chan<- *ProducerMessage), - brokerRefs: make(map[chan<- *ProducerMessage]int), + brokers: make(map[*Broker]*brokerProducer), + brokerRefs: make(map[*brokerProducer]int), + txnmgr: txnmgr, } // launch our singleton dispatchers @@ -120,6 +187,10 @@ type ProducerMessage struct { // StringEncoder and ByteEncoder. Value Encoder + // The headers are key-value pairs that are transparently passed + // by Kafka between producers and consumers. + Headers []RecordHeader + // This field is used to hold arbitrary data you wish to include so it // will be available when receiving on the Successes and Errors channels. // Sarama completely ignores this field and is only to be used for @@ -135,20 +206,39 @@ type ProducerMessage struct { // Partition is the partition that the message was sent to. This is only // guaranteed to be defined if the message was successfully delivered. Partition int32 - // Timestamp is the timestamp assigned to the message by the broker. This - // is only guaranteed to be defined if the message was successfully - // delivered, RequiredAcks is not NoResponse, and the Kafka broker is at - // least version 0.10.0. + // Timestamp can vary in behaviour depending on broker configuration, being + // in either one of the CreateTime or LogAppendTime modes (default CreateTime), + // and requiring version at least 0.10.0. + // + // When configured to CreateTime, the timestamp is specified by the producer + // either by explicitly setting this field, or when the message is added + // to a produce set. + // + // When configured to LogAppendTime, the timestamp assigned to the message + // by the broker. This is only guaranteed to be defined if the message was + // successfully delivered and RequiredAcks is not NoResponse. Timestamp time.Time - retries int - flags flagSet + retries int + flags flagSet + expectation chan *ProducerError + sequenceNumber int32 + producerEpoch int16 + hasSequence bool } const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc. -func (m *ProducerMessage) byteSize() int { - size := producerMessageOverhead +func (m *ProducerMessage) byteSize(version int) int { + var size int + if version >= 2 { + size = maximumRecordOverhead + for _, h := range m.Headers { + size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32 + } + } else { + size = producerMessageOverhead + } if m.Key != nil { size += m.Key.Length() } @@ -161,6 +251,9 @@ func (m *ProducerMessage) byteSize() int { func (m *ProducerMessage) clear() { m.flags = 0 m.retries = 0 + m.sequenceNumber = 0 + m.producerEpoch = 0 + m.hasSequence = false } // ProducerError is the type of error generated when the producer fails to deliver a message. 
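// Illustrative sketch, not part of the vendored sarama diff above or below: a minimal
// producer that exercises the AsyncProducer surface this dependency bump touches
// (Input/Successes/Errors plus the new ProducerMessage.Headers field).
// The broker address and topic name are placeholders; record headers assume
// Version >= V0_11_0_0, and Return.Successes must be enabled before reading
// from the Successes channel.
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Version = sarama.V0_11_0_0       // headers require at least Kafka 0.11
	config.Producer.Return.Successes = true // required to read from Successes()

	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close() // flushes buffered messages before returning

	producer.Input() <- &sarama.ProducerMessage{
		Topic:   "requests",
		Value:   sarama.StringEncoder("payload"),
		Headers: []sarama.RecordHeader{{Key: []byte("src"), Value: []byte("goreplay")}},
	}

	// Read exactly one acknowledgement here; real callers send and receive in a single select loop.
	select {
	case msg := <-producer.Successes():
		log.Printf("delivered to partition %d at offset %d", msg.Partition, msg.Offset)
	case perr := <-producer.Errors():
		log.Println("delivery failed:", perr.Err)
	}
}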
@@ -200,7 +293,7 @@ func (p *asyncProducer) Close() error { if p.conf.Producer.Return.Successes { go withRecover(func() { - for _ = range p.successes { + for range p.successes { } }) } @@ -255,7 +348,14 @@ func (p *asyncProducer) dispatcher() { p.inFlight.Add(1) } - if msg.byteSize() > p.conf.Producer.MaxMessageBytes { + version := 1 + if p.conf.Version.IsAtLeast(V0_11_0_0) { + version = 2 + } else if msg.Headers != nil { + p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11")) + continue + } + if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes { p.returnError(msg, ErrMessageSizeTooLarge) continue } @@ -327,7 +427,14 @@ func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error { var partitions []int32 err := tp.breaker.Run(func() (err error) { - if tp.partitioner.RequiresConsistency() { + requiresConsistency := false + if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok { + requiresConsistency = ep.MessageRequiresConsistency(msg) + } else { + requiresConsistency = tp.partitioner.RequiresConsistency() + } + + if requiresConsistency { partitions, err = tp.parent.client.Partitions(msg.Topic) } else { partitions, err = tp.parent.client.WritablePartitions(msg.Topic) @@ -367,9 +474,9 @@ type partitionProducer struct { partition int32 input <-chan *ProducerMessage - leader *Broker - breaker *breaker.Breaker - output chan<- *ProducerMessage + leader *Broker + breaker *breaker.Breaker + brokerProducer *brokerProducer // highWatermark tracks the "current" retry level, which is the only one where we actually let messages through, // all other messages get buffered in retryState[msg.retries].buf to preserve ordering @@ -399,21 +506,53 @@ func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan return input } +func (pp *partitionProducer) backoff(retries int) { + var backoff time.Duration + if pp.parent.conf.Producer.Retry.BackoffFunc != nil { + maxRetries := pp.parent.conf.Producer.Retry.Max + backoff = pp.parent.conf.Producer.Retry.BackoffFunc(retries, maxRetries) + } else { + backoff = pp.parent.conf.Producer.Retry.Backoff + } + if backoff > 0 { + time.Sleep(backoff) + } +} + func (pp *partitionProducer) dispatch() { // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader` // on the first message pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition) if pp.leader != nil { - pp.output = pp.parent.getBrokerProducer(pp.leader) + pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader) pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight - pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} + pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} } + defer func() { + if pp.brokerProducer != nil { + pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer) + } + }() + for msg := range pp.input { + if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil { + select { + case <-pp.brokerProducer.abandoned: + // a message on the abandoned channel means that our current broker selection is out of date + Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer) + pp.brokerProducer = nil + time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + default: + // producer connection is still open. 
+ } + } + if msg.retries > pp.highWatermark { // a new, higher, retry level; handle it and then back off pp.newHighWatermark(msg.retries) - time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + pp.backoff(msg.retries) } else if pp.highWatermark > 0 { // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level if msg.retries < pp.highWatermark { @@ -438,20 +577,25 @@ func (pp *partitionProducer) dispatch() { // if we made it this far then the current msg contains real data, and can be sent to the next goroutine // without breaking any of our ordering guarantees - if pp.output == nil { + if pp.brokerProducer == nil { if err := pp.updateLeader(); err != nil { pp.parent.returnError(msg, err) - time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + pp.backoff(msg.retries) continue } Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) } - pp.output <- msg - } + // Now that we know we have a broker to actually try and send this message to, generate the sequence + // number for it. + // All messages being retried (sent or not) have already had their retry count updated + // Also, ignore "special" syn/fin messages used to sync the brokerProducer and the topicProducer. + if pp.parent.conf.Producer.Idempotent && msg.retries == 0 && msg.flags == 0 { + msg.sequenceNumber, msg.producerEpoch = pp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition) + msg.hasSequence = true + } - if pp.output != nil { - pp.parent.unrefBrokerProducer(pp.leader, pp.output) + pp.brokerProducer.input <- msg } } @@ -463,12 +607,12 @@ func (pp *partitionProducer) newHighWatermark(hwm int) { // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages) pp.retryState[pp.highWatermark].expectChaser = true pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight - pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1} + pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1} // a new HWM means that our current broker selection is out of date Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID()) - pp.parent.unrefBrokerProducer(pp.leader, pp.output) - pp.output = nil + pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer) + pp.brokerProducer = nil } func (pp *partitionProducer) flushRetryBuffers() { @@ -476,7 +620,7 @@ func (pp *partitionProducer) flushRetryBuffers() { for { pp.highWatermark-- - if pp.output == nil { + if pp.brokerProducer == nil { if err := pp.updateLeader(); err != nil { pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err) goto flushDone @@ -485,7 +629,7 @@ func (pp *partitionProducer) flushRetryBuffers() { } for _, msg := range pp.retryState[pp.highWatermark].buf { - pp.output <- msg + pp.brokerProducer.input <- msg } flushDone: @@ -510,16 +654,16 @@ func (pp *partitionProducer) updateLeader() error { return err } - pp.output = pp.parent.getBrokerProducer(pp.leader) + pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader) pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight - pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} + pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} 
return nil }) } // one per broker; also constructs an associated flusher -func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage { +func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer { var ( input = make(chan *ProducerMessage) bridge = make(chan *produceSet) @@ -532,6 +676,7 @@ func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessag input: input, output: bridge, responses: responses, + stopchan: make(chan struct{}), buffer: newProduceSet(p), currentRetries: make(map[string]map[int32]error), } @@ -553,7 +698,11 @@ func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessag close(responses) }) - return input + if p.conf.Producer.Retry.Max <= 0 { + bp.abandoned = make(chan struct{}) + } + + return bp } type brokerProducerResponse struct { @@ -568,9 +717,11 @@ type brokerProducer struct { parent *asyncProducer broker *Broker - input <-chan *ProducerMessage + input chan *ProducerMessage output chan<- *produceSet responses <-chan *brokerProducerResponse + abandoned chan struct{} + stopchan chan struct{} buffer *produceSet timer <-chan time.Time @@ -586,12 +737,17 @@ func (bp *brokerProducer) run() { for { select { - case msg := <-bp.input: - if msg == nil { + case msg, ok := <-bp.input: + if !ok { + Logger.Printf("producer/broker/%d input chan closed\n", bp.broker.ID()) bp.shutdown() return } + if msg == nil { + continue + } + if msg.flags&syn == syn { Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n", bp.broker.ID(), msg.Topic, msg.Partition) @@ -617,12 +773,21 @@ func (bp *brokerProducer) run() { } if bp.buffer.wouldOverflow(msg) { - if err := bp.waitForSpace(msg); err != nil { + Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID()) + if err := bp.waitForSpace(msg, false); err != nil { bp.parent.retryMessage(msg, err) continue } } + if bp.parent.txnmgr.producerID != noProducerID && bp.buffer.producerEpoch != msg.producerEpoch { + // The epoch was reset, need to roll the buffer over + Logger.Printf("producer/broker/%d detected epoch rollover, waiting for new buffer\n", bp.broker.ID()) + if err := bp.waitForSpace(msg, true); err != nil { + bp.parent.retryMessage(msg, err) + continue + } + } if err := bp.buffer.add(msg); err != nil { bp.parent.returnError(msg, err) continue @@ -635,8 +800,14 @@ func (bp *brokerProducer) run() { bp.timerFired = true case output <- bp.buffer: bp.rollOver() - case response := <-bp.responses: - bp.handleResponse(response) + case response, ok := <-bp.responses: + if ok { + bp.handleResponse(response) + } + case <-bp.stopchan: + Logger.Printf( + "producer/broker/%d run loop asked to stop\n", bp.broker.ID()) + return } if bp.timerFired || bp.buffer.readyToFlush() { @@ -660,7 +831,7 @@ func (bp *brokerProducer) shutdown() { for response := range bp.responses { bp.handleResponse(response) } - + close(bp.stopchan) Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID()) } @@ -672,9 +843,7 @@ func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error { return bp.currentRetries[msg.Topic][msg.Partition] } -func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error { - Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID()) - +func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) error { for { select { case response := <-bp.responses: @@ -682,7 +851,7 @@ func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error { // 
handling a response can change our state, so re-check some things if reason := bp.needsRetry(msg); reason != nil { return reason - } else if !bp.buffer.wouldOverflow(msg) { + } else if !bp.buffer.wouldOverflow(msg) && !forceRollover { return nil } case bp.output <- bp.buffer: @@ -713,16 +882,17 @@ func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) { func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) { // we iterate through the blocks in the request set, not the response, so that we notice // if the response is missing a block completely - sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { + var retryTopics []string + sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) { if response == nil { // this only happens when RequiredAcks is NoResponse, so we have to assume success - bp.parent.returnSuccesses(msgs) + bp.parent.returnSuccesses(pSet.msgs) return } block := response.GetBlock(topic, partition) if block == nil { - bp.parent.returnErrors(msgs, ErrIncompleteResponse) + bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse) return } @@ -730,45 +900,115 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo // Success case ErrNoError: if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() { - for _, msg := range msgs { + for _, msg := range pSet.msgs { msg.Timestamp = block.Timestamp } } - for i, msg := range msgs { + for i, msg := range pSet.msgs { msg.Offset = block.Offset + int64(i) } - bp.parent.returnSuccesses(msgs) + bp.parent.returnSuccesses(pSet.msgs) + // Duplicate + case ErrDuplicateSequenceNumber: + bp.parent.returnSuccesses(pSet.msgs) // Retriable errors case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: - Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n", - bp.broker.ID(), topic, partition, block.Err) - bp.currentRetries[topic][partition] = block.Err - bp.parent.retryMessages(msgs, block.Err) - bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err) + if bp.parent.conf.Producer.Retry.Max <= 0 { + bp.parent.abandonBrokerConnection(bp.broker) + bp.parent.returnErrors(pSet.msgs, block.Err) + } else { + retryTopics = append(retryTopics, topic) + } // Other non-retriable errors default: - bp.parent.returnErrors(msgs, block.Err) + if bp.parent.conf.Producer.Retry.Max <= 0 { + bp.parent.abandonBrokerConnection(bp.broker) + } + bp.parent.returnErrors(pSet.msgs, block.Err) } }) + + if len(retryTopics) > 0 { + if bp.parent.conf.Producer.Idempotent { + err := bp.parent.client.RefreshMetadata(retryTopics...) 
+ if err != nil { + Logger.Printf("Failed refreshing metadata because of %v\n", err) + } + } + + sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) { + block := response.GetBlock(topic, partition) + if block == nil { + // handled in the previous "eachPartition" loop + return + } + + switch block.Err { + case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n", + bp.broker.ID(), topic, partition, block.Err) + if bp.currentRetries[topic] == nil { + bp.currentRetries[topic] = make(map[int32]error) + } + bp.currentRetries[topic][partition] = block.Err + if bp.parent.conf.Producer.Idempotent { + go bp.parent.retryBatch(topic, partition, pSet, block.Err) + } else { + bp.parent.retryMessages(pSet.msgs, block.Err) + } + // dropping the following messages has the side effect of incrementing their retry count + bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err) + } + }) + } +} + +func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitionSet, kerr KError) { + Logger.Printf("Retrying batch for %v-%d because of %s\n", topic, partition, kerr) + produceSet := newProduceSet(p) + produceSet.msgs[topic] = make(map[int32]*partitionSet) + produceSet.msgs[topic][partition] = pSet + produceSet.bufferBytes += pSet.bufferBytes + produceSet.bufferCount += len(pSet.msgs) + for _, msg := range pSet.msgs { + if msg.retries >= p.conf.Producer.Retry.Max { + p.returnError(msg, kerr) + return + } + msg.retries++ + } + + // it's expected that a metadata refresh has been requested prior to calling retryBatch + leader, err := p.client.Leader(topic, partition) + if err != nil { + Logger.Printf("Failed retrying batch for %v-%d because of %v while looking up for new leader\n", topic, partition, err) + for _, msg := range pSet.msgs { + p.returnError(msg, kerr) + } + return + } + bp := p.getBrokerProducer(leader) + bp.output <- produceSet } func (bp *brokerProducer) handleError(sent *produceSet, err error) { switch err.(type) { case PacketEncodingError: - sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { - bp.parent.returnErrors(msgs, err) + sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) { + bp.parent.returnErrors(pSet.msgs, err) }) default: Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err) bp.parent.abandonBrokerConnection(bp.broker) _ = bp.broker.Close() bp.closing = err - sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { - bp.parent.retryMessages(msgs, err) + sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) { + bp.parent.retryMessages(pSet.msgs, err) }) - bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { - bp.parent.retryMessages(msgs, err) + bp.buffer.eachPartition(func(topic string, partition int32, pSet *partitionSet) { + bp.parent.retryMessages(pSet.msgs, err) }) bp.rollOver() } @@ -810,11 +1050,9 @@ func (p *asyncProducer) shutdown() { p.inFlight.Wait() - if p.ownClient { - err := p.client.Close() - if err != nil { - Logger.Println("producer/shutdown failed to close the embedded client:", err) - } + err := p.client.Close() + if err != nil { + Logger.Println("producer/shutdown failed to close the embedded client:", err) } close(p.input) @@ -824,6 
+1062,12 @@ func (p *asyncProducer) shutdown() { } func (p *asyncProducer) returnError(msg *ProducerMessage, err error) { + // We need to reset the producer ID epoch if we set a sequence number on it, because the broker + // will never see a message with this number, so we can never continue the sequence. + if msg.hasSequence { + Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition) + p.txnmgr.bumpEpoch() + } msg.clear() pErr := &ProducerError{Msg: msg, Err: err} if p.conf.Producer.Return.Errors { @@ -865,7 +1109,7 @@ func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) { } } -func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage { +func (p *asyncProducer) getBrokerProducer(broker *Broker) *brokerProducer { p.brokerLock.Lock() defer p.brokerLock.Unlock() @@ -882,13 +1126,13 @@ func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessag return bp } -func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) { +func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp *brokerProducer) { p.brokerLock.Lock() defer p.brokerLock.Unlock() p.brokerRefs[bp]-- if p.brokerRefs[bp] == 0 { - close(bp) + close(bp.input) delete(p.brokerRefs, bp) if p.brokers[broker] == bp { @@ -901,5 +1145,10 @@ func (p *asyncProducer) abandonBrokerConnection(broker *Broker) { p.brokerLock.Lock() defer p.brokerLock.Unlock() + bc, ok := p.brokers[broker] + if ok && bc.abandoned != nil { + close(bc.abandoned) + } + delete(p.brokers, broker) } diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/Shopify/sarama/balance_strategy.go new file mode 100644 index 00000000..d9789a02 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/balance_strategy.go @@ -0,0 +1,1061 @@ +package sarama + +import ( + "container/heap" + "math" + "sort" + "strings" +) + +const ( + // RangeBalanceStrategyName identifies strategies that use the range partition assignment strategy + RangeBalanceStrategyName = "range" + + // RoundRobinBalanceStrategyName identifies strategies that use the round-robin partition assignment strategy + RoundRobinBalanceStrategyName = "roundrobin" + + // StickyBalanceStrategyName identifies strategies that use the sticky-partition assignment strategy + StickyBalanceStrategyName = "sticky" + + defaultGeneration = -1 +) + +// BalanceStrategyPlan is the results of any BalanceStrategy.Plan attempt. +// It contains an allocation of topic/partitions by memberID in the form of +// a `memberID -> topic -> partitions` map. +type BalanceStrategyPlan map[string]map[string][]int32 + +// Add assigns a topic with a number partitions to a member. +func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) { + if len(partitions) == 0 { + return + } + if _, ok := p[memberID]; !ok { + p[memberID] = make(map[string][]int32, 1) + } + p[memberID][topic] = append(p[memberID][topic], partitions...) +} + +// -------------------------------------------------------------------- + +// BalanceStrategy is used to balance topics and partitions +// across members of a consumer group +type BalanceStrategy interface { + // Name uniquely identifies the strategy. + Name() string + + // Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions` + // and returns a distribution plan. 
+ Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) + + // AssignmentData returns the serialized assignment data for the specified + // memberID + AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) +} + +// -------------------------------------------------------------------- + +// BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members. +// Example with one topic T with six partitions (0..5) and two members (M1, M2): +// M1: {T: [0, 1, 2]} +// M2: {T: [3, 4, 5]} +var BalanceStrategyRange = &balanceStrategy{ + name: RangeBalanceStrategyName, + coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { + step := float64(len(partitions)) / float64(len(memberIDs)) + + for i, memberID := range memberIDs { + pos := float64(i) + min := int(math.Floor(pos*step + 0.5)) + max := int(math.Floor((pos+1)*step + 0.5)) + plan.Add(memberID, topic, partitions[min:max]...) + } + }, +} + +// BalanceStrategyRoundRobin assigns partitions to members in alternating order. +// Example with topic T with six partitions (0..5) and two members (M1, M2): +// M1: {T: [0, 2, 4]} +// M2: {T: [1, 3, 5]} +var BalanceStrategyRoundRobin = &balanceStrategy{ + name: RoundRobinBalanceStrategyName, + coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) { + for i, part := range partitions { + memberID := memberIDs[i%len(memberIDs)] + plan.Add(memberID, topic, part) + } + }, +} + +// BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments +// while maintain a balanced partition distribution. +// Example with topic T with six partitions (0..5) and two members (M1, M2): +// M1: {T: [0, 2, 4]} +// M2: {T: [1, 3, 5]} +// +// On reassignment with an additional consumer, you might get an assignment plan like: +// M1: {T: [0, 2]} +// M2: {T: [1, 3]} +// M3: {T: [4, 5]} +// +var BalanceStrategySticky = &stickyBalanceStrategy{} + +// -------------------------------------------------------------------- + +type balanceStrategy struct { + name string + coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) +} + +// Name implements BalanceStrategy. +func (s *balanceStrategy) Name() string { return s.name } + +// Plan implements BalanceStrategy. 
+func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) { + // Build members by topic map + mbt := make(map[string][]string) + for memberID, meta := range members { + for _, topic := range meta.Topics { + mbt[topic] = append(mbt[topic], memberID) + } + } + + // Sort members for each topic + for topic, memberIDs := range mbt { + sort.Sort(&balanceStrategySortable{ + topic: topic, + memberIDs: memberIDs, + }) + } + + // Assemble plan + plan := make(BalanceStrategyPlan, len(members)) + for topic, memberIDs := range mbt { + s.coreFn(plan, memberIDs, topic, topics[topic]) + } + return plan, nil +} + +// AssignmentData simple strategies do not require any shared assignment data +func (s *balanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) { + return nil, nil +} + +type balanceStrategySortable struct { + topic string + memberIDs []string +} + +func (p balanceStrategySortable) Len() int { return len(p.memberIDs) } +func (p balanceStrategySortable) Swap(i, j int) { + p.memberIDs[i], p.memberIDs[j] = p.memberIDs[j], p.memberIDs[i] +} +func (p balanceStrategySortable) Less(i, j int) bool { + return balanceStrategyHashValue(p.topic, p.memberIDs[i]) < balanceStrategyHashValue(p.topic, p.memberIDs[j]) +} + +func balanceStrategyHashValue(vv ...string) uint32 { + h := uint32(2166136261) + for _, s := range vv { + for _, c := range s { + h ^= uint32(c) + h *= 16777619 + } + } + return h +} + +type stickyBalanceStrategy struct { + movements partitionMovements +} + +// Name implements BalanceStrategy. +func (s *stickyBalanceStrategy) Name() string { return StickyBalanceStrategyName } + +// Plan implements BalanceStrategy. +func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) { + // track partition movements during generation of the partition assignment plan + s.movements = partitionMovements{ + Movements: make(map[topicPartitionAssignment]consumerPair), + PartitionMovementsByTopic: make(map[string]map[consumerPair]map[topicPartitionAssignment]bool), + } + + // prepopulate the current assignment state from userdata on the consumer group members + currentAssignment, prevAssignment, err := prepopulateCurrentAssignments(members) + if err != nil { + return nil, err + } + + // determine if we're dealing with a completely fresh assignment, or if there's existing assignment state + isFreshAssignment := false + if len(currentAssignment) == 0 { + isFreshAssignment = true + } + + // create a mapping of all current topic partitions and the consumers that can be assigned to them + partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string) + for topic, partitions := range topics { + for _, partition := range partitions { + partition2AllPotentialConsumers[topicPartitionAssignment{Topic: topic, Partition: partition}] = []string{} + } + } + + // create a mapping of all consumers to all potential topic partitions that can be assigned to them + // also, populate the mapping of partitions to potential consumers + consumer2AllPotentialPartitions := make(map[string][]topicPartitionAssignment, len(members)) + for memberID, meta := range members { + consumer2AllPotentialPartitions[memberID] = make([]topicPartitionAssignment, 0) + for _, topicSubscription := range meta.Topics { + // only evaluate topic subscriptions that are present in the supplied topics map + if _, found := 
topics[topicSubscription]; found { + for _, partition := range topics[topicSubscription] { + topicPartition := topicPartitionAssignment{Topic: topicSubscription, Partition: partition} + consumer2AllPotentialPartitions[memberID] = append(consumer2AllPotentialPartitions[memberID], topicPartition) + partition2AllPotentialConsumers[topicPartition] = append(partition2AllPotentialConsumers[topicPartition], memberID) + } + } + } + + // add this consumer to currentAssignment (with an empty topic partition assignment) if it does not already exist + if _, exists := currentAssignment[memberID]; !exists { + currentAssignment[memberID] = make([]topicPartitionAssignment, 0) + } + } + + // create a mapping of each partition to its current consumer, where possible + currentPartitionConsumers := make(map[topicPartitionAssignment]string, len(currentAssignment)) + unvisitedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers)) + for partition := range partition2AllPotentialConsumers { + unvisitedPartitions[partition] = true + } + var unassignedPartitions []topicPartitionAssignment + for memberID, partitions := range currentAssignment { + var keepPartitions []topicPartitionAssignment + for _, partition := range partitions { + // If this partition no longer exists at all, likely due to the + // topic being deleted, we remove the partition from the member. + if _, exists := partition2AllPotentialConsumers[partition]; !exists { + continue + } + delete(unvisitedPartitions, partition) + currentPartitionConsumers[partition] = memberID + + if !strsContains(members[memberID].Topics, partition.Topic) { + unassignedPartitions = append(unassignedPartitions, partition) + continue + } + keepPartitions = append(keepPartitions, partition) + } + currentAssignment[memberID] = keepPartitions + } + for unvisited := range unvisitedPartitions { + unassignedPartitions = append(unassignedPartitions, unvisited) + } + + // sort the topic partitions in order of priority for reassignment + sortedPartitions := sortPartitions(currentAssignment, prevAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions) + + // at this point we have preserved all valid topic partition to consumer assignments and removed + // all invalid topic partitions and invalid consumers. Now we need to assign unassignedPartitions + // to consumers so that the topic partition assignments are as balanced as possible. 
+ + // an ascending sorted set of consumers based on how many topic partitions are already assigned to them + sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment) + s.balance(currentAssignment, prevAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumers) + + // Assemble plan + plan := make(BalanceStrategyPlan, len(currentAssignment)) + for memberID, assignments := range currentAssignment { + if len(assignments) == 0 { + plan[memberID] = make(map[string][]int32) + } else { + for _, assignment := range assignments { + plan.Add(memberID, assignment.Topic, assignment.Partition) + } + } + } + return plan, nil +} + +// AssignmentData serializes the set of topics currently assigned to the +// specified member as part of the supplied balance plan +func (s *stickyBalanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) { + return encode(&StickyAssignorUserDataV1{ + Topics: topics, + Generation: generationID, + }, nil) +} + +func strsContains(s []string, value string) bool { + for _, entry := range s { + if entry == value { + return true + } + } + return false +} + +// Balance assignments across consumers for maximum fairness and stickiness. +func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) { + initializing := false + if len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 { + initializing = true + } + + // assign all unassigned partitions + for _, partition := range unassignedPartitions { + // skip if there is no potential consumer for the partition + if len(partition2AllPotentialConsumers[partition]) == 0 { + continue + } + sortedCurrentSubscriptions = assignPartition(partition, sortedCurrentSubscriptions, currentAssignment, consumer2AllPotentialPartitions, currentPartitionConsumer) + } + + // narrow down the reassignment scope to only those partitions that can actually be reassigned + for partition := range partition2AllPotentialConsumers { + if !canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) { + sortedPartitions = removeTopicPartitionFromMemberAssignments(sortedPartitions, partition) + } + } + + // narrow down the reassignment scope to only those consumers that are subject to reassignment + fixedAssignments := make(map[string][]topicPartitionAssignment) + for memberID := range consumer2AllPotentialPartitions { + if !canConsumerParticipateInReassignment(memberID, currentAssignment, consumer2AllPotentialPartitions, partition2AllPotentialConsumers) { + fixedAssignments[memberID] = currentAssignment[memberID] + delete(currentAssignment, memberID) + sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment) + } + } + + // create a deep copy of the current assignment so we can revert to it if we do not get a more balanced assignment later + preBalanceAssignment := deepCopyAssignment(currentAssignment) + preBalancePartitionConsumers := 
make(map[topicPartitionAssignment]string, len(currentPartitionConsumer)) + for k, v := range currentPartitionConsumer { + preBalancePartitionConsumers[k] = v + } + + reassignmentPerformed := s.performReassignments(sortedPartitions, currentAssignment, prevAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer) + + // if we are not preserving existing assignments and we have made changes to the current assignment + // make sure we are getting a more balanced assignment; otherwise, revert to previous assignment + if !initializing && reassignmentPerformed && getBalanceScore(currentAssignment) >= getBalanceScore(preBalanceAssignment) { + currentAssignment = deepCopyAssignment(preBalanceAssignment) + currentPartitionConsumer = make(map[topicPartitionAssignment]string, len(preBalancePartitionConsumers)) + for k, v := range preBalancePartitionConsumers { + currentPartitionConsumer[k] = v + } + } + + // add the fixed assignments (those that could not change) back + for consumer, assignments := range fixedAssignments { + currentAssignment[consumer] = assignments + } +} + +// Calculate the balance score of the given assignment, as the sum of assigned partitions size difference of all consumer pairs. +// A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0. +// Lower balance score indicates a more balanced assignment. +func getBalanceScore(assignment map[string][]topicPartitionAssignment) int { + consumer2AssignmentSize := make(map[string]int, len(assignment)) + for memberID, partitions := range assignment { + consumer2AssignmentSize[memberID] = len(partitions) + } + + var score float64 + for memberID, consumerAssignmentSize := range consumer2AssignmentSize { + delete(consumer2AssignmentSize, memberID) + for _, otherConsumerAssignmentSize := range consumer2AssignmentSize { + score += math.Abs(float64(consumerAssignmentSize - otherConsumerAssignmentSize)) + } + } + return int(score) +} + +// Determine whether the current assignment plan is balanced. 
+func isBalanced(currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, allSubscriptions map[string][]topicPartitionAssignment) bool { + sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment) + min := len(currentAssignment[sortedCurrentSubscriptions[0]]) + max := len(currentAssignment[sortedCurrentSubscriptions[len(sortedCurrentSubscriptions)-1]]) + if min >= max-1 { + // if minimum and maximum numbers of partitions assigned to consumers differ by at most one return true + return true + } + + // create a mapping from partitions to the consumer assigned to them + allPartitions := make(map[topicPartitionAssignment]string) + for memberID, partitions := range currentAssignment { + for _, partition := range partitions { + if _, exists := allPartitions[partition]; exists { + Logger.Printf("Topic %s Partition %d is assigned more than one consumer", partition.Topic, partition.Partition) + } + allPartitions[partition] = memberID + } + } + + // for each consumer that does not have all the topic partitions it can get make sure none of the topic partitions it + // could but did not get cannot be moved to it (because that would break the balance) + for _, memberID := range sortedCurrentSubscriptions { + consumerPartitions := currentAssignment[memberID] + consumerPartitionCount := len(consumerPartitions) + + // skip if this consumer already has all the topic partitions it can get + if consumerPartitionCount == len(allSubscriptions[memberID]) { + continue + } + + // otherwise make sure it cannot get any more + potentialTopicPartitions := allSubscriptions[memberID] + for _, partition := range potentialTopicPartitions { + if !memberAssignmentsIncludeTopicPartition(currentAssignment[memberID], partition) { + otherConsumer := allPartitions[partition] + otherConsumerPartitionCount := len(currentAssignment[otherConsumer]) + if consumerPartitionCount < otherConsumerPartitionCount { + return false + } + } + } + } + return true +} + +// Reassign all topic partitions that need reassignment until balanced. 
+func (s *stickyBalanceStrategy) performReassignments(reassignablePartitions []topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) bool { + reassignmentPerformed := false + modified := false + + // repeat reassignment until no partition can be moved to improve the balance + for { + modified = false + // reassign all reassignable partitions (starting from the partition with least potential consumers and if needed) + // until the full list is processed or a balance is achieved + for _, partition := range reassignablePartitions { + if isBalanced(currentAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions) { + break + } + + // the partition must have at least two consumers + if len(partition2AllPotentialConsumers[partition]) <= 1 { + Logger.Printf("Expected more than one potential consumer for partition %s topic %d", partition.Topic, partition.Partition) + } + + // the partition must have a consumer + consumer := currentPartitionConsumer[partition] + if consumer == "" { + Logger.Printf("Expected topic %s partition %d to be assigned to a consumer", partition.Topic, partition.Partition) + } + + if _, exists := prevAssignment[partition]; exists { + if len(currentAssignment[consumer]) > (len(currentAssignment[prevAssignment[partition].MemberID]) + 1) { + sortedCurrentSubscriptions = s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, prevAssignment[partition].MemberID) + reassignmentPerformed = true + modified = true + continue + } + } + + // check if a better-suited consumer exists for the partition; if so, reassign it + for _, otherConsumer := range partition2AllPotentialConsumers[partition] { + if len(currentAssignment[consumer]) > (len(currentAssignment[otherConsumer]) + 1) { + sortedCurrentSubscriptions = s.reassignPartitionToNewConsumer(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, consumer2AllPotentialPartitions) + reassignmentPerformed = true + modified = true + break + } + } + } + if !modified { + return reassignmentPerformed + } + } +} + +// Identify a new consumer for a topic partition and reassign it. 
+func (s *stickyBalanceStrategy) reassignPartitionToNewConsumer(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []string { + for _, anotherConsumer := range sortedCurrentSubscriptions { + if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[anotherConsumer], partition) { + return s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, anotherConsumer) + } + } + return sortedCurrentSubscriptions +} + +// Reassign a specific partition to a new consumer +func (s *stickyBalanceStrategy) reassignPartition(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, newConsumer string) []string { + consumer := currentPartitionConsumer[partition] + // find the correct partition movement considering the stickiness requirement + partitionToBeMoved := s.movements.getTheActualPartitionToBeMoved(partition, consumer, newConsumer) + return s.processPartitionMovement(partitionToBeMoved, newConsumer, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer) +} + +// Track the movement of a topic partition after assignment +func (s *stickyBalanceStrategy) processPartitionMovement(partition topicPartitionAssignment, newConsumer string, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string) []string { + oldConsumer := currentPartitionConsumer[partition] + s.movements.movePartition(partition, oldConsumer, newConsumer) + + currentAssignment[oldConsumer] = removeTopicPartitionFromMemberAssignments(currentAssignment[oldConsumer], partition) + currentAssignment[newConsumer] = append(currentAssignment[newConsumer], partition) + currentPartitionConsumer[partition] = newConsumer + return sortMemberIDsByPartitionAssignments(currentAssignment) +} + +// Determine whether a specific consumer should be considered for topic partition assignment. +func canConsumerParticipateInReassignment(memberID string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool { + currentPartitions := currentAssignment[memberID] + currentAssignmentSize := len(currentPartitions) + maxAssignmentSize := len(consumer2AllPotentialPartitions[memberID]) + if currentAssignmentSize > maxAssignmentSize { + Logger.Printf("The consumer %s is assigned more partitions than the maximum possible", memberID) + } + if currentAssignmentSize < maxAssignmentSize { + // if a consumer is not assigned all its potential partitions it is subject to reassignment + return true + } + for _, partition := range currentPartitions { + if canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) { + return true + } + } + return false +} + +// Only consider reassigning those topic partitions that have two or more potential consumers. 
+func canTopicPartitionParticipateInReassignment(partition topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool { + return len(partition2AllPotentialConsumers[partition]) >= 2 +} + +// The assignment should improve the overall balance of the partition assignments to consumers. +func assignPartition(partition topicPartitionAssignment, sortedCurrentSubscriptions []string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, currentPartitionConsumer map[topicPartitionAssignment]string) []string { + for _, memberID := range sortedCurrentSubscriptions { + if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[memberID], partition) { + currentAssignment[memberID] = append(currentAssignment[memberID], partition) + currentPartitionConsumer[partition] = memberID + break + } + } + return sortMemberIDsByPartitionAssignments(currentAssignment) +} + +// Deserialize topic partition assignment data to aid with creation of a sticky assignment. +func deserializeTopicPartitionAssignment(userDataBytes []byte) (StickyAssignorUserData, error) { + userDataV1 := &StickyAssignorUserDataV1{} + if err := decode(userDataBytes, userDataV1); err != nil { + userDataV0 := &StickyAssignorUserDataV0{} + if err := decode(userDataBytes, userDataV0); err != nil { + return nil, err + } + return userDataV0, nil + } + return userDataV1, nil +} + +// filterAssignedPartitions returns a map of consumer group members to their list of previously-assigned topic partitions, limited +// to those topic partitions currently reported by the Kafka cluster. +func filterAssignedPartitions(currentAssignment map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) map[string][]topicPartitionAssignment { + assignments := deepCopyAssignment(currentAssignment) + for memberID, partitions := range assignments { + // perform in-place filtering + i := 0 + for _, partition := range partitions { + if _, exists := partition2AllPotentialConsumers[partition]; exists { + partitions[i] = partition + i++ + } + } + assignments[memberID] = partitions[:i] + } + return assignments +} + +func removeTopicPartitionFromMemberAssignments(assignments []topicPartitionAssignment, topic topicPartitionAssignment) []topicPartitionAssignment { + for i, assignment := range assignments { + if assignment == topic { + return append(assignments[:i], assignments[i+1:]...) 
+ } + } + return assignments +} + +func memberAssignmentsIncludeTopicPartition(assignments []topicPartitionAssignment, topic topicPartitionAssignment) bool { + for _, assignment := range assignments { + if assignment == topic { + return true + } + } + return false +} + +func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, partitionsWithADifferentPreviousAssignment map[topicPartitionAssignment]consumerGenerationPair, isFreshAssignment bool, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []topicPartitionAssignment { + unassignedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers)) + for partition := range partition2AllPotentialConsumers { + unassignedPartitions[partition] = true + } + + sortedPartitions := make([]topicPartitionAssignment, 0) + if !isFreshAssignment && areSubscriptionsIdentical(partition2AllPotentialConsumers, consumer2AllPotentialPartitions) { + // if this is a reassignment and the subscriptions are identical (all consumers can consumer from all topics) + // then we just need to simply list partitions in a round robin fashion (from consumers with + // most assigned partitions to those with least) + assignments := filterAssignedPartitions(currentAssignment, partition2AllPotentialConsumers) + + // use priority-queue to evaluate consumer group members in descending-order based on + // the number of topic partition assignments (i.e. consumers with most assignments first) + pq := make(assignmentPriorityQueue, len(assignments)) + i := 0 + for consumerID, consumerAssignments := range assignments { + pq[i] = &consumerGroupMember{ + id: consumerID, + assignments: consumerAssignments, + } + i++ + } + heap.Init(&pq) + + for { + // loop until no consumer-group members remain + if pq.Len() == 0 { + break + } + member := pq[0] + + // partitions that were assigned to a different consumer last time + var prevPartitionIndex int + for i, partition := range member.assignments { + if _, exists := partitionsWithADifferentPreviousAssignment[partition]; exists { + prevPartitionIndex = i + break + } + } + + if len(member.assignments) > 0 { + partition := member.assignments[prevPartitionIndex] + sortedPartitions = append(sortedPartitions, partition) + delete(unassignedPartitions, partition) + if prevPartitionIndex == 0 { + member.assignments = member.assignments[1:] + } else { + member.assignments = append(member.assignments[:prevPartitionIndex], member.assignments[prevPartitionIndex+1:]...) 
+ } + heap.Fix(&pq, 0) + } else { + heap.Pop(&pq) + } + } + + for partition := range unassignedPartitions { + sortedPartitions = append(sortedPartitions, partition) + } + } else { + // an ascending sorted set of topic partitions based on how many consumers can potentially use them + sortedPartitions = sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers) + } + return sortedPartitions +} + +func sortMemberIDsByPartitionAssignments(assignments map[string][]topicPartitionAssignment) []string { + // sort the members by the number of partition assignments in ascending order + sortedMemberIDs := make([]string, 0, len(assignments)) + for memberID := range assignments { + sortedMemberIDs = append(sortedMemberIDs, memberID) + } + sort.SliceStable(sortedMemberIDs, func(i, j int) bool { + ret := len(assignments[sortedMemberIDs[i]]) - len(assignments[sortedMemberIDs[j]]) + if ret == 0 { + return sortedMemberIDs[i] < sortedMemberIDs[j] + } + return len(assignments[sortedMemberIDs[i]]) < len(assignments[sortedMemberIDs[j]]) + }) + return sortedMemberIDs +} + +func sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers map[topicPartitionAssignment][]string) []topicPartitionAssignment { + // sort the members by the number of partition assignments in descending order + sortedPartionIDs := make([]topicPartitionAssignment, len(partition2AllPotentialConsumers)) + i := 0 + for partition := range partition2AllPotentialConsumers { + sortedPartionIDs[i] = partition + i++ + } + sort.Slice(sortedPartionIDs, func(i, j int) bool { + if len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) == len(partition2AllPotentialConsumers[sortedPartionIDs[j]]) { + ret := strings.Compare(sortedPartionIDs[i].Topic, sortedPartionIDs[j].Topic) + if ret == 0 { + return sortedPartionIDs[i].Partition < sortedPartionIDs[j].Partition + } + return ret < 0 + } + return len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) < len(partition2AllPotentialConsumers[sortedPartionIDs[j]]) + }) + return sortedPartionIDs +} + +func deepCopyAssignment(assignment map[string][]topicPartitionAssignment) map[string][]topicPartitionAssignment { + copy := make(map[string][]topicPartitionAssignment, len(assignment)) + for memberID, subscriptions := range assignment { + copy[memberID] = append(subscriptions[:0:0], subscriptions...) 
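+		// the append(subscriptions[:0:0], subscriptions...) form above forces a fresh
+		// allocation, so the copied slice shares no backing array with the original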
+ } + return copy +} + +func areSubscriptionsIdentical(partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) bool { + curMembers := make(map[string]int) + for _, cur := range partition2AllPotentialConsumers { + if len(curMembers) == 0 { + for _, curMembersElem := range cur { + curMembers[curMembersElem]++ + } + continue + } + + if len(curMembers) != len(cur) { + return false + } + + yMap := make(map[string]int) + for _, yElem := range cur { + yMap[yElem]++ + } + + for curMembersMapKey, curMembersMapVal := range curMembers { + if yMap[curMembersMapKey] != curMembersMapVal { + return false + } + } + } + + curPartitions := make(map[topicPartitionAssignment]int) + for _, cur := range consumer2AllPotentialPartitions { + if len(curPartitions) == 0 { + for _, curPartitionElem := range cur { + curPartitions[curPartitionElem]++ + } + continue + } + + if len(curPartitions) != len(cur) { + return false + } + + yMap := make(map[topicPartitionAssignment]int) + for _, yElem := range cur { + yMap[yElem]++ + } + + for curMembersMapKey, curMembersMapVal := range curPartitions { + if yMap[curMembersMapKey] != curMembersMapVal { + return false + } + } + } + return true +} + +// We need to process subscriptions' user data with each consumer's reported generation in mind +// higher generations overwrite lower generations in case of a conflict +// note that a conflict could exist only if user data is for different generations +func prepopulateCurrentAssignments(members map[string]ConsumerGroupMemberMetadata) (map[string][]topicPartitionAssignment, map[topicPartitionAssignment]consumerGenerationPair, error) { + currentAssignment := make(map[string][]topicPartitionAssignment) + prevAssignment := make(map[topicPartitionAssignment]consumerGenerationPair) + + // for each partition we create a sorted map of its consumers by generation + sortedPartitionConsumersByGeneration := make(map[topicPartitionAssignment]map[int]string) + for memberID, meta := range members { + consumerUserData, err := deserializeTopicPartitionAssignment(meta.UserData) + if err != nil { + return nil, nil, err + } + for _, partition := range consumerUserData.partitions() { + if consumers, exists := sortedPartitionConsumersByGeneration[partition]; exists { + if consumerUserData.hasGeneration() { + if _, generationExists := consumers[consumerUserData.generation()]; generationExists { + // same partition is assigned to two consumers during the same rebalance. 
+ // log a warning and skip this record + Logger.Printf("Topic %s Partition %d is assigned to multiple consumers following sticky assignment generation %d", partition.Topic, partition.Partition, consumerUserData.generation()) + continue + } else { + consumers[consumerUserData.generation()] = memberID + } + } else { + consumers[defaultGeneration] = memberID + } + } else { + generation := defaultGeneration + if consumerUserData.hasGeneration() { + generation = consumerUserData.generation() + } + sortedPartitionConsumersByGeneration[partition] = map[int]string{generation: memberID} + } + } + } + + // prevAssignment holds the prior ConsumerGenerationPair (before current) of each partition + // current and previous consumers are the last two consumers of each partition in the above sorted map + for partition, consumers := range sortedPartitionConsumersByGeneration { + // sort consumers by generation in decreasing order + var generations []int + for generation := range consumers { + generations = append(generations, generation) + } + sort.Sort(sort.Reverse(sort.IntSlice(generations))) + + consumer := consumers[generations[0]] + if _, exists := currentAssignment[consumer]; !exists { + currentAssignment[consumer] = []topicPartitionAssignment{partition} + } else { + currentAssignment[consumer] = append(currentAssignment[consumer], partition) + } + + // check for previous assignment, if any + if len(generations) > 1 { + prevAssignment[partition] = consumerGenerationPair{ + MemberID: consumers[generations[1]], + Generation: generations[1], + } + } + } + return currentAssignment, prevAssignment, nil +} + +type consumerGenerationPair struct { + MemberID string + Generation int +} + +// consumerPair represents a pair of Kafka consumer ids involved in a partition reassignment. +type consumerPair struct { + SrcMemberID string + DstMemberID string +} + +// partitionMovements maintains some data structures to simplify lookup of partition movements among consumers. 
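+// PartitionMovementsByTopic records, per topic, which partitions have moved between
+// each (source, destination) consumer pair, while Movements maps each moved partition
+// to its current source/destination pair.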
+type partitionMovements struct { + PartitionMovementsByTopic map[string]map[consumerPair]map[topicPartitionAssignment]bool + Movements map[topicPartitionAssignment]consumerPair +} + +func (p *partitionMovements) removeMovementRecordOfPartition(partition topicPartitionAssignment) consumerPair { + pair := p.Movements[partition] + delete(p.Movements, partition) + + partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic] + delete(partitionMovementsForThisTopic[pair], partition) + if len(partitionMovementsForThisTopic[pair]) == 0 { + delete(partitionMovementsForThisTopic, pair) + } + if len(p.PartitionMovementsByTopic[partition.Topic]) == 0 { + delete(p.PartitionMovementsByTopic, partition.Topic) + } + return pair +} + +func (p *partitionMovements) addPartitionMovementRecord(partition topicPartitionAssignment, pair consumerPair) { + p.Movements[partition] = pair + if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists { + p.PartitionMovementsByTopic[partition.Topic] = make(map[consumerPair]map[topicPartitionAssignment]bool) + } + partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic] + if _, exists := partitionMovementsForThisTopic[pair]; !exists { + partitionMovementsForThisTopic[pair] = make(map[topicPartitionAssignment]bool) + } + partitionMovementsForThisTopic[pair][partition] = true +} + +func (p *partitionMovements) movePartition(partition topicPartitionAssignment, oldConsumer, newConsumer string) { + pair := consumerPair{ + SrcMemberID: oldConsumer, + DstMemberID: newConsumer, + } + if _, exists := p.Movements[partition]; exists { + // this partition has previously moved + existingPair := p.removeMovementRecordOfPartition(partition) + if existingPair.DstMemberID != oldConsumer { + Logger.Printf("Existing pair DstMemberID %s was not equal to the oldConsumer ID %s", existingPair.DstMemberID, oldConsumer) + } + if existingPair.SrcMemberID != newConsumer { + // the partition is not moving back to its previous consumer + p.addPartitionMovementRecord(partition, consumerPair{ + SrcMemberID: existingPair.SrcMemberID, + DstMemberID: newConsumer, + }) + } + } else { + p.addPartitionMovementRecord(partition, pair) + } +} + +func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicPartitionAssignment, oldConsumer, newConsumer string) topicPartitionAssignment { + if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists { + return partition + } + if _, exists := p.Movements[partition]; exists { + // this partition has previously moved + if oldConsumer != p.Movements[partition].DstMemberID { + Logger.Printf("Partition movement DstMemberID %s was not equal to the oldConsumer ID %s", p.Movements[partition].DstMemberID, oldConsumer) + } + oldConsumer = p.Movements[partition].SrcMemberID + } + + partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic] + reversePair := consumerPair{ + SrcMemberID: newConsumer, + DstMemberID: oldConsumer, + } + if _, exists := partitionMovementsForThisTopic[reversePair]; !exists { + return partition + } + var reversePairPartition topicPartitionAssignment + for otherPartition := range partitionMovementsForThisTopic[reversePair] { + reversePairPartition = otherPartition + } + return reversePairPartition +} + +func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) { + if src == dst { + return currentPath, false + } + if len(pairs) == 0 { + return currentPath, false + } + for _, pair := range pairs { + if 
src == pair.SrcMemberID && dst == pair.DstMemberID { + currentPath = append(currentPath, src, dst) + return currentPath, true + } + } + + for _, pair := range pairs { + if pair.SrcMemberID == src { + // create a deep copy of the pairs, excluding the current pair + reducedSet := make([]consumerPair, len(pairs)-1) + i := 0 + for _, p := range pairs { + if p != pair { + reducedSet[i] = pair + i++ + } + } + + currentPath = append(currentPath, pair.SrcMemberID) + return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath) + } + } + return currentPath, false +} + +func (p *partitionMovements) in(cycle []string, cycles [][]string) bool { + superCycle := make([]string, len(cycle)-1) + for i := 0; i < len(cycle)-1; i++ { + superCycle[i] = cycle[i] + } + superCycle = append(superCycle, cycle...) + for _, foundCycle := range cycles { + if len(foundCycle) == len(cycle) && indexOfSubList(superCycle, foundCycle) != -1 { + return true + } + } + return false +} + +func (p *partitionMovements) hasCycles(pairs []consumerPair) bool { + cycles := make([][]string, 0) + for _, pair := range pairs { + // create a deep copy of the pairs, excluding the current pair + reducedPairs := make([]consumerPair, len(pairs)-1) + i := 0 + for _, p := range pairs { + if p != pair { + reducedPairs[i] = pair + i++ + } + } + if path, linked := p.isLinked(pair.DstMemberID, pair.SrcMemberID, reducedPairs, []string{pair.SrcMemberID}); linked { + if !p.in(path, cycles) { + cycles = append(cycles, path) + Logger.Printf("A cycle of length %d was found: %v", len(path)-1, path) + } + } + } + + // for now we want to make sure there is no partition movements of the same topic between a pair of consumers. + // the odds of finding a cycle among more than two consumers seem to be very low (according to various randomized + // tests with the given sticky algorithm) that it should not worth the added complexity of handling those cases. + for _, cycle := range cycles { + if len(cycle) == 3 { + return true + } + } + return false +} + +func (p *partitionMovements) isSticky() bool { + for topic, movements := range p.PartitionMovementsByTopic { + movementPairs := make([]consumerPair, len(movements)) + i := 0 + for pair := range movements { + movementPairs[i] = pair + i++ + } + if p.hasCycles(movementPairs) { + Logger.Printf("Stickiness is violated for topic %s", topic) + Logger.Printf("Partition movements for this topic occurred among the following consumer pairs: %v", movements) + return false + } + } + return true +} + +func indexOfSubList(source []string, target []string) int { + targetSize := len(target) + maxCandidate := len(source) - targetSize +nextCand: + for candidate := 0; candidate <= maxCandidate; candidate++ { + j := candidate + for i := 0; i < targetSize; i++ { + if target[i] != source[j] { + // Element mismatch, try next cand + continue nextCand + } + j++ + } + // All elements of candidate matched target + return candidate + } + return -1 +} + +type consumerGroupMember struct { + id string + assignments []topicPartitionAssignment +} + +// assignmentPriorityQueue is a priority-queue of consumer group members that is sorted +// in descending order (most assignments to least assignments). 
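+// It implements heap.Interface, so sortPartitions can use container/heap to repeatedly
+// look at the member with the most assignments, roughly:
+//
+//	heap.Init(&pq)
+//	member := pq[0]   // member with the most assignments
+//	heap.Fix(&pq, 0)  // restore ordering after mutating its assignments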
+type assignmentPriorityQueue []*consumerGroupMember + +func (pq assignmentPriorityQueue) Len() int { return len(pq) } + +func (pq assignmentPriorityQueue) Less(i, j int) bool { + // order asssignment priority queue in descending order using assignment-count/member-id + if len(pq[i].assignments) == len(pq[j].assignments) { + return strings.Compare(pq[i].id, pq[j].id) > 0 + } + return len(pq[i].assignments) > len(pq[j].assignments) +} + +func (pq assignmentPriorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] +} + +func (pq *assignmentPriorityQueue) Push(x interface{}) { + member := x.(*consumerGroupMember) + *pq = append(*pq, member) +} + +func (pq *assignmentPriorityQueue) Pop() interface{} { + old := *pq + n := len(old) + member := old[n-1] + *pq = old[0 : n-1] + return member +} diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go index 89beecc8..232559ae 100644 --- a/vendor/github.com/Shopify/sarama/broker.go +++ b/vendor/github.com/Shopify/sarama/broker.go @@ -6,28 +6,32 @@ import ( "fmt" "io" "net" + "sort" "strconv" + "strings" "sync" "sync/atomic" "time" - "github.com/rcrowley/go-metrics" + metrics "github.com/rcrowley/go-metrics" ) // Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe. type Broker struct { - id int32 - addr string + conf *Config + rack *string - conf *Config + id int32 + addr string correlationID int32 conn net.Conn connErr error lock sync.Mutex opened int32 + responses chan responsePromise + done chan bool - responses chan responsePromise - done chan bool + registeredMetrics []string incomingByteRate metrics.Meter requestRate metrics.Meter @@ -36,6 +40,7 @@ type Broker struct { outgoingByteRate metrics.Meter responseRate metrics.Meter responseSize metrics.Histogram + requestsInFlight metrics.Counter brokerIncomingByteRate metrics.Meter brokerRequestRate metrics.Meter brokerRequestSize metrics.Histogram @@ -43,16 +48,83 @@ type Broker struct { brokerOutgoingByteRate metrics.Meter brokerResponseRate metrics.Meter brokerResponseSize metrics.Histogram + brokerRequestsInFlight metrics.Counter + + kerberosAuthenticator GSSAPIKerberosAuth +} + +// SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker +type SASLMechanism string + +const ( + // SASLTypeOAuth represents the SASL/OAUTHBEARER mechanism (Kafka 2.0.0+) + SASLTypeOAuth = "OAUTHBEARER" + // SASLTypePlaintext represents the SASL/PLAIN mechanism + SASLTypePlaintext = "PLAIN" + // SASLTypeSCRAMSHA256 represents the SCRAM-SHA-256 mechanism. + SASLTypeSCRAMSHA256 = "SCRAM-SHA-256" + // SASLTypeSCRAMSHA512 represents the SCRAM-SHA-512 mechanism. + SASLTypeSCRAMSHA512 = "SCRAM-SHA-512" + SASLTypeGSSAPI = "GSSAPI" + // SASLHandshakeV0 is v0 of the Kafka SASL handshake protocol. Client and + // server negotiate SASL auth using opaque packets. + SASLHandshakeV0 = int16(0) + // SASLHandshakeV1 is v1 of the Kafka SASL handshake protocol. Client and + // server negotiate SASL by wrapping tokens with Kafka protocol headers. + SASLHandshakeV1 = int16(1) + // SASLExtKeyAuth is the reserved extension key name sent as part of the + // SASL/OAUTHBEARER initial client response + SASLExtKeyAuth = "auth" +) + +// AccessToken contains an access token used to authenticate a +// SASL/OAUTHBEARER client along with associated metadata. +type AccessToken struct { + // Token is the access token payload. 
+ Token string + // Extensions is a optional map of arbitrary key-value pairs that can be + // sent with the SASL/OAUTHBEARER initial client response. These values are + // ignored by the SASL server if they are unexpected. This feature is only + // supported by Kafka >= 2.1.0. + Extensions map[string]string +} + +// AccessTokenProvider is the interface that encapsulates how implementors +// can generate access tokens for Kafka broker authentication. +type AccessTokenProvider interface { + // Token returns an access token. The implementation should ensure token + // reuse so that multiple calls at connect time do not create multiple + // tokens. The implementation should also periodically refresh the token in + // order to guarantee that each call returns an unexpired token. This + // method should not block indefinitely--a timeout error should be returned + // after a short period of inactivity so that the broker connection logic + // can log debugging information and retry. + Token() (*AccessToken, error) +} + +// SCRAMClient is a an interface to a SCRAM +// client implementation. +type SCRAMClient interface { + // Begin prepares the client for the SCRAM exchange + // with the server with a user name and a password + Begin(userName, password, authzID string) error + // Step steps client through the SCRAM exchange. It is + // called repeatedly until it errors or `Done` returns true. + Step(challenge string) (response string, err error) + // Done should return true when the SCRAM conversation + // is over. + Done() bool } type responsePromise struct { requestTime time.Time correlationID int32 + headerVersion int16 packets chan []byte errors chan error } -// NewBroker creates and returns a Broker targetting the given host:port address. +// NewBroker creates and returns a Broker targeting the given host:port address. // This does not attempt to actually connect, you have to call Open() for that. 
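+// A minimal usage sketch from an importing package (the address below is a
+// placeholder, not part of this change):
+//
+//	b := sarama.NewBroker("localhost:9092")
+//	if err := b.Open(sarama.NewConfig()); err != nil {
+//		// handle the error
+//	}
+//	if ok, _ := b.Connected(); ok {
+//		// the connection attempt started by Open has succeeded
+//	}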
func NewBroker(addr string) *Broker { return &Broker{id: -1, addr: addr} @@ -82,24 +154,19 @@ func (b *Broker) Open(conf *Config) error { go withRecover(func() { defer b.lock.Unlock() - dialer := net.Dialer{ - Timeout: conf.Net.DialTimeout, - KeepAlive: conf.Net.KeepAlive, - } - - if conf.Net.TLS.Enable { - b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config) - } else { - b.conn, b.connErr = dialer.Dial("tcp", b.addr) - } + dialer := conf.getDialer() + b.conn, b.connErr = dialer.Dial("tcp", b.addr) if b.connErr != nil { Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr) b.conn = nil atomic.StoreInt32(&b.opened, 0) return } - b.conn = newBufConn(b.conn) + if conf.Net.TLS.Enable { + b.conn = tls.Client(b.conn, validServerNameTLS(b.addr, conf.Net.TLS.Config)) + } + b.conn = newBufConn(b.conn) b.conf = conf // Create or reuse the global metrics shared between brokers @@ -110,20 +177,16 @@ func (b *Broker) Open(conf *Config) error { b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry) b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry) b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry) + b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", conf.MetricRegistry) // Do not gather metrics for seeded broker (only used during bootstrap) because they share // the same id (-1) and are already exposed through the global metrics above if b.id >= 0 { - b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry) - b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry) - b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry) - b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry) - b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry) - b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry) - b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry) + b.registerMetrics() } if conf.Net.SASL.Enable { - b.connErr = b.sendAndReceiveSASLPlainAuth() + b.connErr = b.authenticateViaSASL() + if b.connErr != nil { err = b.conn.Close() if err == nil { @@ -160,6 +223,7 @@ func (b *Broker) Connected() (bool, error) { return b.conn != nil, b.connErr } +//Close closes the broker resources func (b *Broker) Close() error { b.lock.Lock() defer b.lock.Unlock() @@ -178,6 +242,8 @@ func (b *Broker) Close() error { b.done = nil b.responses = nil + b.unregisterMetrics() + if err == nil { Logger.Printf("Closed connection to broker %s\n", b.addr) } else { @@ -199,6 +265,18 @@ func (b *Broker) Addr() string { return b.addr } +// Rack returns the broker's rack as retrieved from Kafka's metadata or the +// empty string if it is not known. The returned value corresponds to the +// broker's broker.rack configuration setting. Requires protocol version to be +// at least v0.10.0.0. 
+func (b *Broker) Rack() string { + if b.rack == nil { + return "" + } + return *b.rack +} + +//GetMetadata send a metadata request and returns a metadata response or error func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) { response := new(MetadataResponse) @@ -211,6 +289,7 @@ func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error return response, nil } +//GetConsumerMetadata send a consumer metadata request and returns a consumer metadata response or error func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) { response := new(ConsumerMetadataResponse) @@ -223,6 +302,20 @@ func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*Consume return response, nil } +//FindCoordinator sends a find coordinate request and returns a response or error +func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) { + response := new(FindCoordinatorResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +//GetAvailableOffsets return an offset response or error func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) { response := new(OffsetResponse) @@ -235,9 +328,12 @@ func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, e return response, nil } +//Produce returns a produce response or error func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { - var response *ProduceResponse - var err error + var ( + response *ProduceResponse + err error + ) if request.RequiredAcks == NoResponse { err = b.sendAndReceive(request, nil) @@ -253,11 +349,11 @@ func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { return response, nil } +//Fetch returns a FetchResponse or error func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { response := new(FetchResponse) err := b.sendAndReceive(request, response) - if err != nil { return nil, err } @@ -265,11 +361,11 @@ func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { return response, nil } +//CommitOffset return an Offset commit response or error func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) { response := new(OffsetCommitResponse) err := b.sendAndReceive(request, response) - if err != nil { return nil, err } @@ -277,11 +373,11 @@ func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitRespon return response, nil } +//FetchOffset returns an offset fetch response or error func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) { response := new(OffsetFetchResponse) err := b.sendAndReceive(request, response) - if err != nil { return nil, err } @@ -289,6 +385,7 @@ func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, return response, nil } +//JoinGroup returns a join group response or error func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) { response := new(JoinGroupResponse) @@ -300,6 +397,7 @@ func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error return response, nil } +//SyncGroup returns a sync group response or error func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) { response := new(SyncGroupResponse) @@ -311,6 +409,7 @@ func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, 
error return response, nil } +//LeaveGroup return a leave group response or error func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) { response := new(LeaveGroupResponse) @@ -322,6 +421,7 @@ func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, er return response, nil } +//Heartbeat returns a heartbeat response or error func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) { response := new(HeartbeatResponse) @@ -333,6 +433,7 @@ func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error return response, nil } +//ListGroups return a list group response or error func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) { response := new(ListGroupsResponse) @@ -344,6 +445,7 @@ func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, er return response, nil } +//DescribeGroups return describe group response or error func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) { response := new(DescribeGroupsResponse) @@ -355,7 +457,262 @@ func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroups return response, nil } -func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) { +//ApiVersions return api version response or error +func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) { + response := new(ApiVersionsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//CreateTopics send a create topic request and returns create topic response +func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) { + response := new(CreateTopicsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//DeleteTopics sends a delete topic request and returns delete topic response +func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) { + response := new(DeleteTopicsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//CreatePartitions sends a create partition request and returns create +//partitions response or error +func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) { + response := new(CreatePartitionsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//AlterPartitionReassignments sends a alter partition reassignments request and +//returns alter partition reassignments response +func (b *Broker) AlterPartitionReassignments(request *AlterPartitionReassignmentsRequest) (*AlterPartitionReassignmentsResponse, error) { + response := new(AlterPartitionReassignmentsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//ListPartitionReassignments sends a list partition reassignments request and +//returns list partition reassignments response +func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsRequest) (*ListPartitionReassignmentsResponse, error) { + response := new(ListPartitionReassignmentsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + 
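+// The request/response wrappers in this file all follow the same basic shape:
+// allocate the typed response, run the request through sendAndReceive, and return
+// either the decoded response or the transport/decoding error. A usage sketch from
+// an importing package (the broker variable, topic name, and sizing are
+// illustrative only):
+//
+//	req := &sarama.CreateTopicsRequest{
+//		Timeout: 30 * time.Second,
+//		TopicDetails: map[string]*sarama.TopicDetail{
+//			"demo": {NumPartitions: 3, ReplicationFactor: 1},
+//		},
+//	}
+//	res, err := broker.CreateTopics(req)
+//	if err == nil {
+//		if tErr := res.TopicErrors["demo"]; tErr != nil && tErr.Err != sarama.ErrNoError {
+//			// the broker accepted the request but rejected the topic
+//		}
+//	}
+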
+//DeleteRecords send a request to delete records and return delete record +//response or error +func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) { + response := new(DeleteRecordsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//DescribeAcls sends a describe acl request and returns a response or error +func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) { + response := new(DescribeAclsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//CreateAcls sends a create acl request and returns a response or error +func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) { + response := new(CreateAclsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//DeleteAcls sends a delete acl request and returns a response or error +func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) { + response := new(DeleteAclsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//InitProducerID sends an init producer request and returns a response or error +func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) { + response := new(InitProducerIDResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//AddPartitionsToTxn send a request to add partition to txn and returns +//a response or error +func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) { + response := new(AddPartitionsToTxnResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//AddOffsetsToTxn sends a request to add offsets to txn and returns a response +//or error +func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) { + response := new(AddOffsetsToTxnResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//EndTxn sends a request to end txn and returns a response or error +func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) { + response := new(EndTxnResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//TxnOffsetCommit sends a request to commit transaction offsets and returns +//a response or error +func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) { + response := new(TxnOffsetCommitResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//DescribeConfigs sends a request to describe config and returns a response or +//error +func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) { + response := new(DescribeConfigsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//AlterConfigs sends a request to alter config and return a response or error +func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, 
error) { + response := new(AlterConfigsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//DeleteGroups sends a request to delete groups and returns a response or error +func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) { + response := new(DeleteGroupsResponse) + + if err := b.sendAndReceive(request, response); err != nil { + return nil, err + } + + return response, nil +} + +//DescribeLogDirs sends a request to get the broker's log dir paths and sizes +func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogDirsResponse, error) { + response := new(DescribeLogDirsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +// readFull ensures the conn ReadDeadline has been setup before making a +// call to io.ReadFull +func (b *Broker) readFull(buf []byte) (n int, err error) { + if err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)); err != nil { + return 0, err + } + + return io.ReadFull(b.conn, buf) +} + +// write ensures the conn WriteDeadline has been setup before making a +// call to conn.Write +func (b *Broker) write(buf []byte) (n int, err error) { + if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil { + return 0, err + } + + return b.conn.Write(buf) +} + +func (b *Broker) send(rb protocolBody, promiseResponse bool, responseHeaderVersion int16) (*responsePromise, error) { b.lock.Lock() defer b.lock.Unlock() @@ -376,34 +733,36 @@ func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, return nil, err } - err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) - if err != nil { - return nil, err - } - requestTime := time.Now() - bytes, err := b.conn.Write(buf) + // Will be decremented in responseReceiver (except error or request with NoResponse) + b.addRequestInFlightMetrics(1) + bytes, err := b.write(buf) b.updateOutgoingCommunicationMetrics(bytes) if err != nil { + b.addRequestInFlightMetrics(-1) return nil, err } b.correlationID++ if !promiseResponse { // Record request latency without the response - b.updateRequestLatencyMetrics(time.Since(requestTime)) + b.updateRequestLatencyAndInFlightMetrics(time.Since(requestTime)) return nil, nil } - promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)} + promise := responsePromise{requestTime, req.correlationID, responseHeaderVersion, make(chan []byte), make(chan error)} b.responses <- promise return &promise, nil } -func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error { - promise, err := b.send(req, res != nil) +func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error { + responseHeaderVersion := int16(-1) + if res != nil { + responseHeaderVersion = res.headerVersion() + } + promise, err := b.send(req, res != nil, responseHeaderVersion) if err != nil { return err } @@ -420,7 +779,7 @@ func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error { } } -func (b *Broker) decode(pd packetDecoder) (err error) { +func (b *Broker) decode(pd packetDecoder, version int16) (err error) { b.id, err = pd.getInt32() if err != nil { return err @@ -436,6 +795,13 @@ func (b *Broker) decode(pd packetDecoder) (err error) { return err } + if version >= 1 { + b.rack, err = pd.getNullableString() + if err != nil { + return err + } + } + b.addr = 
net.JoinHostPort(host, fmt.Sprint(port)) if _, _, err := net.SplitHostPort(b.addr); err != nil { return err @@ -444,12 +810,12 @@ func (b *Broker) decode(pd packetDecoder) (err error) { return nil } -func (b *Broker) encode(pe packetEncoder) (err error) { - +func (b *Broker) encode(pe packetEncoder, version int16) (err error) { host, portstr, err := net.SplitHostPort(b.addr) if err != nil { return err } + port, err := strconv.Atoi(portstr) if err != nil { return err @@ -464,26 +830,32 @@ func (b *Broker) encode(pe packetEncoder) (err error) { pe.putInt32(int32(port)) + if version >= 1 { + err = pe.putNullableString(b.rack) + if err != nil { + return err + } + } + return nil } func (b *Broker) responseReceiver() { var dead error - header := make([]byte, 8) + for response := range b.responses { if dead != nil { + // This was previously incremented in send() and + // we are not calling updateIncomingCommunicationMetrics() + b.addRequestInFlightMetrics(-1) response.errors <- dead continue } - err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)) - if err != nil { - dead = err - response.errors <- err - continue - } + var headerLength = getHeaderLength(response.headerVersion) + header := make([]byte, headerLength) - bytesReadHeader, err := io.ReadFull(b.conn, header) + bytesReadHeader, err := b.readFull(header) requestLatency := time.Since(response.requestTime) if err != nil { b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) @@ -493,7 +865,7 @@ func (b *Broker) responseReceiver() { } decodedHeader := responseHeader{} - err = decode(header, &decodedHeader) + err = versionedDecode(header, &decodedHeader, response.headerVersion) if err != nil { b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) dead = err @@ -509,8 +881,8 @@ func (b *Broker) responseReceiver() { continue } - buf := make([]byte, decodedHeader.length-4) - bytesReadBody, err := io.ReadFull(b.conn, buf) + buf := make([]byte, decodedHeader.length-int32(headerLength)+4) + bytesReadBody, err := b.readFull(buf) b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency) if err != nil { dead = err @@ -523,58 +895,96 @@ func (b *Broker) responseReceiver() { close(b.done) } -func (b *Broker) sendAndReceiveSASLPlainHandshake() error { - rb := &SaslHandshakeRequest{"PLAIN"} - req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} - buf, err := encode(req, b.conf.MetricRegistry) - if err != nil { - return err +func getHeaderLength(headerVersion int16) int8 { + if headerVersion < 1 { + return 8 + } else { + // header contains additional tagged field length (0), we don't support actual tags yet. 
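+		// (4-byte length + 4-byte correlation id + 1-byte empty tag buffer introduced by KIP-482 flexible versions)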
+ return 9 + } +} + +func (b *Broker) authenticateViaSASL() error { + switch b.conf.Net.SASL.Mechanism { + case SASLTypeOAuth: + return b.sendAndReceiveSASLOAuth(b.conf.Net.SASL.TokenProvider) + case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512: + return b.sendAndReceiveSASLSCRAMv1() + case SASLTypeGSSAPI: + return b.sendAndReceiveKerberos() + default: + return b.sendAndReceiveSASLPlainAuth() } +} - err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) +func (b *Broker) sendAndReceiveKerberos() error { + b.kerberosAuthenticator.Config = &b.conf.Net.SASL.GSSAPI + if b.kerberosAuthenticator.NewKerberosClientFunc == nil { + b.kerberosAuthenticator.NewKerberosClientFunc = NewKerberosClient + } + return b.kerberosAuthenticator.Authorize(b) +} + +func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int16) error { + rb := &SaslHandshakeRequest{Mechanism: string(saslType), Version: version} + + req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) if err != nil { return err } requestTime := time.Now() - bytes, err := b.conn.Write(buf) + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + bytes, err := b.write(buf) b.updateOutgoingCommunicationMetrics(bytes) if err != nil { + b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error()) return err } b.correlationID++ - //wait for the response + header := make([]byte, 8) // response header - _, err = io.ReadFull(b.conn, header) + _, err = b.readFull(header) if err != nil { + b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error()) return err } + length := binary.BigEndian.Uint32(header[:4]) payload := make([]byte, length-4) - n, err := io.ReadFull(b.conn, payload) + n, err := b.readFull(payload) if err != nil { + b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error()) return err } + b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime)) res := &SaslHandshakeResponse{} + err = versionedDecode(payload, res, 0) if err != nil { Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error()) return err } + if res.Err != ErrNoError { Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error()) return res.Err } - Logger.Print("Successful SASL handshake") + + Logger.Print("Successful SASL handshake. Available mechanisms: ", res.EnabledMechanisms) return nil } -// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149) -// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9 +// Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43). +// Kafka 1.x.x onward added a SaslAuthenticate request/response message which +// wraps the SASL flow in the Kafka protocol, which allows for returning +// meaningful errors on authentication failure. // // In SASL Plain, Kafka expects the auth header to be in the following format // Message format (from https://tools.ietf.org/html/rfc4616): @@ -588,38 +998,50 @@ func (b *Broker) sendAndReceiveSASLPlainHandshake() error { // SAFE = UTF1 / UTF2 / UTF3 / UTF4 // ;; any UTF-8 encoded Unicode character except NUL // +// With SASL v0 handshake and auth then: // When credentials are valid, Kafka returns a 4 byte array of null characters. -// When credentials are invalid, Kafka closes the connection. 
This does not seem to be the ideal way -// of responding to bad credentials but thats how its being done today. +// When credentials are invalid, Kafka closes the connection. +// +// With SASL v1 handshake and auth then: +// When credentials are invalid, Kafka replies with a SaslAuthenticate response +// containing an error code and message detailing the authentication failure. func (b *Broker) sendAndReceiveSASLPlainAuth() error { + // default to V0 to allow for backward compatibility when SASL is enabled + // but not the handshake if b.conf.Net.SASL.Handshake { - handshakeErr := b.sendAndReceiveSASLPlainHandshake() + handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version) if handshakeErr != nil { Logger.Printf("Error while performing SASL handshake %s\n", b.addr) return handshakeErr } } - length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password) - authBytes := make([]byte, length+4) //4 byte length header + auth data - binary.BigEndian.PutUint32(authBytes, uint32(length)) - copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password)) - err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) - if err != nil { - Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error()) - return err + if b.conf.Net.SASL.Version == SASLHandshakeV1 { + return b.sendAndReceiveV1SASLPlainAuth() } + return b.sendAndReceiveV0SASLPlainAuth() +} + +// sendAndReceiveV0SASLPlainAuth flows the v0 sasl auth NOT wrapped in the kafka protocol +func (b *Broker) sendAndReceiveV0SASLPlainAuth() error { + length := len(b.conf.Net.SASL.AuthIdentity) + 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password) + authBytes := make([]byte, length+4) //4 byte length header + auth data + binary.BigEndian.PutUint32(authBytes, uint32(length)) + copy(authBytes[4:], []byte(b.conf.Net.SASL.AuthIdentity+"\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password)) requestTime := time.Now() - bytesWritten, err := b.conn.Write(authBytes) + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + bytesWritten, err := b.write(authBytes) b.updateOutgoingCommunicationMetrics(bytesWritten) if err != nil { + b.addRequestInFlightMetrics(-1) Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) return err } header := make([]byte, 4) - n, err := io.ReadFull(b.conn, header) + n, err := b.readFull(header) b.updateIncomingCommunicationMetrics(n, time.Since(requestTime)) // If the credentials are valid, we would get a 4 byte response filled with null characters. 
// Otherwise, the broker closes the connection and we get an EOF @@ -632,29 +1054,319 @@ func (b *Broker) sendAndReceiveSASLPlainAuth() error { return nil } +// sendAndReceiveV1SASLPlainAuth flows the v1 sasl authentication using the kafka protocol +func (b *Broker) sendAndReceiveV1SASLPlainAuth() error { + correlationID := b.correlationID + + requestTime := time.Now() + + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + bytesWritten, err := b.sendSASLPlainAuthClientResponse(correlationID) + b.updateOutgoingCommunicationMetrics(bytesWritten) + + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) + return err + } + + b.correlationID++ + + bytesRead, err := b.receiveSASLServerResponse(&SaslAuthenticateResponse{}, correlationID) + b.updateIncomingCommunicationMetrics(bytesRead, time.Since(requestTime)) + + // With v1 sasl we get an error message set in the response we can return + if err != nil { + Logger.Printf("Error returned from broker during SASL flow %s: %s\n", b.addr, err.Error()) + return err + } + + return nil +} + +// sendAndReceiveSASLOAuth performs the authentication flow as described by KIP-255 +// https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876 +func (b *Broker) sendAndReceiveSASLOAuth(provider AccessTokenProvider) error { + if err := b.sendAndReceiveSASLHandshake(SASLTypeOAuth, SASLHandshakeV1); err != nil { + return err + } + + token, err := provider.Token() + if err != nil { + return err + } + + message, err := buildClientFirstMessage(token) + if err != nil { + return err + } + + challenged, err := b.sendClientMessage(message) + if err != nil { + return err + } + + if challenged { + // Abort the token exchange. The broker returns the failure code. + _, err = b.sendClientMessage([]byte(`\x01`)) + } + + return err +} + +// sendClientMessage sends a SASL/OAUTHBEARER client message and returns true +// if the broker responds with a challenge, in which case the token is +// rejected. 
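+// Per RFC 7628, a client aborts a failed OAUTHBEARER exchange by replying to the
+// server's challenge with a single %x01 byte.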
+func (b *Broker) sendClientMessage(message []byte) (bool, error) { + requestTime := time.Now() + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + correlationID := b.correlationID + + bytesWritten, err := b.sendSASLOAuthBearerClientMessage(message, correlationID) + b.updateOutgoingCommunicationMetrics(bytesWritten) + if err != nil { + b.addRequestInFlightMetrics(-1) + return false, err + } + + b.correlationID++ + + res := &SaslAuthenticateResponse{} + bytesRead, err := b.receiveSASLServerResponse(res, correlationID) + + requestLatency := time.Since(requestTime) + b.updateIncomingCommunicationMetrics(bytesRead, requestLatency) + + isChallenge := len(res.SaslAuthBytes) > 0 + + if isChallenge && err != nil { + Logger.Printf("Broker rejected authentication token: %s", res.SaslAuthBytes) + } + + return isChallenge, err +} + +func (b *Broker) sendAndReceiveSASLSCRAMv1() error { + if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV1); err != nil { + return err + } + + scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc() + if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil { + return fmt.Errorf("failed to start SCRAM exchange with the server: %s", err.Error()) + } + + msg, err := scramClient.Step("") + if err != nil { + return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error()) + } + + for !scramClient.Done() { + requestTime := time.Now() + // Will be decremented in updateIncomingCommunicationMetrics (except error) + b.addRequestInFlightMetrics(1) + correlationID := b.correlationID + bytesWritten, err := b.sendSaslAuthenticateRequest(correlationID, []byte(msg)) + b.updateOutgoingCommunicationMetrics(bytesWritten) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) + return err + } + + b.correlationID++ + challenge, err := b.receiveSaslAuthenticateResponse(correlationID) + if err != nil { + b.addRequestInFlightMetrics(-1) + Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) + return err + } + + b.updateIncomingCommunicationMetrics(len(challenge), time.Since(requestTime)) + msg, err = scramClient.Step(string(challenge)) + if err != nil { + Logger.Println("SASL authentication failed", err) + return err + } + } + + Logger.Println("SASL authentication succeeded") + return nil +} + +func (b *Broker) sendSaslAuthenticateRequest(correlationID int32, msg []byte) (int, error) { + rb := &SaslAuthenticateRequest{msg} + req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return 0, err + } + + return b.write(buf) +} + +func (b *Broker) receiveSaslAuthenticateResponse(correlationID int32) ([]byte, error) { + buf := make([]byte, responseLengthSize+correlationIDSize) + _, err := b.readFull(buf) + if err != nil { + return nil, err + } + + header := responseHeader{} + err = versionedDecode(buf, &header, 0) + if err != nil { + return nil, err + } + + if header.correlationID != correlationID { + return nil, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID) + } + + buf = make([]byte, header.length-correlationIDSize) + _, err = b.readFull(buf) + if err != nil { + return nil, err + } + + res := &SaslAuthenticateResponse{} + if err := 
versionedDecode(buf, res, 0); err != nil { + return nil, err + } + if res.Err != ErrNoError { + return nil, res.Err + } + return res.SaslAuthBytes, nil +} + +// Build SASL/OAUTHBEARER initial client response as described by RFC-7628 +// https://tools.ietf.org/html/rfc7628 +func buildClientFirstMessage(token *AccessToken) ([]byte, error) { + var ext string + + if token.Extensions != nil && len(token.Extensions) > 0 { + if _, ok := token.Extensions[SASLExtKeyAuth]; ok { + return []byte{}, fmt.Errorf("the extension `%s` is invalid", SASLExtKeyAuth) + } + ext = "\x01" + mapToString(token.Extensions, "=", "\x01") + } + + resp := []byte(fmt.Sprintf("n,,\x01auth=Bearer %s%s\x01\x01", token.Token, ext)) + + return resp, nil +} + +// mapToString returns a list of key-value pairs ordered by key. +// keyValSep separates the key from the value. elemSep separates each pair. +func mapToString(extensions map[string]string, keyValSep string, elemSep string) string { + buf := make([]string, 0, len(extensions)) + + for k, v := range extensions { + buf = append(buf, k+keyValSep+v) + } + + sort.Strings(buf) + + return strings.Join(buf, elemSep) +} + +func (b *Broker) sendSASLPlainAuthClientResponse(correlationID int32) (int, error) { + authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password) + rb := &SaslAuthenticateRequest{authBytes} + req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return 0, err + } + + return b.write(buf) +} + +func (b *Broker) sendSASLOAuthBearerClientMessage(initialResp []byte, correlationID int32) (int, error) { + rb := &SaslAuthenticateRequest{initialResp} + + req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb} + + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return 0, err + } + + return b.write(buf) +} + +func (b *Broker) receiveSASLServerResponse(res *SaslAuthenticateResponse, correlationID int32) (int, error) { + buf := make([]byte, responseLengthSize+correlationIDSize) + bytesRead, err := b.readFull(buf) + if err != nil { + return bytesRead, err + } + + header := responseHeader{} + err = versionedDecode(buf, &header, 0) + if err != nil { + return bytesRead, err + } + + if header.correlationID != correlationID { + return bytesRead, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID) + } + + buf = make([]byte, header.length-correlationIDSize) + c, err := b.readFull(buf) + bytesRead += c + if err != nil { + return bytesRead, err + } + + if err := versionedDecode(buf, res, 0); err != nil { + return bytesRead, err + } + + if res.Err != ErrNoError { + return bytesRead, res.Err + } + + return bytesRead, nil +} + func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) { - b.updateRequestLatencyMetrics(requestLatency) + b.updateRequestLatencyAndInFlightMetrics(requestLatency) b.responseRate.Mark(1) + if b.brokerResponseRate != nil { b.brokerResponseRate.Mark(1) } + responseSize := int64(bytes) b.incomingByteRate.Mark(responseSize) if b.brokerIncomingByteRate != nil { b.brokerIncomingByteRate.Mark(responseSize) } + b.responseSize.Update(responseSize) if b.brokerResponseSize != nil { b.brokerResponseSize.Update(responseSize) } } -func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) { +func (b *Broker) updateRequestLatencyAndInFlightMetrics(requestLatency time.Duration) { 
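+	// record the latency in milliseconds and release the in-flight slot taken in send()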
requestLatencyInMs := int64(requestLatency / time.Millisecond) b.requestLatency.Update(requestLatencyInMs) + if b.brokerRequestLatency != nil { b.brokerRequestLatency.Update(requestLatencyInMs) } + + b.addRequestInFlightMetrics(-1) +} + +func (b *Broker) addRequestInFlightMetrics(i int64) { + b.requestsInFlight.Inc(i) + if b.brokerRequestsInFlight != nil { + b.brokerRequestsInFlight.Inc(i) + } } func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { @@ -662,13 +1374,68 @@ func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { if b.brokerRequestRate != nil { b.brokerRequestRate.Mark(1) } + requestSize := int64(bytes) b.outgoingByteRate.Mark(requestSize) if b.brokerOutgoingByteRate != nil { b.brokerOutgoingByteRate.Mark(requestSize) } + b.requestSize.Update(requestSize) if b.brokerRequestSize != nil { b.brokerRequestSize.Update(requestSize) } } + +func (b *Broker) registerMetrics() { + b.brokerIncomingByteRate = b.registerMeter("incoming-byte-rate") + b.brokerRequestRate = b.registerMeter("request-rate") + b.brokerRequestSize = b.registerHistogram("request-size") + b.brokerRequestLatency = b.registerHistogram("request-latency-in-ms") + b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate") + b.brokerResponseRate = b.registerMeter("response-rate") + b.brokerResponseSize = b.registerHistogram("response-size") + b.brokerRequestsInFlight = b.registerCounter("requests-in-flight") +} + +func (b *Broker) unregisterMetrics() { + for _, name := range b.registeredMetrics { + b.conf.MetricRegistry.Unregister(name) + } + b.registeredMetrics = nil +} + +func (b *Broker) registerMeter(name string) metrics.Meter { + nameForBroker := getMetricNameForBroker(name, b) + b.registeredMetrics = append(b.registeredMetrics, nameForBroker) + return metrics.GetOrRegisterMeter(nameForBroker, b.conf.MetricRegistry) +} + +func (b *Broker) registerHistogram(name string) metrics.Histogram { + nameForBroker := getMetricNameForBroker(name, b) + b.registeredMetrics = append(b.registeredMetrics, nameForBroker) + return getOrRegisterHistogram(nameForBroker, b.conf.MetricRegistry) +} + +func (b *Broker) registerCounter(name string) metrics.Counter { + nameForBroker := getMetricNameForBroker(name, b) + b.registeredMetrics = append(b.registeredMetrics, nameForBroker) + return metrics.GetOrRegisterCounter(nameForBroker, b.conf.MetricRegistry) +} + +func validServerNameTLS(addr string, cfg *tls.Config) *tls.Config { + if cfg == nil { + cfg = &tls.Config{} + } + if cfg.ServerName != "" { + return cfg + } + + c := cfg.Clone() + sn, _, err := net.SplitHostPort(addr) + if err != nil { + Logger.Println(fmt.Errorf("failed to get ServerName from addr %w", err)) + } + c.ServerName = sn + return c +} diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go index f869a143..c3392f96 100644 --- a/vendor/github.com/Shopify/sarama/client.go +++ b/vendor/github.com/Shopify/sarama/client.go @@ -17,6 +17,18 @@ type Client interface { // altered after it has been created. Config() *Config + // Controller returns the cluster controller broker. It will return a + // locally cached value if it's available. You can call RefreshController + // to update the cached value. Requires Kafka 0.10 or higher. + Controller() (*Broker, error) + + // RefreshController retrieves the cluster controller from fresh metadata + // and stores it in the local cache. Requires Kafka 0.10 or higher. 
+ RefreshController() (*Broker, error) + + // Brokers returns the current set of active brokers as retrieved from cluster metadata. + Brokers() []*Broker + // Topics returns the set of available topics as retrieved from cluster metadata. Topics() ([]string, error) @@ -35,15 +47,24 @@ type Client interface { // Replicas returns the set of all replica IDs for the given partition. Replicas(topic string, partitionID int32) ([]int32, error) + // InSyncReplicas returns the set of all in-sync replica IDs for the given + // partition. In-sync replicas are replicas which are fully caught up with + // the partition leader. + InSyncReplicas(topic string, partitionID int32) ([]int32, error) + + // OfflineReplicas returns the set of all offline replica IDs for the given + // partition. Offline replicas are replicas which are offline + OfflineReplicas(topic string, partitionID int32) ([]int32, error) + // RefreshMetadata takes a list of topics and queries the cluster to refresh the // available metadata for those topics. If no topics are provided, it will refresh // metadata for all topics. RefreshMetadata(topics ...string) error // GetOffset queries the cluster to get the most recent available offset at the - // given time on the topic/partition combination. Time should be OffsetOldest for - // the earliest available offset, OffsetNewest for the offset of the message that - // will be produced next, or a time. + // given time (in milliseconds) on the topic/partition combination. + // Time should be OffsetOldest for the earliest available offset, + // OffsetNewest for the offset of the message that will be produced next, or a time. GetOffset(topic string, partitionID int32, time int64) (int64, error) // Coordinator returns the coordinating broker for a consumer group. It will @@ -56,6 +77,9 @@ type Client interface { // in local cache. This function only works on Kafka 0.8.2 and higher. RefreshCoordinator(consumerGroup string) error + // InitProducerID retrieves information required for Idempotent Producer + InitProducerID() (*InitProducerIDResponse, error) + // Close shuts down all broker connections managed by this client. It is required // to call this function before a client object passes out of scope, as it will // otherwise leak memory. You must close any Producers or Consumers using a client @@ -89,9 +113,11 @@ type client struct { seedBrokers []*Broker deadSeeds []*Broker - brokers map[int32]*Broker // maps broker ids to brokers - metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata - coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs + controllerID int32 // cluster controller broker id + brokers map[int32]*Broker // maps broker ids to brokers + metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata + metadataTopics map[string]none // topics that need to collect metadata + coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs // If the number of partitions is large, we can get some churn calling cachedPartitions, // so the result is cached. 
It is important to update this value whenever metadata is changed @@ -124,6 +150,7 @@ func NewClient(addrs []string, conf *Config) (Client, error) { closed: make(chan none), brokers: make(map[int32]*Broker), metadata: make(map[string]map[int32]*PartitionMetadata), + metadataTopics: make(map[string]none), cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32), coordinators: make(map[string]int32), } @@ -133,18 +160,20 @@ func NewClient(addrs []string, conf *Config) (Client, error) { client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) } - // do an initial fetch of all cluster metadata by specifing an empty list of topics - err := client.RefreshMetadata() - switch err { - case nil: - break - case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed: - // indicates that maybe part of the cluster is down, but is not fatal to creating the client - Logger.Println(err) - default: - close(client.closed) // we haven't started the background updater yet, so we have to do this manually - _ = client.Close() - return nil, err + if conf.Metadata.Full { + // do an initial fetch of all cluster metadata by specifying an empty list of topics + err := client.RefreshMetadata() + switch err { + case nil: + break + case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed: + // indicates that maybe part of the cluster is down, but is not fatal to creating the client + Logger.Println(err) + default: + close(client.closed) // we haven't started the background updater yet, so we have to do this manually + _ = client.Close() + return nil, err + } } go withRecover(client.backgroundMetadataUpdater) @@ -157,6 +186,35 @@ func (client *client) Config() *Config { return client.conf } +func (client *client) Brokers() []*Broker { + client.lock.RLock() + defer client.lock.RUnlock() + brokers := make([]*Broker, 0, len(client.brokers)) + for _, broker := range client.brokers { + brokers = append(brokers, broker) + } + return brokers +} + +func (client *client) InitProducerID() (*InitProducerIDResponse, error) { + var err error + for broker := client.any(); broker != nil; broker = client.any() { + req := &InitProducerIDRequest{} + + response, err := broker.InitProducerID(req) + switch err.(type) { + case nil: + return response, nil + default: + // some error, remove that broker and try again + Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err) + _ = broker.Close() + client.deregisterBroker(broker) + } + } + return nil, err +} + func (client *client) Close() error { if client.Closed() { // Chances are this is being called from a defer() and the error will go unobserved @@ -183,11 +241,15 @@ func (client *client) Close() error { client.brokers = nil client.metadata = nil + client.metadataTopics = nil return nil } func (client *client) Closed() bool { + client.lock.RLock() + defer client.lock.RUnlock() + return client.brokers == nil } @@ -207,6 +269,22 @@ func (client *client) Topics() ([]string, error) { return ret, nil } +func (client *client) MetadataTopics() ([]string, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + client.lock.RLock() + defer client.lock.RUnlock() + + ret := make([]string, 0, len(client.metadataTopics)) + for topic := range client.metadataTopics { + ret = append(ret, topic) + } + + return ret, nil +} + func (client *client) Partitions(topic string) ([]int32, error) { if client.Closed() { return nil, 
ErrClosedClient @@ -222,7 +300,8 @@ func (client *client) Partitions(topic string) ([]int32, error) { partitions = client.cachedPartitions(topic, allPartitions) } - if partitions == nil { + // no partitions found after refresh metadata + if len(partitions) == 0 { return nil, ErrUnknownTopicOrPartition } @@ -277,9 +356,59 @@ func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) } if metadata.Err == ErrReplicaNotAvailable { - return nil, metadata.Err + return dupInt32Slice(metadata.Replicas), metadata.Err } - return dupeAndSort(metadata.Replicas), nil + return dupInt32Slice(metadata.Replicas), nil +} + +func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + metadata := client.cachedMetadata(topic, partitionID) + + if metadata == nil { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + metadata = client.cachedMetadata(topic, partitionID) + } + + if metadata == nil { + return nil, ErrUnknownTopicOrPartition + } + + if metadata.Err == ErrReplicaNotAvailable { + return dupInt32Slice(metadata.Isr), metadata.Err + } + return dupInt32Slice(metadata.Isr), nil +} + +func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + metadata := client.cachedMetadata(topic, partitionID) + + if metadata == nil { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + metadata = client.cachedMetadata(topic, partitionID) + } + + if metadata == nil { + return nil, ErrUnknownTopicOrPartition + } + + if metadata.Err == ErrReplicaNotAvailable { + return dupInt32Slice(metadata.OfflineReplicas), metadata.Err + } + return dupInt32Slice(metadata.OfflineReplicas), nil } func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { @@ -314,7 +443,11 @@ func (client *client) RefreshMetadata(topics ...string) error { } } - return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max) + deadline := time.Time{} + if client.conf.Metadata.Timeout > 0 { + deadline = time.Now().Add(client.conf.Metadata.Timeout) + } + return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline) } func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) { @@ -334,6 +467,60 @@ func (client *client) GetOffset(topic string, partitionID int32, time int64) (in return offset, err } +func (client *client) Controller() (*Broker, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + if !client.conf.Version.IsAtLeast(V0_10_0_0) { + return nil, ErrUnsupportedVersion + } + + controller := client.cachedController() + if controller == nil { + if err := client.refreshMetadata(); err != nil { + return nil, err + } + controller = client.cachedController() + } + + if controller == nil { + return nil, ErrControllerNotAvailable + } + + _ = controller.Open(client.conf) + return controller, nil +} + +// deregisterController removes the cached controllerID +func (client *client) deregisterController() { + client.lock.Lock() + defer client.lock.Unlock() + delete(client.brokers, client.controllerID) +} + +// RefreshController retrieves the cluster controller from fresh metadata +// and stores it in the local cache. Requires Kafka 0.10 or higher. 
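// Editor's illustrative sketch (not part of the patch): how a caller might use the
// Controller/RefreshController methods added in this hunk. The broker address,
// variable names and error handling are assumptions for illustration only.
//
//	cfg := sarama.NewConfig()
//	cfg.Version = sarama.V0_10_0_0 // Controller() requires Kafka 0.10 or higher
//	client, err := sarama.NewClient([]string{"localhost:9092"}, cfg)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer client.Close()
//
//	controller, err := client.Controller() // uses the locally cached controller when available
//	if err == sarama.ErrControllerNotAvailable {
//		// drop the cached value and re-read the controller from fresh metadata
//		controller, err = client.RefreshController()
//	}
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Println("cluster controller:", controller.Addr())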
+func (client *client) RefreshController() (*Broker, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + client.deregisterController() + + if err := client.refreshMetadata(); err != nil { + return nil, err + } + + controller := client.cachedController() + if controller == nil { + return nil, ErrControllerNotAvailable + } + + _ = controller.Open(client.conf) + return controller, nil +} + func (client *client) Coordinator(consumerGroup string) (*Broker, error) { if client.Closed() { return nil, ErrClosedClient @@ -375,10 +562,39 @@ func (client *client) RefreshCoordinator(consumerGroup string) error { // private broker management helpers +func (client *client) updateBroker(brokers []*Broker) { + var currentBroker = make(map[int32]*Broker, len(brokers)) + + for _, broker := range brokers { + currentBroker[broker.ID()] = broker + if client.brokers[broker.ID()] == nil { // add new broker + client.brokers[broker.ID()] = broker + Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) + } else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address + safeAsyncClose(client.brokers[broker.ID()]) + client.brokers[broker.ID()] = broker + Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr()) + } + } + + for id, broker := range client.brokers { + if _, exist := currentBroker[id]; !exist { // remove old broker + safeAsyncClose(broker) + delete(client.brokers, id) + Logger.Printf("client/broker remove invalid broker #%d with %s", broker.ID(), broker.Addr()) + } + } +} + // registerBroker makes sure a broker received by a Metadata or Coordinator request is registered // in the brokers map. It returns the broker that is registered, which may be the provided broker, // or a previously registered Broker instance. You must hold the write lock before calling this function. 
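// Editor's illustrative sketch (not part of the patch): the configuration knobs that
// drive the refreshMetadata/tryRefreshMetadata/computeBackoff logic in the hunks
// below. The topic names and concrete durations are assumptions.
//
//	cfg := sarama.NewConfig()
//	cfg.Metadata.Full = false               // track metadata only for topics actually used
//	cfg.Metadata.Timeout = 10 * time.Second // overall deadline passed to tryRefreshMetadata
//	cfg.Metadata.Retry.Max = 5
//	cfg.Metadata.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
//		// linear backoff; takes precedence over Metadata.Retry.Backoff when set
//		return time.Duration(retries+1) * 250 * time.Millisecond
//	}
//
//	client, err := sarama.NewClient([]string{"localhost:9092"}, cfg)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer client.Close()
//
//	// With Metadata.Full = false, refresh only the topics this client cares about.
//	if err := client.RefreshMetadata("orders", "payments"); err != nil {
//		log.Println("metadata refresh failed:", err)
//	}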
func (client *client) registerBroker(broker *Broker) { + if client.brokers == nil { + Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr()) + return + } + if client.brokers[broker.ID()] == nil { client.brokers[broker.ID()] = broker Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) @@ -562,7 +778,7 @@ func (client *client) backgroundMetadataUpdater() { for { select { case <-ticker.C: - if err := client.RefreshMetadata(); err != nil { + if err := client.refreshMetadata(); err != nil { Logger.Println("Client background metadata update:", err) } case <-client.closer: @@ -571,70 +787,152 @@ func (client *client) backgroundMetadataUpdater() { } } -func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error { +func (client *client) refreshMetadata() error { + topics := []string{} + + if !client.conf.Metadata.Full { + if specificTopics, err := client.MetadataTopics(); err != nil { + return err + } else if len(specificTopics) == 0 { + return ErrNoTopicsToUpdateMetadata + } else { + topics = specificTopics + } + } + + if err := client.RefreshMetadata(topics...); err != nil { + return err + } + + return nil +} + +func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, deadline time.Time) error { + pastDeadline := func(backoff time.Duration) bool { + if !deadline.IsZero() && time.Now().Add(backoff).After(deadline) { + // we are past the deadline + return true + } + return false + } retry := func(err error) error { if attemptsRemaining > 0 { - Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) - time.Sleep(client.conf.Metadata.Retry.Backoff) - return client.tryRefreshMetadata(topics, attemptsRemaining-1) + backoff := client.computeBackoff(attemptsRemaining) + if pastDeadline(backoff) { + Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout") + return err + } + Logger.Printf("client/metadata retrying after %dms... 
(%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) + if backoff > 0 { + time.Sleep(backoff) + } + return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline) } return err } - for broker := client.any(); broker != nil; broker = client.any() { + broker := client.any() + for ; broker != nil && !pastDeadline(0); broker = client.any() { + allowAutoTopicCreation := true if len(topics) > 0 { Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr) } else { + allowAutoTopicCreation = false Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr) } - response, err := broker.GetMetadata(&MetadataRequest{Topics: topics}) + req := &MetadataRequest{Topics: topics, AllowAutoTopicCreation: allowAutoTopicCreation} + if client.conf.Version.IsAtLeast(V1_0_0_0) { + req.Version = 5 + } else if client.conf.Version.IsAtLeast(V0_10_0_0) { + req.Version = 1 + } + response, err := broker.GetMetadata(req) switch err.(type) { case nil: + allKnownMetaData := len(topics) == 0 // valid response, use it - if shouldRetry, err := client.updateMetadata(response); shouldRetry { + shouldRetry, err := client.updateMetadata(response, allKnownMetaData) + if shouldRetry { Logger.Println("client/metadata found some partitions to be leaderless") return retry(err) // note: err can be nil - } else { - return err } + return err case PacketEncodingError: // didn't even send, return the error return err + + case KError: + // if SASL auth error return as this _should_ be a non retryable err for all brokers + if err.(KError) == ErrSASLAuthenticationFailed { + Logger.Println("client/metadata failed SASL authentication") + return err + } + + if err.(KError) == ErrTopicAuthorizationFailed { + Logger.Println("client is not authorized to access this topic. 
The topics were: ", topics) + return err + } + // else remove that broker and try again + Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err) + _ = broker.Close() + client.deregisterBroker(broker) + default: // some other error, remove that broker and try again - Logger.Println("client/metadata got error from broker while fetching metadata:", err) + Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err) _ = broker.Close() client.deregisterBroker(broker) } } + if broker != nil { + Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr) + return retry(ErrOutOfBrokers) + } + Logger.Println("client/metadata no available broker to send metadata request to") client.resurrectDeadBrokers() return retry(ErrOutOfBrokers) } // if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable -func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) { +func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) { + if client.Closed() { + return + } + client.lock.Lock() defer client.lock.Unlock() // For all the brokers we received: // - if it is a new ID, save it // - if it is an existing ID, but the address we have is stale, discard the old one and save it + // - if some brokers is not exist in it, remove old broker // - otherwise ignore it, replacing our existing one would just bounce the connection - for _, broker := range data.Brokers { - client.registerBroker(broker) - } + client.updateBroker(data.Brokers) + + client.controllerID = data.ControllerID + if allKnownMetaData { + client.metadata = make(map[string]map[int32]*PartitionMetadata) + client.metadataTopics = make(map[string]none) + client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32) + } for _, topic := range data.Topics { + // topics must be added firstly to `metadataTopics` to guarantee that all + // requested topics must be recorded to keep them trackable for periodically + // metadata refresh. 
+ if _, exists := client.metadataTopics[topic.Name]; !exists { + client.metadataTopics[topic.Name] = none{} + } delete(client.metadata, topic.Name) delete(client.cachedPartitionsResults, topic.Name) switch topic.Err { case ErrNoError: - break + // no-op case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results err = topic.Err continue @@ -644,7 +942,6 @@ func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err er continue case ErrLeaderNotAvailable: // retry, but store partial partition results retry = true - break default: // don't retry, don't store partial results Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err) err = topic.Err @@ -677,11 +974,28 @@ func (client *client) cachedCoordinator(consumerGroup string) *Broker { return nil } -func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) { - retry := func(err error) (*ConsumerMetadataResponse, error) { +func (client *client) cachedController() *Broker { + client.lock.RLock() + defer client.lock.RUnlock() + + return client.brokers[client.controllerID] +} + +func (client *client) computeBackoff(attemptsRemaining int) time.Duration { + if client.conf.Metadata.Retry.BackoffFunc != nil { + maxRetries := client.conf.Metadata.Retry.Max + retries := maxRetries - attemptsRemaining + return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries) + } + return client.conf.Metadata.Retry.Backoff +} + +func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) { + retry := func(err error) (*FindCoordinatorResponse, error) { if attemptsRemaining > 0 { - Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) - time.Sleep(client.conf.Metadata.Retry.Backoff) + backoff := client.computeBackoff(attemptsRemaining) + Logger.Printf("client/coordinator retrying after %dms... 
(%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining) + time.Sleep(backoff) return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1) } return nil, err @@ -690,10 +1004,11 @@ func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemainin for broker := client.any(); broker != nil; broker = client.any() { Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr()) - request := new(ConsumerMetadataRequest) - request.ConsumerGroup = consumerGroup + request := new(FindCoordinatorRequest) + request.CoordinatorKey = consumerGroup + request.CoordinatorType = CoordinatorGroup - response, err := broker.GetConsumerMetadata(request) + response, err := broker.FindCoordinator(request) if err != nil { Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err) @@ -725,6 +1040,10 @@ func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemainin } return retry(ErrConsumerCoordinatorNotAvailable) + case ErrGroupAuthorizationFailed: + Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", consumerGroup) + return retry(ErrGroupAuthorizationFailed) + default: return nil, response.Err } @@ -734,3 +1053,18 @@ func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemainin client.resurrectDeadBrokers() return retry(ErrOutOfBrokers) } + +// nopCloserClient embeds an existing Client, but disables +// the Close method (yet all other methods pass +// through unchanged). This is for use in larger structs +// where it is undesirable to close the client that was +// passed in by the caller. +type nopCloserClient struct { + Client +} + +// Close intercepts and purposely does not call the underlying +// client's Close() method. 
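// Editor's illustrative sketch (not part of the patch): the practical effect of the
// nopCloserClient wrapper defined in this hunk. Constructors that accept an existing
// Client (for example NewConsumerFromClient in the consumer.go hunk further down)
// wrap it so that closing the derived object does not close the caller's client.
// Variable names are assumptions.
//
//	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
//	if err != nil {
//		log.Fatal(err)
//	}
//
//	consumer, err := sarama.NewConsumerFromClient(client)
//	if err != nil {
//		log.Fatal(err)
//	}
//
//	// ... consume ...
//
//	_ = consumer.Close() // does not close the shared client, thanks to nopCloserClient
//	_ = client.Close()   // the caller still owns, and must close, the client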
+func (ncc *nopCloserClient) Close() error { + return nil +} diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/Shopify/sarama/compress.go new file mode 100644 index 00000000..12cd7c3d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/compress.go @@ -0,0 +1,194 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "sync" + + snappy "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4" +) + +var ( + lz4WriterPool = sync.Pool{ + New: func() interface{} { + return lz4.NewWriter(nil) + }, + } + + gzipWriterPool = sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, + } + gzipWriterPoolForCompressionLevel1 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 1) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel2 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 2) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel3 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 3) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel4 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 4) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel5 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 5) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel6 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 6) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel7 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 7) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel8 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 8) + if err != nil { + panic(err) + } + return gz + }, + } + gzipWriterPoolForCompressionLevel9 = sync.Pool{ + New: func() interface{} { + gz, err := gzip.NewWriterLevel(nil, 9) + if err != nil { + panic(err) + } + return gz + }, + } +) + +func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) { + switch cc { + case CompressionNone: + return data, nil + case CompressionGZIP: + var ( + err error + buf bytes.Buffer + writer *gzip.Writer + ) + + switch level { + case CompressionLevelDefault: + writer = gzipWriterPool.Get().(*gzip.Writer) + defer gzipWriterPool.Put(writer) + writer.Reset(&buf) + case 1: + writer = gzipWriterPoolForCompressionLevel1.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel1.Put(writer) + writer.Reset(&buf) + case 2: + writer = gzipWriterPoolForCompressionLevel2.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel2.Put(writer) + writer.Reset(&buf) + case 3: + writer = gzipWriterPoolForCompressionLevel3.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel3.Put(writer) + writer.Reset(&buf) + case 4: + writer = gzipWriterPoolForCompressionLevel4.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel4.Put(writer) + writer.Reset(&buf) + case 5: + writer = gzipWriterPoolForCompressionLevel5.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel5.Put(writer) + writer.Reset(&buf) + case 6: + writer = gzipWriterPoolForCompressionLevel6.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel6.Put(writer) + 
writer.Reset(&buf) + case 7: + writer = gzipWriterPoolForCompressionLevel7.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel7.Put(writer) + writer.Reset(&buf) + case 8: + writer = gzipWriterPoolForCompressionLevel8.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel8.Put(writer) + writer.Reset(&buf) + case 9: + writer = gzipWriterPoolForCompressionLevel9.Get().(*gzip.Writer) + defer gzipWriterPoolForCompressionLevel9.Put(writer) + writer.Reset(&buf) + default: + writer, err = gzip.NewWriterLevel(&buf, level) + if err != nil { + return nil, err + } + } + if _, err := writer.Write(data); err != nil { + return nil, err + } + if err := writer.Close(); err != nil { + return nil, err + } + return buf.Bytes(), nil + case CompressionSnappy: + return snappy.Encode(data), nil + case CompressionLZ4: + writer := lz4WriterPool.Get().(*lz4.Writer) + defer lz4WriterPool.Put(writer) + + var buf bytes.Buffer + writer.Reset(&buf) + + if _, err := writer.Write(data); err != nil { + return nil, err + } + if err := writer.Close(); err != nil { + return nil, err + } + return buf.Bytes(), nil + case CompressionZSTD: + return zstdCompress(nil, data) + default: + return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)} + } +} diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go index a417a38b..0ce308f8 100644 --- a/vendor/github.com/Shopify/sarama/config.go +++ b/vendor/github.com/Shopify/sarama/config.go @@ -1,11 +1,16 @@ package sarama import ( + "compress/gzip" "crypto/tls" + "fmt" + "io/ioutil" + "net" "regexp" "time" "github.com/rcrowley/go-metrics" + "golang.org/x/net/proxy" ) const defaultClientID = "sarama" @@ -14,6 +19,20 @@ var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`) // Config is used to pass multiple configuration options to Sarama's constructors. type Config struct { + // Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client. + Admin struct { + Retry struct { + // The total number of times to retry sending (retriable) admin requests (default 5). + // Similar to the `retries` setting of the JVM AdminClientConfig. + Max int + // Backoff time between retries of a failed request (default 100ms) + Backoff time.Duration + } + // The maximum duration the administrative Kafka client will wait for ClusterAdmin operations, + // including topics, brokers, configurations and ACLs (defaults to 3 seconds). + Timeout time.Duration + } + // Net is the namespace for network-level properties used by the Broker, and // shared by the Client/Producer/Consumer. Net struct { @@ -43,18 +62,58 @@ type Config struct { // Whether or not to use SASL authentication when connecting to the broker // (defaults to false). Enable bool + // SASLMechanism is the name of the enabled SASL mechanism. + // Possible values: OAUTHBEARER, PLAIN (defaults to PLAIN). + Mechanism SASLMechanism + // Version is the SASL Protocol Version to use + // Kafka > 1.x should use V1, except on Azure EventHub which use V0 + Version int16 // Whether or not to send the Kafka SASL handshake first if enabled // (defaults to true). You should only set this to false if you're using // a non-Kafka SASL proxy. 
Handshake bool - //username and password for SASL/PLAIN authentication - User string + // AuthIdentity is an (optional) authorization identity (authzid) to + // use for SASL/PLAIN authentication (if different from User) when + // an authenticated user is permitted to act as the presented + // alternative user. See RFC4616 for details. + AuthIdentity string + // User is the authentication identity (authcid) to present for + // SASL/PLAIN or SASL/SCRAM authentication + User string + // Password for SASL/PLAIN authentication Password string + // authz id used for SASL/SCRAM authentication + SCRAMAuthzID string + // SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM + // client used to perform the SCRAM exchange with the server. + SCRAMClientGeneratorFunc func() SCRAMClient + // TokenProvider is a user-defined callback for generating + // access tokens for SASL/OAUTHBEARER auth. See the + // AccessTokenProvider interface docs for proper implementation + // guidelines. + TokenProvider AccessTokenProvider + + GSSAPI GSSAPIConfig } - // KeepAlive specifies the keep-alive period for an active network connection. - // If zero, keep-alives are disabled. (default is 0: disabled). + // KeepAlive specifies the keep-alive period for an active network connection (defaults to 0). + // If zero or positive, keep-alives are enabled. + // If negative, keep-alives are disabled. KeepAlive time.Duration + + // LocalAddr is the local address to use when dialing an + // address. The address must be of a compatible type for the + // network being dialed. + // If nil, a local address is automatically chosen. + LocalAddr net.Addr + + Proxy struct { + // Whether or not to use proxy when connecting to the broker + // (defaults to false). + Enable bool + // The proxy dialer to use enabled (defaults to nil). + Dialer proxy.Dialer + } } // Metadata is the namespace for metadata management properties used by the @@ -67,11 +126,28 @@ type Config struct { // How long to wait for leader election to occur before retrying // (default 250ms). Similar to the JVM's `retry.backoff.ms`. Backoff time.Duration + // Called to compute backoff time dynamically. Useful for implementing + // more sophisticated backoff strategies. This takes precedence over + // `Backoff` if set. + BackoffFunc func(retries, maxRetries int) time.Duration } // How frequently to refresh the cluster metadata in the background. // Defaults to 10 minutes. Set to 0 to disable. Similar to // `topic.metadata.refresh.interval.ms` in the JVM version. RefreshFrequency time.Duration + + // Whether to maintain a full set of metadata for all topics, or just + // the minimal set that has been necessary so far. The full set is simpler + // and usually more convenient, but can take up a substantial amount of + // memory if you have many topics and partitions. Defaults to true. + Full bool + + // How long to wait for a successful metadata response. + // Disabled by default which means a metadata request against an unreachable + // cluster (all brokers are unreachable or unresponsive) can take up to + // `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max` + // to fail. + Timeout time.Duration } // Producer is the namespace for configuration related to producing messages, @@ -93,13 +169,23 @@ type Config struct { // The type of compression to use on messages (defaults to no compression). // Similar to `compression.codec` setting of the JVM producer. 
Compression CompressionCodec + // The level of compression to use on messages. The meaning depends + // on the actual compression type used and defaults to default compression + // level for the codec. + CompressionLevel int // Generates partitioners for choosing the partition to send messages to // (defaults to hashing the message key). Similar to the `partitioner.class` // setting for the JVM producer. Partitioner PartitionerConstructor + // If enabled, the producer will ensure that exactly one copy of each message is + // written. + Idempotent bool // Return specifies what channels will be populated. If they are set to true, - // you must read from the respective channels to prevent deadlock. + // you must read from the respective channels to prevent deadlock. If, + // however, this config is used to create a `SyncProducer`, both must be set + // to true and you shall not read from the channels since the producer does + // this internally. Return struct { // If enabled, successfully delivered messages will be returned on the // Successes channel (default disabled). @@ -138,23 +224,72 @@ type Config struct { // (default 100ms). Similar to the `retry.backoff.ms` setting of the // JVM producer. Backoff time.Duration + // Called to compute backoff time dynamically. Useful for implementing + // more sophisticated backoff strategies. This takes precedence over + // `Backoff` if set. + BackoffFunc func(retries, maxRetries int) time.Duration } } // Consumer is the namespace for configuration related to consuming messages, // used by the Consumer. - // - // Note that Sarama's Consumer type does not currently support automatic - // consumer-group rebalancing and offset tracking. For Zookeeper-based - // tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka - // library builds on Sarama to add this support. For Kafka-based tracking - // (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library - // builds on Sarama to add this support. Consumer struct { + + // Group is the namespace for configuring consumer group. + Group struct { + Session struct { + // The timeout used to detect consumer failures when using Kafka's group management facility. + // The consumer sends periodic heartbeats to indicate its liveness to the broker. + // If no heartbeats are received by the broker before the expiration of this session timeout, + // then the broker will remove this consumer from the group and initiate a rebalance. + // Note that the value must be in the allowable range as configured in the broker configuration + // by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s) + Timeout time.Duration + } + Heartbeat struct { + // The expected time between heartbeats to the consumer coordinator when using Kafka's group + // management facilities. Heartbeats are used to ensure that the consumer's session stays active and + // to facilitate rebalancing when new consumers join or leave the group. + // The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no + // higher than 1/3 of that value. + // It can be adjusted even lower to control the expected time for normal rebalances (default 3s) + Interval time.Duration + } + Rebalance struct { + // Strategy for allocating topic partitions to members (default BalanceStrategyRange) + Strategy BalanceStrategy + // The maximum allowed time for each worker to join the group once a rebalance has begun. 
+ // This is basically a limit on the amount of time needed for all tasks to flush any pending + // data and commit offsets. If the timeout is exceeded, then the worker will be removed from + // the group, which will cause offset commit failures (default 60s). + Timeout time.Duration + + Retry struct { + // When a new consumer joins a consumer group the set of consumers attempt to "rebalance" + // the load to assign partitions to each consumer. If the set of consumers changes while + // this assignment is taking place the rebalance will fail and retry. This setting controls + // the maximum number of attempts before giving up (default 4). + Max int + // Backoff time between retries during rebalance (default 2s) + Backoff time.Duration + } + } + Member struct { + // Custom metadata to include when joining the group. The user data for all joined members + // can be retrieved by sending a DescribeGroupRequest to the broker that is the + // coordinator for the group. + UserData []byte + } + } + Retry struct { // How long to wait after a failing to read from a partition before // trying again (default 2s). Backoff time.Duration + // Called to compute backoff time dynamically. Useful for implementing + // more sophisticated backoff strategies. This takes precedence over + // `Backoff` if set. + BackoffFunc func(retries int) time.Duration } // Fetch is the namespace for controlling how many bytes are retrieved by any @@ -166,7 +301,7 @@ type Config struct { // Equivalent to the JVM's `fetch.min.bytes`. Min int32 // The default number of message bytes to fetch from the broker in each - // request (default 32768). This should be larger than the majority of + // request (default 1MB). This should be larger than the majority of // your messages, or else the consumer will spend a lot of time // negotiating sizes and not actually consuming. Similar to the JVM's // `fetch.message.max.bytes`. @@ -187,11 +322,23 @@ type Config struct { // Equivalent to the JVM's `fetch.wait.max.ms`. MaxWaitTime time.Duration - // The maximum amount of time the consumer expects a message takes to process - // for the user. If writing to the Messages channel takes longer than this, - // that partition will stop fetching more messages until it can proceed again. + // The maximum amount of time the consumer expects a message takes to + // process for the user. If writing to the Messages channel takes longer + // than this, that partition will stop fetching more messages until it + // can proceed again. // Note that, since the Messages channel is buffered, the actual grace time is - // (MaxProcessingTime * ChanneBufferSize). Defaults to 100ms. + // (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms. + // If a message is not written to the Messages channel between two ticks + // of the expiryTicker then a timeout is detected. + // Using a ticker instead of a timer to detect timeouts should typically + // result in many fewer calls to Timer functions which may result in a + // significant performance improvement if many messages are being sent + // and timeouts are infrequent. + // The disadvantage of using a ticker instead of a timer is that + // timeouts will be less accurate. That is, the effective timeout could + // be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For + // example, if `MaxProcessingTime` is 100ms then a delay of 180ms + // between two messages being sent may not be recognized as a timeout. MaxProcessingTime time.Duration // Return specifies what channels will be populated. 
If they are set to true, @@ -206,9 +353,21 @@ type Config struct { // offsets. This currently requires the manual use of an OffsetManager // but will eventually be automated. Offsets struct { - // How frequently to commit updated offsets. Defaults to 1s. + // Deprecated: CommitInterval exists for historical compatibility + // and should not be used. Please use Consumer.Offsets.AutoCommit CommitInterval time.Duration + // AutoCommit specifies configuration for commit messages automatically. + AutoCommit struct { + // Whether or not to auto-commit updated offsets back to the broker. + // (default enabled). + Enable bool + + // How frequently to commit updated offsets. Ineffective unless + // auto-commit is enabled (default 1s) + Interval time.Duration + } + // The initial offset to use if no offset was previously committed. // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest. Initial int64 @@ -220,13 +379,28 @@ type Config struct { // broker version 0.9.0 or later. // (default is 0: disabled). Retention time.Duration + + Retry struct { + // The total number of times to retry failing commit + // requests during OffsetManager shutdown (default 3). + Max int + } } + + // IsolationLevel support 2 mode: + // - use `ReadUncommitted` (default) to consume and return all messages in message channel + // - use `ReadCommitted` to hide messages that are part of an aborted transaction + IsolationLevel IsolationLevel } // A user-provided string sent with every request to the brokers for logging, // debugging, and auditing purposes. Defaults to "sarama", but you should // probably set it to something specific to your application. ClientID string + // A rack identifier for this client. This can be any string value which + // indicates where this client is physically located. + // It corresponds with the broker config 'broker.rack' + RackID string // The number of events to buffer in internal and external channels. This // permits the producer and consumer to continue processing some messages // in the background while user code is working, greatly improving throughput. 
@@ -251,15 +425,21 @@ type Config struct { func NewConfig() *Config { c := &Config{} + c.Admin.Retry.Max = 5 + c.Admin.Retry.Backoff = 100 * time.Millisecond + c.Admin.Timeout = 3 * time.Second + c.Net.MaxOpenRequests = 5 c.Net.DialTimeout = 30 * time.Second c.Net.ReadTimeout = 30 * time.Second c.Net.WriteTimeout = 30 * time.Second c.Net.SASL.Handshake = true + c.Net.SASL.Version = SASLHandshakeV0 c.Metadata.Retry.Max = 3 c.Metadata.Retry.Backoff = 250 * time.Millisecond c.Metadata.RefreshFrequency = 10 * time.Minute + c.Metadata.Full = true c.Producer.MaxMessageBytes = 1000000 c.Producer.RequiredAcks = WaitForLocal @@ -268,19 +448,29 @@ func NewConfig() *Config { c.Producer.Retry.Max = 3 c.Producer.Retry.Backoff = 100 * time.Millisecond c.Producer.Return.Errors = true + c.Producer.CompressionLevel = CompressionLevelDefault c.Consumer.Fetch.Min = 1 - c.Consumer.Fetch.Default = 32768 + c.Consumer.Fetch.Default = 1024 * 1024 c.Consumer.Retry.Backoff = 2 * time.Second c.Consumer.MaxWaitTime = 250 * time.Millisecond c.Consumer.MaxProcessingTime = 100 * time.Millisecond c.Consumer.Return.Errors = false - c.Consumer.Offsets.CommitInterval = 1 * time.Second + c.Consumer.Offsets.AutoCommit.Enable = true + c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second c.Consumer.Offsets.Initial = OffsetNewest + c.Consumer.Offsets.Retry.Max = 3 + + c.Consumer.Group.Session.Timeout = 10 * time.Second + c.Consumer.Group.Heartbeat.Interval = 3 * time.Second + c.Consumer.Group.Rebalance.Strategy = BalanceStrategyRange + c.Consumer.Group.Rebalance.Timeout = 60 * time.Second + c.Consumer.Group.Rebalance.Retry.Max = 4 + c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second c.ClientID = defaultClientID c.ChannelBufferSize = 256 - c.Version = minVersion + c.Version = MinVersion c.MetricRegistry = metrics.NewRegistry() return c @@ -290,10 +480,10 @@ func NewConfig() *Config { // ConfigurationError if the specified values don't make sense. 
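// Editor's illustrative sketch (not part of the patch): a configuration that satisfies
// the idempotent-producer checks enforced by Validate() below (Version >= V0_11_0_0,
// Producer.Retry.Max >= 1, RequiredAcks == WaitForAll, Net.MaxOpenRequests == 1).
// The concrete values and broker address are assumptions.
//
//	cfg := sarama.NewConfig()
//	cfg.Version = sarama.V0_11_0_0
//	cfg.Producer.Idempotent = true
//	cfg.Producer.RequiredAcks = sarama.WaitForAll
//	cfg.Producer.Retry.Max = 5
//	cfg.Net.MaxOpenRequests = 1
//	cfg.Producer.Return.Successes = true // required when used with a SyncProducer
//
//	if err := cfg.Validate(); err != nil {
//		log.Fatal(err)
//	}
//	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer producer.Close()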
func (c *Config) Validate() error { // some configuration values should be warned on but not fail completely, do those first - if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil { + if !c.Net.TLS.Enable && c.Net.TLS.Config != nil { Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.") } - if c.Net.SASL.Enable == false { + if !c.Net.SASL.Enable { if c.Net.SASL.User != "" { Logger.Println("Net.SASL is disabled but a non-empty username was provided.") } @@ -305,10 +495,13 @@ func (c *Config) Validate() error { Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.") } if c.Producer.MaxMessageBytes >= int(MaxRequestSize) { - Logger.Println("Producer.MaxMessageBytes is larger than MaxRequestSize; it will be ignored.") + Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.") } if c.Producer.Flush.Bytes >= int(MaxRequestSize) { - Logger.Println("Producer.Flush.Bytes is larger than MaxRequestSize; it will be ignored.") + Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.") + } + if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 { + Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.") } if c.Producer.Timeout%time.Millisecond != 0 { Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.") @@ -322,6 +515,15 @@ func (c *Config) Validate() error { if c.Consumer.Offsets.Retention%time.Millisecond != 0 { Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.") } + if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 { + Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.") + } + if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 { + Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.") + } + if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 { + Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.") + } if c.ClientID == defaultClientID { Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.") } @@ -336,12 +538,71 @@ func (c *Config) Validate() error { return ConfigurationError("Net.ReadTimeout must be > 0") case c.Net.WriteTimeout <= 0: return ConfigurationError("Net.WriteTimeout must be > 0") - case c.Net.KeepAlive < 0: - return ConfigurationError("Net.KeepAlive must be >= 0") - case c.Net.SASL.Enable == true && c.Net.SASL.User == "": - return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") - case c.Net.SASL.Enable == true && c.Net.SASL.Password == "": - return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") + case c.Net.SASL.Enable: + if c.Net.SASL.Mechanism == "" { + c.Net.SASL.Mechanism = SASLTypePlaintext + } + + switch c.Net.SASL.Mechanism { + case SASLTypePlaintext: + if c.Net.SASL.User == "" { + return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") + } + if c.Net.SASL.Password == "" { + return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") + } + case SASLTypeOAuth: + if c.Net.SASL.TokenProvider == nil { + return 
ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider") + } + case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512: + if c.Net.SASL.User == "" { + return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") + } + if c.Net.SASL.Password == "" { + return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") + } + if c.Net.SASL.SCRAMClientGeneratorFunc == nil { + return ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc") + } + case SASLTypeGSSAPI: + if c.Net.SASL.GSSAPI.ServiceName == "" { + return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used") + } + + if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH { + if c.Net.SASL.GSSAPI.Password == "" { + return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " + + "mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH") + } + } else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH { + if c.Net.SASL.GSSAPI.KeyTabPath == "" { + return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" + + " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH") + } + } else { + return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH") + } + if c.Net.SASL.GSSAPI.KerberosConfigPath == "" { + return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used") + } + if c.Net.SASL.GSSAPI.Username == "" { + return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used") + } + if c.Net.SASL.GSSAPI.Realm == "" { + return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used") + } + default: + msg := fmt.Sprintf("The SASL mechanism configuration is invalid. 
Possible values are `%s`, `%s`, `%s`, `%s` and `%s`", + SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI) + return ConfigurationError(msg) + } + } + + // validate the Admin values + switch { + case c.Admin.Timeout <= 0: + return ConfigurationError("Admin.Timeout must be > 0") } // validate the Metadata values @@ -384,6 +645,33 @@ func (c *Config) Validate() error { return ConfigurationError("lz4 compression requires Version >= V0_10_0_0") } + if c.Producer.Compression == CompressionGZIP { + if c.Producer.CompressionLevel != CompressionLevelDefault { + if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil { + return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err)) + } + } + } + + if c.Producer.Compression == CompressionZSTD && !c.Version.IsAtLeast(V2_1_0_0) { + return ConfigurationError("zstd compression requires Version >= V2_1_0_0") + } + + if c.Producer.Idempotent { + if !c.Version.IsAtLeast(V0_11_0_0) { + return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0") + } + if c.Producer.Retry.Max == 0 { + return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1") + } + if c.Producer.RequiredAcks != WaitForAll { + return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll") + } + if c.Net.MaxOpenRequests > 1 { + return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1") + } + } + // validate the Consumer values switch { case c.Consumer.Fetch.Min <= 0: @@ -398,11 +686,42 @@ func (c *Config) Validate() error { return ConfigurationError("Consumer.MaxProcessingTime must be > 0") case c.Consumer.Retry.Backoff < 0: return ConfigurationError("Consumer.Retry.Backoff must be >= 0") - case c.Consumer.Offsets.CommitInterval <= 0: - return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0") + case c.Consumer.Offsets.AutoCommit.Interval <= 0: + return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0") case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest: return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest") + case c.Consumer.Offsets.Retry.Max < 0: + return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0") + case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted: + return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted") + } + if c.Consumer.Offsets.CommitInterval != 0 { + Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" + + " and should not be used. 
Please use Consumer.Offsets.AutoCommit, the current value will be ignored") + } + + // validate IsolationLevel + if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) { + return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0") + } + + // validate the Consumer Group values + switch { + case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond: + return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms") + case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond: + return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms") + case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout: + return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout") + case c.Consumer.Group.Rebalance.Strategy == nil: + return ConfigurationError("Consumer.Group.Rebalance.Strategy must not be empty") + case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond: + return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms") + case c.Consumer.Group.Rebalance.Retry.Max < 0: + return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0") + case c.Consumer.Group.Rebalance.Retry.Backoff < 0: + return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0") } // validate misc shared values @@ -415,3 +734,16 @@ func (c *Config) Validate() error { return nil } + +func (c *Config) getDialer() proxy.Dialer { + if c.Net.Proxy.Enable { + Logger.Printf("using proxy %s", c.Net.Proxy.Dialer) + return c.Net.Proxy.Dialer + } else { + return &net.Dialer{ + Timeout: c.Net.DialTimeout, + KeepAlive: c.Net.KeepAlive, + LocalAddr: c.Net.LocalAddr, + } + } +} diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/Shopify/sarama/config_resource_type.go new file mode 100644 index 00000000..bef1053a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/config_resource_type.go @@ -0,0 +1,18 @@ +package sarama + +// ConfigResourceType is a type for resources that have configs. +type ConfigResourceType int8 + +// Taken from: +// https://github.com/apache/kafka/blob/ed7c071e07f1f90e4c2895582f61ca090ced3c42/clients/src/main/java/org/apache/kafka/common/config/ConfigResource.java#L32-L55 + +const ( + // UnknownResource constant type + UnknownResource ConfigResourceType = 0 + // TopicResource constant type + TopicResource ConfigResourceType = 2 + // BrokerResource constant type + BrokerResource ConfigResourceType = 4 + // BrokerLoggerResource constant type + BrokerLoggerResource ConfigResourceType = 8 +) diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go index ddac564f..e16d08aa 100644 --- a/vendor/github.com/Shopify/sarama/consumer.go +++ b/vendor/github.com/Shopify/sarama/consumer.go @@ -3,18 +3,24 @@ package sarama import ( "errors" "fmt" + "math" "sync" "sync/atomic" "time" + + "github.com/rcrowley/go-metrics" ) // ConsumerMessage encapsulates a Kafka message returned by the consumer. 
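// Editor's illustrative sketch (not part of the patch): the new consumer offset and
// isolation-level options introduced in the config.go hunk above. Consumer.Offsets.AutoCommit
// replaces the deprecated CommitInterval, and ReadCommitted requires Version >= V0_11_0_0.
// The interval value is an assumption.
//
//	cfg := sarama.NewConfig()
//	cfg.Version = sarama.V0_11_0_0
//	cfg.Consumer.Offsets.Initial = sarama.OffsetOldest
//	cfg.Consumer.Offsets.AutoCommit.Enable = true
//	cfg.Consumer.Offsets.AutoCommit.Interval = 5 * time.Second
//	cfg.Consumer.IsolationLevel = sarama.ReadCommitted // hide messages from aborted transactions
//
//	if err := cfg.Validate(); err != nil {
//		log.Fatal(err)
//	}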
type ConsumerMessage struct { + Headers []*RecordHeader // only set if kafka is version 0.11+ + Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp + BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp + Key, Value []byte Topic string Partition int32 Offset int64 - Timestamp time.Time // only set if kafka is version 0.10+ } // ConsumerError is what is provided to the user when an error occurs. @@ -41,13 +47,7 @@ func (ce ConsumerErrors) Error() string { // Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close() // on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of // scope. -// -// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking. -// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library -// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the -// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. type Consumer interface { - // Topics returns the set of available topics as retrieved from the cluster // metadata. This method is the same as Client.Topics(), and is provided for // convenience. @@ -73,13 +73,11 @@ type Consumer interface { } type consumer struct { - client Client - conf *Config - ownClient bool - - lock sync.Mutex + conf *Config children map[string]map[int32]*partitionConsumer brokerConsumers map[*Broker]*brokerConsumer + client Client + lock sync.Mutex } // NewConsumer creates a new consumer using the given broker addresses and configuration. @@ -88,18 +86,19 @@ func NewConsumer(addrs []string, config *Config) (Consumer, error) { if err != nil { return nil, err } - - c, err := NewConsumerFromClient(client) - if err != nil { - return nil, err - } - c.(*consumer).ownClient = true - return c, nil + return newConsumer(client) } // NewConsumerFromClient creates a new consumer using the given client. It is still // necessary to call Close() on the underlying client when shutting down this consumer. func NewConsumerFromClient(client Client) (Consumer, error) { + // For clients passed in by the client, ensure we don't + // call Close() on it. + cli := &nopCloserClient{client} + return newConsumer(cli) +} + +func newConsumer(client Client) (Consumer, error) { // Check that we are not dealing with a closed Client before processing any other arguments if client.Closed() { return nil, ErrClosedClient @@ -116,10 +115,7 @@ func NewConsumerFromClient(client Client) (Consumer, error) { } func (c *consumer) Close() error { - if c.ownClient { - return c.client.Close() - } - return nil + return c.client.Close() } func (c *consumer) Topics() ([]string, error) { @@ -246,9 +242,9 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { // PartitionConsumer -// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close() -// or AsyncClose() on a PartitionConsumer to avoid leaks, it will not be garbage-collected automatically -// when it passes out of scope. +// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or +// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out +// of scope. 
// // The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range // loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported @@ -257,19 +253,24 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { // By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set // your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement // or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches. +// +// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of +// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process +// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call +// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will +// also drain the Messages channel, harvest all errors & return them once cleanup has completed. type PartitionConsumer interface { - - // AsyncClose initiates a shutdown of the PartitionConsumer. This method will - // return immediately, after which you should wait until the 'messages' and - // 'errors' channel are drained. It is required to call this function, or - // Close before a consumer object passes out of scope, as it will otherwise - // leak memory. You must call this before calling Close on the underlying client. + // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you + // should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this + // function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call + // this before calling Close on the underlying client. AsyncClose() - // Close stops the PartitionConsumer from fetching messages. It is required to - // call this function (or AsyncClose) before a consumer object passes out of - // scope, as it will otherwise leak memory. You must call this before calling - // Close on the underlying client. + // Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain + // the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service + // the Messages channel when this function is called, you will be competing with Close for messages; consider + // calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes + // out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client. 
Close() error // Messages returns the read channel for the messages that are returned by @@ -289,22 +290,23 @@ type PartitionConsumer interface { } type partitionConsumer struct { - consumer *consumer - conf *Config - topic string - partition int32 + highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG + consumer *consumer + conf *Config broker *brokerConsumer messages chan *ConsumerMessage errors chan *ConsumerError feeder chan *FetchResponse trigger, dying chan none + closeOnce sync.Once + topic string + partition int32 responseResult error - - fetchSize int32 - offset int64 - highWaterMarkOffset int64 + fetchSize int32 + offset int64 + retries int32 } var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing @@ -323,12 +325,20 @@ func (child *partitionConsumer) sendError(err error) { } } +func (child *partitionConsumer) computeBackoff() time.Duration { + if child.conf.Consumer.Retry.BackoffFunc != nil { + retries := atomic.AddInt32(&child.retries, 1) + return child.conf.Consumer.Retry.BackoffFunc(int(retries)) + } + return child.conf.Consumer.Retry.Backoff +} + func (child *partitionConsumer) dispatcher() { - for _ = range child.trigger { + for range child.trigger { select { case <-child.dying: close(child.trigger) - case <-time.After(child.conf.Consumer.Retry.Backoff): + case <-time.After(child.computeBackoff()): if child.broker != nil { child.consumer.unrefBrokerConsumer(child.broker) child.broker = nil @@ -404,18 +414,14 @@ func (child *partitionConsumer) AsyncClose() { // the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and // 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will // also just close itself) - close(child.dying) + child.closeOnce.Do(func() { + close(child.dying) + }) } func (child *partitionConsumer) Close() error { child.AsyncClose() - go withRecover(func() { - for _ = range child.messages { - // drain - } - }) - var errors ConsumerErrors for err := range child.errors { errors = append(errors, err) @@ -433,43 +439,137 @@ func (child *partitionConsumer) HighWaterMarkOffset() int64 { func (child *partitionConsumer) responseFeeder() { var msgs []*ConsumerMessage - expiryTimer := time.NewTimer(child.conf.Consumer.MaxProcessingTime) - expireTimedOut := false + expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime) + firstAttempt := true feederLoop: for response := range child.feeder { msgs, child.responseResult = child.parseResponse(response) - for i, msg := range msgs { - if !expiryTimer.Stop() && !expireTimedOut { - // expiryTimer was expired; clear out the waiting msg - <-expiryTimer.C - } - expiryTimer.Reset(child.conf.Consumer.MaxProcessingTime) - expireTimedOut = false + if child.responseResult == nil { + atomic.StoreInt32(&child.retries, 0) + } + for i, msg := range msgs { + messageSelect: select { - case child.messages <- msg: - case <-expiryTimer.C: - expireTimedOut = true - child.responseResult = errTimedOut + case <-child.dying: child.broker.acks.Done() - for _, msg = range msgs[i:] { - child.messages <- msg - } - child.broker.input <- child continue feederLoop + case child.messages <- msg: + firstAttempt = true + case <-expiryTicker.C: + if !firstAttempt { + child.responseResult = errTimedOut + child.broker.acks.Done() + remainingLoop: + for _, msg = range msgs[i:] { + select { + case child.messages <- msg: + case <-child.dying: + break remainingLoop + } + 
} + child.broker.input <- child + continue feederLoop + } else { + // current message has not been sent, return to select + // statement + firstAttempt = false + goto messageSelect + } } } child.broker.acks.Done() } + expiryTicker.Stop() close(child.messages) close(child.errors) } +func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) { + var messages []*ConsumerMessage + for _, msgBlock := range msgSet.Messages { + for _, msg := range msgBlock.Messages() { + offset := msg.Offset + timestamp := msg.Msg.Timestamp + if msg.Msg.Version >= 1 { + baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset + offset += baseOffset + if msg.Msg.LogAppendTime { + timestamp = msgBlock.Msg.Timestamp + } + } + if offset < child.offset { + continue + } + messages = append(messages, &ConsumerMessage{ + Topic: child.topic, + Partition: child.partition, + Key: msg.Msg.Key, + Value: msg.Msg.Value, + Offset: offset, + Timestamp: timestamp, + BlockTimestamp: msgBlock.Msg.Timestamp, + }) + child.offset = offset + 1 + } + } + if len(messages) == 0 { + child.offset++ + } + return messages, nil +} + +func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) { + messages := make([]*ConsumerMessage, 0, len(batch.Records)) + + for _, rec := range batch.Records { + offset := batch.FirstOffset + rec.OffsetDelta + if offset < child.offset { + continue + } + timestamp := batch.FirstTimestamp.Add(rec.TimestampDelta) + if batch.LogAppendTime { + timestamp = batch.MaxTimestamp + } + messages = append(messages, &ConsumerMessage{ + Topic: child.topic, + Partition: child.partition, + Key: rec.Key, + Value: rec.Value, + Offset: offset, + Timestamp: timestamp, + Headers: rec.Headers, + }) + child.offset = offset + 1 + } + if len(messages) == 0 { + child.offset++ + } + return messages, nil +} + func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) { + var ( + metricRegistry = child.conf.MetricRegistry + consumerBatchSizeMetric metrics.Histogram + ) + + if metricRegistry != nil { + consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry) + } + + // If request was throttled and empty we log and return without error + if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 { + Logger.Printf( + "consumer/broker/%d FetchResponse throttled %v\n", + child.broker.broker.ID(), response.ThrottleTime) + return nil, nil + } + block := response.GetBlock(child.topic, child.partition) if block == nil { return nil, ErrIncompleteResponse @@ -479,16 +579,31 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu return nil, block.Err } - if len(block.MsgSet.Messages) == 0 { + nRecs, err := block.numRecords() + if err != nil { + return nil, err + } + + consumerBatchSizeMetric.Update(int64(nRecs)) + + if nRecs == 0 { + partialTrailingMessage, err := block.isPartial() + if err != nil { + return nil, err + } // We got no messages. If we got a trailing one then we need to ask for more data. // Otherwise we just poll again and wait for one to be produced... 
- if block.MsgSet.PartialTrailingMessage { + if partialTrailingMessage { if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max { // we can't ask for more data, we've hit the configured limit child.sendError(ErrMessageTooLarge) child.offset++ // skip this one so we can keep processing future messages } else { child.fetchSize *= 2 + // check int32 overflow + if child.fetchSize < 0 { + child.fetchSize = math.MaxInt32 + } if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max { child.fetchSize = child.conf.Consumer.Fetch.Max } @@ -502,54 +617,89 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu child.fetchSize = child.conf.Consumer.Fetch.Default atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset) - incomplete := false - prelude := true - var messages []*ConsumerMessage - for _, msgBlock := range block.MsgSet.Messages { + // abortedProducerIDs contains producerID which message should be ignored as uncommitted + // - producerID are added when the partitionConsumer iterate over the offset at which an aborted transaction begins (abortedTransaction.FirstOffset) + // - producerID are removed when partitionConsumer iterate over an aborted controlRecord, meaning the aborted transaction for this producer is over + abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions)) + abortedTransactions := block.getAbortedTransactions() + + messages := []*ConsumerMessage{} + for _, records := range block.RecordsSet { + switch records.recordsType { + case legacyRecords: + messageSetMessages, err := child.parseMessages(records.MsgSet) + if err != nil { + return nil, err + } - for _, msg := range msgBlock.Messages() { - offset := msg.Offset - if msg.Msg.Version >= 1 { - baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset - offset += baseOffset + messages = append(messages, messageSetMessages...) 
+ case defaultRecords: + // Consume remaining abortedTransaction up to last offset of current batch + for _, txn := range abortedTransactions { + if txn.FirstOffset > records.RecordBatch.LastOffset() { + break + } + abortedProducerIDs[txn.ProducerID] = struct{}{} + // Pop abortedTransactions so that we never add it again + abortedTransactions = abortedTransactions[1:] } - if prelude && offset < child.offset { + + recordBatchMessages, err := child.parseRecords(records.RecordBatch) + if err != nil { + return nil, err + } + + // Parse and commit offset but do not expose messages that are: + // - control records + // - part of an aborted transaction when set to `ReadCommitted` + + // control record + isControl, err := records.isControl() + if err != nil { + // I don't know why there is this continue in case of error to begin with + // Safe bet is to ignore control messages if ReadUncommitted + // and block on them in case of error and ReadCommitted + if child.conf.Consumer.IsolationLevel == ReadCommitted { + return nil, err + } continue } - prelude = false - - if offset >= child.offset { - messages = append(messages, &ConsumerMessage{ - Topic: child.topic, - Partition: child.partition, - Key: msg.Msg.Key, - Value: msg.Msg.Value, - Offset: offset, - Timestamp: msg.Msg.Timestamp, - }) - child.offset = offset + 1 - } else { - incomplete = true + if isControl { + controlRecord, err := records.getControlRecord() + if err != nil { + return nil, err + } + + if controlRecord.Type == ControlRecordAbort { + delete(abortedProducerIDs, records.RecordBatch.ProducerID) + } + continue } - } - } + // filter aborted transactions + if child.conf.Consumer.IsolationLevel == ReadCommitted { + _, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID] + if records.RecordBatch.IsTransactional && isAborted { + continue + } + } - if incomplete || len(messages) == 0 { - return nil, ErrIncompleteResponse + messages = append(messages, recordBatchMessages...) + default: + return nil, fmt.Errorf("unknown records type: %v", records.recordsType) + } } + return messages, nil } -// brokerConsumer - type brokerConsumer struct { consumer *consumer broker *Broker input chan *partitionConsumer newSubscriptions chan []*partitionConsumer - wait chan none subscriptions map[*partitionConsumer]none + wait chan none acks sync.WaitGroup refs int } @@ -571,14 +721,14 @@ func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer { return bc } +// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer +// goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks +// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give +// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions is available, +// so the main goroutine can block waiting for work if it has none. func (bc *brokerConsumer) subscriptionManager() { var buffer []*partitionConsumer - // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer - // goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks - // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give - // it nil if no new subscriptions are available. 
We also write to `wait` only when new subscriptions is available, - // so the main goroutine can block waiting for work if it has none. for { if len(buffer) > 0 { select { @@ -611,10 +761,10 @@ done: close(bc.newSubscriptions) } +//subscriptionConsumer ensures we will get nil right away if no new subscriptions is available func (bc *brokerConsumer) subscriptionConsumer() { <-bc.wait // wait for our first piece of work - // the subscriptionConsumer ensures we will get nil right away if no new subscriptions is available for newSubscriptions := range bc.newSubscriptions { bc.updateSubscriptions(newSubscriptions) @@ -655,20 +805,20 @@ func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsu close(child.trigger) delete(bc.subscriptions, child) default: - break + // no-op } } } +//handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed func (bc *brokerConsumer) handleResponses() { - // handles the response codes left for us by our subscriptions, and abandons ones that have been closed for child := range bc.subscriptions { result := child.responseResult child.responseResult = nil switch result { case nil: - break + // no-op case errTimedOut: Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n", bc.broker.ID(), child.topic, child.partition) @@ -723,9 +873,35 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { MinBytes: bc.consumer.conf.Consumer.Fetch.Min, MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond), } + if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) { + request.Version = 1 + } if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) { request.Version = 2 } + if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) { + request.Version = 3 + request.MaxBytes = MaxResponseSize + } + if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 4 + request.Isolation = bc.consumer.conf.Consumer.IsolationLevel + } + if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) { + request.Version = 7 + // We do not currently implement KIP-227 FetchSessions. Setting the id to 0 + // and the epoch to -1 tells the broker not to generate as session ID we're going + // to just ignore anyway. + request.SessionID = 0 + request.SessionEpoch = -1 + } + if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) { + request.Version = 10 + } + if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) { + request.Version = 11 + request.RackID = bc.consumer.conf.RackID + } for child := range bc.subscriptions { request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize) diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go new file mode 100644 index 00000000..056b9e38 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer_group.go @@ -0,0 +1,867 @@ +package sarama + +import ( + "context" + "errors" + "fmt" + "sort" + "sync" + "time" +) + +// ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed. +var ErrClosedConsumerGroup = errors.New("kafka: tried to use a consumer group that was closed") + +// ConsumerGroup is responsible for dividing up processing of topics and partitions +// over a collection of processes (the members of the consumer group). 
+type ConsumerGroup interface { + // Consume joins a cluster of consumers for a given list of topics and + // starts a blocking ConsumerGroupSession through the ConsumerGroupHandler. + // + // The life-cycle of a session is represented by the following steps: + // + // 1. The consumers join the group (as explained in https://kafka.apache.org/documentation/#intro_consumers) + // and is assigned their "fair share" of partitions, aka 'claims'. + // 2. Before processing starts, the handler's Setup() hook is called to notify the user + // of the claims and allow any necessary preparation or alteration of state. + // 3. For each of the assigned claims the handler's ConsumeClaim() function is then called + // in a separate goroutine which requires it to be thread-safe. Any state must be carefully protected + // from concurrent reads/writes. + // 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the + // parent context is cancelled or when a server-side rebalance cycle is initiated. + // 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called + // to allow the user to perform any final tasks before a rebalance. + // 6. Finally, marked offsets are committed one last time before claims are released. + // + // Please note, that once a rebalance is triggered, sessions must be completed within + // Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit + // as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout + // is exceeded, the consumer will be removed from the group by Kafka, which will cause offset + // commit failures. + // This method should be called inside an infinite loop, when a + // server-side rebalance happens, the consumer session will need to be + // recreated to get the new claims. + Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error + + // Errors returns a read channel of errors that occurred during the consumer life-cycle. + // By default, errors are logged and not returned over this channel. + // If you want to implement any custom error handling, set your config's + // Consumer.Return.Errors setting to true, and read from this channel. + Errors() <-chan error + + // Close stops the ConsumerGroup and detaches any running sessions. It is required to call + // this function before the object passes out of scope, as it will otherwise leak memory. + Close() error +} + +type consumerGroup struct { + client Client + + config *Config + consumer Consumer + groupID string + memberID string + errors chan error + + lock sync.Mutex + closed chan none + closeOnce sync.Once + + userData []byte +} + +// NewConsumerGroup creates a new consumer group the given broker addresses and configuration. +func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerGroup, error) { + client, err := NewClient(addrs, config) + if err != nil { + return nil, err + } + + c, err := newConsumerGroup(groupID, client) + if err != nil { + _ = client.Close() + } + return c, err +} + +// NewConsumerGroupFromClient creates a new consumer group using the given client. It is still +// necessary to call Close() on the underlying client when shutting down this consumer. +// PLEASE NOTE: consumer groups can only re-use but not share clients. +func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) { + // For clients passed in by the client, ensure we don't + // call Close() on it. 
+ cli := &nopCloserClient{client} + return newConsumerGroup(groupID, cli) +} + +func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) { + config := client.Config() + if !config.Version.IsAtLeast(V0_10_2_0) { + return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0") + } + + consumer, err := NewConsumerFromClient(client) + if err != nil { + return nil, err + } + + return &consumerGroup{ + client: client, + consumer: consumer, + config: config, + groupID: groupID, + errors: make(chan error, config.ChannelBufferSize), + closed: make(chan none), + }, nil +} + +// Errors implements ConsumerGroup. +func (c *consumerGroup) Errors() <-chan error { return c.errors } + +// Close implements ConsumerGroup. +func (c *consumerGroup) Close() (err error) { + c.closeOnce.Do(func() { + close(c.closed) + + // leave group + if e := c.leave(); e != nil { + err = e + } + + // drain errors + go func() { + close(c.errors) + }() + for e := range c.errors { + err = e + } + + if e := c.client.Close(); e != nil { + err = e + } + }) + return +} + +// Consume implements ConsumerGroup. +func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error { + // Ensure group is not closed + select { + case <-c.closed: + return ErrClosedConsumerGroup + default: + } + + c.lock.Lock() + defer c.lock.Unlock() + + // Quick exit when no topics are provided + if len(topics) == 0 { + return fmt.Errorf("no topics provided") + } + + // Refresh metadata for requested topics + if err := c.client.RefreshMetadata(topics...); err != nil { + return err + } + + // Init session + sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max) + if err == ErrClosedClient { + return ErrClosedConsumerGroup + } else if err != nil { + return err + } + + // loop check topic partition numbers changed + // will trigger rebalance when any topic partitions number had changed + // avoid Consume function called again that will generate more than loopCheckPartitionNumbers coroutine + go c.loopCheckPartitionNumbers(topics, sess) + + // Wait for session exit signal + <-sess.ctx.Done() + + // Gracefully release session claims + return sess.release(true) +} + +func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) { + select { + case <-c.closed: + return nil, ErrClosedConsumerGroup + case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff): + } + + if refreshCoordinator { + err := c.client.RefreshCoordinator(c.groupID) + if err != nil { + return c.retryNewSession(ctx, topics, handler, retries, true) + } + } + + return c.newSession(ctx, topics, handler, retries-1) +} + +func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) { + coordinator, err := c.client.Coordinator(c.groupID) + if err != nil { + if retries <= 0 { + return nil, err + } + + return c.retryNewSession(ctx, topics, handler, retries, true) + } + + // Join consumer group + join, err := c.joinGroupRequest(coordinator, topics) + if err != nil { + _ = coordinator.Close() + return nil, err + } + switch join.Err { + case ErrNoError: + c.memberID = join.MemberId + case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately + c.memberID = "" + return c.newSession(ctx, topics, handler, retries) + case ErrNotCoordinatorForConsumer: // retry 
after backoff with coordinator refresh + if retries <= 0 { + return nil, join.Err + } + + return c.retryNewSession(ctx, topics, handler, retries, true) + case ErrRebalanceInProgress: // retry after backoff + if retries <= 0 { + return nil, join.Err + } + + return c.retryNewSession(ctx, topics, handler, retries, false) + default: + return nil, join.Err + } + + // Prepare distribution plan if we joined as the leader + var plan BalanceStrategyPlan + if join.LeaderId == join.MemberId { + members, err := join.GetMembers() + if err != nil { + return nil, err + } + + plan, err = c.balance(members) + if err != nil { + return nil, err + } + } + + // Sync consumer group + sync, err := c.syncGroupRequest(coordinator, plan, join.GenerationId) + if err != nil { + _ = coordinator.Close() + return nil, err + } + switch sync.Err { + case ErrNoError: + case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately + c.memberID = "" + return c.newSession(ctx, topics, handler, retries) + case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh + if retries <= 0 { + return nil, sync.Err + } + + return c.retryNewSession(ctx, topics, handler, retries, true) + case ErrRebalanceInProgress: // retry after backoff + if retries <= 0 { + return nil, sync.Err + } + + return c.retryNewSession(ctx, topics, handler, retries, false) + default: + return nil, sync.Err + } + + // Retrieve and sort claims + var claims map[string][]int32 + if len(sync.MemberAssignment) > 0 { + members, err := sync.GetMemberAssignment() + if err != nil { + return nil, err + } + claims = members.Topics + c.userData = members.UserData + + for _, partitions := range claims { + sort.Sort(int32Slice(partitions)) + } + } + + return newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler) +} + +func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) { + req := &JoinGroupRequest{ + GroupId: c.groupID, + MemberId: c.memberID, + SessionTimeout: int32(c.config.Consumer.Group.Session.Timeout / time.Millisecond), + ProtocolType: "consumer", + } + if c.config.Version.IsAtLeast(V0_10_1_0) { + req.Version = 1 + req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond) + } + + // use static user-data if configured, otherwise use consumer-group userdata from the last sync + userData := c.config.Consumer.Group.Member.UserData + if len(userData) == 0 { + userData = c.userData + } + meta := &ConsumerGroupMemberMetadata{ + Topics: topics, + UserData: userData, + } + strategy := c.config.Consumer.Group.Rebalance.Strategy + if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil { + return nil, err + } + + return coordinator.JoinGroup(req) +} + +func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrategyPlan, generationID int32) (*SyncGroupResponse, error) { + req := &SyncGroupRequest{ + GroupId: c.groupID, + MemberId: c.memberID, + GenerationId: generationID, + } + strategy := c.config.Consumer.Group.Rebalance.Strategy + for memberID, topics := range plan { + assignment := &ConsumerGroupMemberAssignment{Topics: topics} + userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID) + if err != nil { + return nil, err + } + assignment.UserData = userDataBytes + if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil { + return nil, err + } + } + return coordinator.SyncGroup(req) +} + +func (c *consumerGroup) heartbeatRequest(coordinator 
*Broker, memberID string, generationID int32) (*HeartbeatResponse, error) { + req := &HeartbeatRequest{ + GroupId: c.groupID, + MemberId: memberID, + GenerationId: generationID, + } + + return coordinator.Heartbeat(req) +} + +func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) { + topics := make(map[string][]int32) + for _, meta := range members { + for _, topic := range meta.Topics { + topics[topic] = nil + } + } + + for topic := range topics { + partitions, err := c.client.Partitions(topic) + if err != nil { + return nil, err + } + topics[topic] = partitions + } + + strategy := c.config.Consumer.Group.Rebalance.Strategy + return strategy.Plan(members, topics) +} + +// Leaves the cluster, called by Close. +func (c *consumerGroup) leave() error { + c.lock.Lock() + defer c.lock.Unlock() + if c.memberID == "" { + return nil + } + + coordinator, err := c.client.Coordinator(c.groupID) + if err != nil { + return err + } + + resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{ + GroupId: c.groupID, + MemberId: c.memberID, + }) + if err != nil { + _ = coordinator.Close() + return err + } + + // Unset memberID + c.memberID = "" + + // Check response + switch resp.Err { + case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError: + return nil + default: + return resp.Err + } +} + +func (c *consumerGroup) handleError(err error, topic string, partition int32) { + if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 { + err = &ConsumerError{ + Topic: topic, + Partition: partition, + Err: err, + } + } + + if !c.config.Consumer.Return.Errors { + Logger.Println(err) + return + } + + select { + case <-c.closed: + //consumer is closed + return + default: + } + + select { + case c.errors <- err: + default: + // no error listener + } +} + +func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) { + pause := time.NewTicker(c.config.Metadata.RefreshFrequency) + defer session.cancel() + defer pause.Stop() + var oldTopicToPartitionNum map[string]int + var err error + if oldTopicToPartitionNum, err = c.topicToPartitionNumbers(topics); err != nil { + return + } + for { + if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil { + return + } else { + for topic, num := range oldTopicToPartitionNum { + if newTopicToPartitionNum[topic] != num { + return // trigger the end of the session on exit + } + } + } + select { + case <-pause.C: + case <-session.ctx.Done(): + Logger.Printf("loop check partition number coroutine will exit, topics %s", topics) + // if session closed by other, should be exited + return + case <-c.closed: + return + } + } +} + +func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) { + topicToPartitionNum := make(map[string]int, len(topics)) + for _, topic := range topics { + if partitionNum, err := c.client.Partitions(topic); err != nil { + Logger.Printf("Consumer Group topic %s get partition number failed %v", topic, err) + return nil, err + } else { + topicToPartitionNum[topic] = len(partitionNum) + } + } + return topicToPartitionNum, nil +} + +// -------------------------------------------------------------------- + +// ConsumerGroupSession represents a consumer group member session. +type ConsumerGroupSession interface { + // Claims returns information about the claimed partitions by topic. + Claims() map[string][]int32 + + // MemberID returns the cluster member ID. 
+ MemberID() string + + // GenerationID returns the current generation ID. + GenerationID() int32 + + // MarkOffset marks the provided offset, alongside a metadata string + // that represents the state of the partition consumer at that point in time. The + // metadata string can be used by another consumer to restore that state, so it + // can resume consumption. + // + // To follow upstream conventions, you are expected to mark the offset of the + // next message to read, not the last message read. Thus, when calling `MarkOffset` + // you should typically add one to the offset of the last consumed message. + // + // Note: calling MarkOffset does not necessarily commit the offset to the backend + // store immediately for efficiency reasons, and it may never be committed if + // your application crashes. This means that you may end up processing the same + // message twice, and your processing should ideally be idempotent. + MarkOffset(topic string, partition int32, offset int64, metadata string) + + // ResetOffset resets to the provided offset, alongside a metadata string that + // represents the state of the partition consumer at that point in time. Reset + // acts as a counterpart to MarkOffset, the difference being that it allows to + // reset an offset to an earlier or smaller value, where MarkOffset only + // allows incrementing the offset. cf MarkOffset for more details. + ResetOffset(topic string, partition int32, offset int64, metadata string) + + // MarkMessage marks a message as consumed. + MarkMessage(msg *ConsumerMessage, metadata string) + + // Context returns the session context. + Context() context.Context +} + +type consumerGroupSession struct { + parent *consumerGroup + memberID string + generationID int32 + handler ConsumerGroupHandler + + claims map[string][]int32 + offsets *offsetManager + ctx context.Context + cancel func() + + waitGroup sync.WaitGroup + releaseOnce sync.Once + hbDying, hbDead chan none +} + +func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) { + // init offset manager + offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client) + if err != nil { + return nil, err + } + + // init context + ctx, cancel := context.WithCancel(ctx) + + // init session + sess := &consumerGroupSession{ + parent: parent, + memberID: memberID, + generationID: generationID, + handler: handler, + offsets: offsets, + claims: claims, + ctx: ctx, + cancel: cancel, + hbDying: make(chan none), + hbDead: make(chan none), + } + + // start heartbeat loop + go sess.heartbeatLoop() + + // create a POM for each claim + for topic, partitions := range claims { + for _, partition := range partitions { + pom, err := offsets.ManagePartition(topic, partition) + if err != nil { + _ = sess.release(false) + return nil, err + } + + // handle POM errors + go func(topic string, partition int32) { + for err := range pom.Errors() { + sess.parent.handleError(err, topic, partition) + } + }(topic, partition) + } + } + + // perform setup + if err := handler.Setup(sess); err != nil { + _ = sess.release(true) + return nil, err + } + + // start consuming + for topic, partitions := range claims { + for _, partition := range partitions { + sess.waitGroup.Add(1) + + go func(topic string, partition int32) { + defer sess.waitGroup.Done() + + // cancel the as session as soon as the first + // goroutine exits + defer sess.cancel() + + 
// consume a single topic/partition, blocking + sess.consume(topic, partition) + }(topic, partition) + } + } + return sess, nil +} + +func (s *consumerGroupSession) Claims() map[string][]int32 { return s.claims } +func (s *consumerGroupSession) MemberID() string { return s.memberID } +func (s *consumerGroupSession) GenerationID() int32 { return s.generationID } + +func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) { + if pom := s.offsets.findPOM(topic, partition); pom != nil { + pom.MarkOffset(offset, metadata) + } +} + +func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { + if pom := s.offsets.findPOM(topic, partition); pom != nil { + pom.ResetOffset(offset, metadata) + } +} + +func (s *consumerGroupSession) MarkMessage(msg *ConsumerMessage, metadata string) { + s.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, metadata) +} + +func (s *consumerGroupSession) Context() context.Context { + return s.ctx +} + +func (s *consumerGroupSession) consume(topic string, partition int32) { + // quick exit if rebalance is due + select { + case <-s.ctx.Done(): + return + case <-s.parent.closed: + return + default: + } + + // get next offset + offset := s.parent.config.Consumer.Offsets.Initial + if pom := s.offsets.findPOM(topic, partition); pom != nil { + offset, _ = pom.NextOffset() + } + + // create new claim + claim, err := newConsumerGroupClaim(s, topic, partition, offset) + if err != nil { + s.parent.handleError(err, topic, partition) + return + } + + // handle errors + go func() { + for err := range claim.Errors() { + s.parent.handleError(err, topic, partition) + } + }() + + // trigger close when session is done + go func() { + select { + case <-s.ctx.Done(): + case <-s.parent.closed: + } + claim.AsyncClose() + }() + + // start processing + if err := s.handler.ConsumeClaim(s, claim); err != nil { + s.parent.handleError(err, topic, partition) + } + + // ensure consumer is closed & drained + claim.AsyncClose() + for _, err := range claim.waitClosed() { + s.parent.handleError(err, topic, partition) + } +} + +func (s *consumerGroupSession) release(withCleanup bool) (err error) { + // signal release, stop heartbeat + s.cancel() + + // wait for consumers to exit + s.waitGroup.Wait() + + // perform release + s.releaseOnce.Do(func() { + if withCleanup { + if e := s.handler.Cleanup(s); e != nil { + s.parent.handleError(e, "", -1) + err = e + } + } + + if e := s.offsets.Close(); e != nil { + err = e + } + + close(s.hbDying) + <-s.hbDead + }) + + return +} + +func (s *consumerGroupSession) heartbeatLoop() { + defer close(s.hbDead) + defer s.cancel() // trigger the end of the session on exit + + pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval) + defer pause.Stop() + + retries := s.parent.config.Metadata.Retry.Max + for { + coordinator, err := s.parent.client.Coordinator(s.parent.groupID) + if err != nil { + if retries <= 0 { + s.parent.handleError(err, "", -1) + return + } + + select { + case <-s.hbDying: + return + case <-time.After(s.parent.config.Metadata.Retry.Backoff): + retries-- + } + continue + } + + resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID) + if err != nil { + _ = coordinator.Close() + + if retries <= 0 { + s.parent.handleError(err, "", -1) + return + } + + retries-- + continue + } + + switch resp.Err { + case ErrNoError: + retries = s.parent.config.Metadata.Retry.Max + case ErrRebalanceInProgress, ErrUnknownMemberId, 
ErrIllegalGeneration: + return + default: + s.parent.handleError(resp.Err, "", -1) + return + } + + select { + case <-pause.C: + case <-s.hbDying: + return + } + } +} + +// -------------------------------------------------------------------- + +// ConsumerGroupHandler instances are used to handle individual topic/partition claims. +// It also provides hooks for your consumer group session life-cycle and allow you to +// trigger logic before or after the consume loop(s). +// +// PLEASE NOTE that handlers are likely be called from several goroutines concurrently, +// ensure that all state is safely protected against race conditions. +type ConsumerGroupHandler interface { + // Setup is run at the beginning of a new session, before ConsumeClaim. + Setup(ConsumerGroupSession) error + + // Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited + // but before the offsets are committed for the very last time. + Cleanup(ConsumerGroupSession) error + + // ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages(). + // Once the Messages() channel is closed, the Handler must finish its processing + // loop and exit. + ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error +} + +// ConsumerGroupClaim processes Kafka messages from a given topic and partition within a consumer group. +type ConsumerGroupClaim interface { + // Topic returns the consumed topic name. + Topic() string + + // Partition returns the consumed partition. + Partition() int32 + + // InitialOffset returns the initial offset that was used as a starting point for this claim. + InitialOffset() int64 + + // HighWaterMarkOffset returns the high water mark offset of the partition, + // i.e. the offset that will be used for the next message that will be produced. + // You can use this to determine how far behind the processing is. + HighWaterMarkOffset() int64 + + // Messages returns the read channel for the messages that are returned by + // the broker. The messages channel will be closed when a new rebalance cycle + // is due. You must finish processing and mark offsets within + // Config.Consumer.Group.Session.Timeout before the topic/partition is eventually + // re-assigned to another group member. + Messages() <-chan *ConsumerMessage +} + +type consumerGroupClaim struct { + topic string + partition int32 + offset int64 + PartitionConsumer +} + +func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) { + pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset) + if err == ErrOffsetOutOfRange { + offset = sess.parent.config.Consumer.Offsets.Initial + pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset) + } + if err != nil { + return nil, err + } + + go func() { + for err := range pcm.Errors() { + sess.parent.handleError(err, topic, partition) + } + }() + + return &consumerGroupClaim{ + topic: topic, + partition: partition, + offset: offset, + PartitionConsumer: pcm, + }, nil +} + +func (c *consumerGroupClaim) Topic() string { return c.topic } +func (c *consumerGroupClaim) Partition() int32 { return c.partition } +func (c *consumerGroupClaim) InitialOffset() int64 { return c.offset } + +// Drains messages and errors, ensures the claim is fully closed. 
+func (c *consumerGroupClaim) waitClosed() (errs ConsumerErrors) { + go func() { + for range c.Messages() { + } + }() + + for err := range c.Errors() { + errs = append(errs, err) + } + return +} diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go index 9d92d350..2d02cc38 100644 --- a/vendor/github.com/Shopify/sarama/consumer_group_members.go +++ b/vendor/github.com/Shopify/sarama/consumer_group_members.go @@ -1,5 +1,6 @@ package sarama +//ConsumerGroupMemberMetadata holds the metadata for consumer group type ConsumerGroupMemberMetadata struct { Version int16 Topics []string @@ -36,6 +37,7 @@ func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) { return nil } +//ConsumerGroupMemberAssignment holds the member assignment for a consume group type ConsumerGroupMemberAssignment struct { Version int16 Topics map[string][]int32 diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go index 483be335..e5ebdaef 100644 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go @@ -1,16 +1,24 @@ package sarama +//ConsumerMetadataRequest is used for metadata requests type ConsumerMetadataRequest struct { ConsumerGroup string } func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error { - return pe.putString(r.ConsumerGroup) + tmp := new(FindCoordinatorRequest) + tmp.CoordinatorKey = r.ConsumerGroup + tmp.CoordinatorType = CoordinatorGroup + return tmp.encode(pe) } func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) { - r.ConsumerGroup, err = pd.getString() - return err + tmp := new(FindCoordinatorRequest) + if err := tmp.decode(pd, version); err != nil { + return err + } + r.ConsumerGroup = tmp.CoordinatorKey + return nil } func (r *ConsumerMetadataRequest) key() int16 { @@ -21,6 +29,10 @@ func (r *ConsumerMetadataRequest) version() int16 { return 0 } +func (r *ConsumerMetadataRequest) headerVersion() int16 { + return 1 +} + func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion { return V0_8_2_0 } diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go index 6b9632bb..1b5d00d2 100644 --- a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go @@ -5,6 +5,7 @@ import ( "strconv" ) +//ConsumerMetadataResponse holds the response for a consumer group meta data requests type ConsumerMetadataResponse struct { Err KError Coordinator *Broker @@ -14,20 +15,18 @@ type ConsumerMetadataResponse struct { } func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - r.Err = KError(tmp) + tmp := new(FindCoordinatorResponse) - coordinator := new(Broker) - if err := coordinator.decode(pd); err != nil { + if err := tmp.decode(pd, version); err != nil { return err } - if coordinator.addr == ":0" { + + r.Err = tmp.Err + + r.Coordinator = tmp.Coordinator + if tmp.Coordinator == nil { return nil } - r.Coordinator = coordinator // this can all go away in 2.0, but we have to fill in deprecated fields to maintain // backwards compatibility @@ -47,28 +46,22 @@ func (r 
*ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err } func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - if r.Coordinator != nil { - host, portstr, err := net.SplitHostPort(r.Coordinator.Addr()) - if err != nil { - return err - } - port, err := strconv.ParseInt(portstr, 10, 32) - if err != nil { - return err - } - pe.putInt32(r.Coordinator.ID()) - if err := pe.putString(host); err != nil { - return err - } - pe.putInt32(int32(port)) - return nil + if r.Coordinator == nil { + r.Coordinator = new(Broker) + r.Coordinator.id = r.CoordinatorID + r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort))) } - pe.putInt32(r.CoordinatorID) - if err := pe.putString(r.CoordinatorHost); err != nil { + + tmp := &FindCoordinatorResponse{ + Version: 0, + Err: r.Err, + Coordinator: r.Coordinator, + } + + if err := tmp.encode(pe); err != nil { return err } - pe.putInt32(r.CoordinatorPort) + return nil } @@ -80,6 +73,10 @@ func (r *ConsumerMetadataResponse) version() int16 { return 0 } +func (r *ConsumerMetadataResponse) headerVersion() int16 { + return 0 +} + func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion { return V0_8_2_0 } diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/Shopify/sarama/control_record.go new file mode 100644 index 00000000..9b75ab53 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/control_record.go @@ -0,0 +1,72 @@ +package sarama + +//ControlRecordType ... +type ControlRecordType int + +const ( + //ControlRecordAbort is a control record for abort + ControlRecordAbort ControlRecordType = iota + //ControlRecordCommit is a control record for commit + ControlRecordCommit + //ControlRecordUnknown is a control record of unknown type + ControlRecordUnknown +) + +// Control records are returned as a record by fetchRequest +// However unlike "normal" records, they mean nothing application wise. +// They only serve internal logic for supporting transactions. +type ControlRecord struct { + Version int16 + CoordinatorEpoch int32 + Type ControlRecordType +} + +func (cr *ControlRecord) decode(key, value packetDecoder) error { + var err error + cr.Version, err = value.getInt16() + if err != nil { + return err + } + + cr.CoordinatorEpoch, err = value.getInt32() + if err != nil { + return err + } + + // There a version for the value part AND the key part. 
And I have no idea if they are supposed to match or not + // Either way, all these version can only be 0 for now + cr.Version, err = key.getInt16() + if err != nil { + return err + } + + recordType, err := key.getInt16() + if err != nil { + return err + } + + switch recordType { + case 0: + cr.Type = ControlRecordAbort + case 1: + cr.Type = ControlRecordCommit + default: + // from JAVA implementation: + // UNKNOWN is used to indicate a control type which the client is not aware of and should be ignored + cr.Type = ControlRecordUnknown + } + return nil +} + +func (cr *ControlRecord) encode(key, value packetEncoder) { + value.putInt16(cr.Version) + value.putInt32(cr.CoordinatorEpoch) + key.putInt16(cr.Version) + + switch cr.Type { + case ControlRecordAbort: + key.putInt16(0) + case ControlRecordCommit: + key.putInt16(1) + } +} diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go index 5c286079..38189a3c 100644 --- a/vendor/github.com/Shopify/sarama/crc32_field.go +++ b/vendor/github.com/Shopify/sarama/crc32_field.go @@ -2,13 +2,40 @@ package sarama import ( "encoding/binary" + "fmt" + "hash/crc32" + "sync" +) + +type crcPolynomial int8 - "github.com/klauspost/crc32" +const ( + crcIEEE crcPolynomial = iota + crcCastagnoli ) +var crc32FieldPool = sync.Pool{} + +func acquireCrc32Field(polynomial crcPolynomial) *crc32Field { + val := crc32FieldPool.Get() + if val != nil { + c := val.(*crc32Field) + c.polynomial = polynomial + return c + } + return newCRC32Field(polynomial) +} + +func releaseCrc32Field(c *crc32Field) { + crc32FieldPool.Put(c) +} + +var castagnoliTable = crc32.MakeTable(crc32.Castagnoli) + // crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s. 
type crc32Field struct { startOffset int + polynomial crcPolynomial } func (c *crc32Field) saveOffset(in int) { @@ -19,18 +46,41 @@ func (c *crc32Field) reserveLength() int { return 4 } +func newCRC32Field(polynomial crcPolynomial) *crc32Field { + return &crc32Field{polynomial: polynomial} +} + func (c *crc32Field) run(curOffset int, buf []byte) error { - crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset]) + crc, err := c.crc(curOffset, buf) + if err != nil { + return err + } binary.BigEndian.PutUint32(buf[c.startOffset:], crc) return nil } func (c *crc32Field) check(curOffset int, buf []byte) error { - crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset]) + crc, err := c.crc(curOffset, buf) + if err != nil { + return err + } - if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) { - return PacketDecodingError{"CRC didn't match"} + expected := binary.BigEndian.Uint32(buf[c.startOffset:]) + if crc != expected { + return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)} } return nil } +func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) { + var tab *crc32.Table + switch c.polynomial { + case crcIEEE: + tab = crc32.IEEETable + case crcCastagnoli: + tab = castagnoliTable + default: + return 0, PacketDecodingError{"invalid CRC type"} + } + return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil +} diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go new file mode 100644 index 00000000..46fb0440 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_partitions_request.go @@ -0,0 +1,125 @@ +package sarama + +import "time" + +type CreatePartitionsRequest struct { + TopicPartitions map[string]*TopicPartition + Timeout time.Duration + ValidateOnly bool +} + +func (c *CreatePartitionsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil { + return err + } + + for topic, partition := range c.TopicPartitions { + if err := pe.putString(topic); err != nil { + return err + } + if err := partition.encode(pe); err != nil { + return err + } + } + + pe.putInt32(int32(c.Timeout / time.Millisecond)) + + pe.putBool(c.ValidateOnly) + + return nil +} + +func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + c.TopicPartitions = make(map[string]*TopicPartition, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicPartitions[topic] = new(TopicPartition) + if err := c.TopicPartitions[topic].decode(pd, version); err != nil { + return err + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + c.Timeout = time.Duration(timeout) * time.Millisecond + + if c.ValidateOnly, err = pd.getBool(); err != nil { + return err + } + + return nil +} + +func (r *CreatePartitionsRequest) key() int16 { + return 37 +} + +func (r *CreatePartitionsRequest) version() int16 { + return 0 +} + +func (r *CreatePartitionsRequest) headerVersion() int16 { + return 1 +} + +func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion { + return V1_0_0_0 +} + +type TopicPartition struct { + Count int32 + Assignment [][]int32 +} + +func (t *TopicPartition) encode(pe packetEncoder) error { + pe.putInt32(t.Count) + + if len(t.Assignment) == 0 { + pe.putInt32(-1) + return nil + } + + if err := pe.putArrayLength(len(t.Assignment)); 
err != nil { + return err + } + + for _, assign := range t.Assignment { + if err := pe.putInt32Array(assign); err != nil { + return err + } + } + + return nil +} + +func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) { + if t.Count, err = pd.getInt32(); err != nil { + return err + } + + n, err := pd.getInt32() + if err != nil { + return err + } + if n <= 0 { + return nil + } + t.Assignment = make([][]int32, n) + + for i := 0; i < int(n); i++ { + if t.Assignment[i], err = pd.getInt32Array(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go new file mode 100644 index 00000000..12ce7885 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go @@ -0,0 +1,109 @@ +package sarama + +import ( + "fmt" + "time" +) + +type CreatePartitionsResponse struct { + ThrottleTime time.Duration + TopicPartitionErrors map[string]*TopicPartitionError +} + +func (c *CreatePartitionsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) + if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil { + return err + } + + for topic, partitionError := range c.TopicPartitionErrors { + if err := pe.putString(topic); err != nil { + return err + } + if err := partitionError.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicPartitionErrors[topic] = new(TopicPartitionError) + if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (r *CreatePartitionsResponse) key() int16 { + return 37 +} + +func (r *CreatePartitionsResponse) version() int16 { + return 0 +} + +func (r *CreatePartitionsResponse) headerVersion() int16 { + return 0 +} + +func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion { + return V1_0_0_0 +} + +type TopicPartitionError struct { + Err KError + ErrMsg *string +} + +func (t *TopicPartitionError) Error() string { + text := t.Err.Error() + if t.ErrMsg != nil { + text = fmt.Sprintf("%s - %s", text, *t.ErrMsg) + } + return text +} + +func (t *TopicPartitionError) encode(pe packetEncoder) error { + pe.putInt16(int16(t.Err)) + + if err := pe.putNullableString(t.ErrMsg); err != nil { + return err + } + + return nil +} + +func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(kerr) + + if t.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/Shopify/sarama/create_topics_request.go new file mode 100644 index 00000000..287acd06 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/create_topics_request.go @@ -0,0 +1,178 @@ +package sarama + +import ( + "time" +) + +type CreateTopicsRequest struct { + Version int16 + + TopicDetails 
map[string]*TopicDetail + Timeout time.Duration + ValidateOnly bool +} + +func (c *CreateTopicsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(c.TopicDetails)); err != nil { + return err + } + for topic, detail := range c.TopicDetails { + if err := pe.putString(topic); err != nil { + return err + } + if err := detail.encode(pe); err != nil { + return err + } + } + + pe.putInt32(int32(c.Timeout / time.Millisecond)) + + if c.Version >= 1 { + pe.putBool(c.ValidateOnly) + } + + return nil +} + +func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicDetails = make(map[string]*TopicDetail, n) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicDetails[topic] = new(TopicDetail) + if err = c.TopicDetails[topic].decode(pd, version); err != nil { + return err + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + c.Timeout = time.Duration(timeout) * time.Millisecond + + if version >= 1 { + c.ValidateOnly, err = pd.getBool() + if err != nil { + return err + } + + c.Version = version + } + + return nil +} + +func (c *CreateTopicsRequest) key() int16 { + return 19 +} + +func (c *CreateTopicsRequest) version() int16 { + return c.Version +} + +func (r *CreateTopicsRequest) headerVersion() int16 { + return 1 +} + +func (c *CreateTopicsRequest) requiredVersion() KafkaVersion { + switch c.Version { + case 2: + return V1_0_0_0 + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} + +type TopicDetail struct { + NumPartitions int32 + ReplicationFactor int16 + ReplicaAssignment map[int32][]int32 + ConfigEntries map[string]*string +} + +func (t *TopicDetail) encode(pe packetEncoder) error { + pe.putInt32(t.NumPartitions) + pe.putInt16(t.ReplicationFactor) + + if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil { + return err + } + for partition, assignment := range t.ReplicaAssignment { + pe.putInt32(partition) + if err := pe.putInt32Array(assignment); err != nil { + return err + } + } + + if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil { + return err + } + for configKey, configValue := range t.ConfigEntries { + if err := pe.putString(configKey); err != nil { + return err + } + if err := pe.putNullableString(configValue); err != nil { + return err + } + } + + return nil +} + +func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) { + if t.NumPartitions, err = pd.getInt32(); err != nil { + return err + } + if t.ReplicationFactor, err = pd.getInt16(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.ReplicaAssignment = make(map[int32][]int32, n) + for i := 0; i < n; i++ { + replica, err := pd.getInt32() + if err != nil { + return err + } + if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil { + return err + } + } + } + + n, err = pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.ConfigEntries = make(map[string]*string, n) + for i := 0; i < n; i++ { + configKey, err := pd.getString() + if err != nil { + return err + } + if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go new file mode 100644 index 00000000..7e1448a6 --- /dev/null 
+++ b/vendor/github.com/Shopify/sarama/create_topics_response.go @@ -0,0 +1,127 @@ +package sarama + +import ( + "fmt" + "time" +) + +type CreateTopicsResponse struct { + Version int16 + ThrottleTime time.Duration + TopicErrors map[string]*TopicError +} + +func (c *CreateTopicsResponse) encode(pe packetEncoder) error { + if c.Version >= 2 { + pe.putInt32(int32(c.ThrottleTime / time.Millisecond)) + } + + if err := pe.putArrayLength(len(c.TopicErrors)); err != nil { + return err + } + for topic, topicError := range c.TopicErrors { + if err := pe.putString(topic); err != nil { + return err + } + if err := topicError.encode(pe, c.Version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) { + c.Version = version + + if version >= 2 { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + c.TopicErrors = make(map[string]*TopicError, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + c.TopicErrors[topic] = new(TopicError) + if err := c.TopicErrors[topic].decode(pd, version); err != nil { + return err + } + } + + return nil +} + +func (c *CreateTopicsResponse) key() int16 { + return 19 +} + +func (c *CreateTopicsResponse) version() int16 { + return c.Version +} + +func (c *CreateTopicsResponse) headerVersion() int16 { + return 0 +} + +func (c *CreateTopicsResponse) requiredVersion() KafkaVersion { + switch c.Version { + case 2: + return V1_0_0_0 + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} + +type TopicError struct { + Err KError + ErrMsg *string +} + +func (t *TopicError) Error() string { + text := t.Err.Error() + if t.ErrMsg != nil { + text = fmt.Sprintf("%s - %s", text, *t.ErrMsg) + } + return text +} + +func (t *TopicError) encode(pe packetEncoder, version int16) error { + pe.putInt16(int16(t.Err)) + + if version >= 1 { + if err := pe.putNullableString(t.ErrMsg); err != nil { + return err + } + } + + return nil +} + +func (t *TopicError) decode(pd packetDecoder, version int16) (err error) { + kErr, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(kErr) + + if version >= 1 { + if t.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go new file mode 100644 index 00000000..e4dc3c18 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/decompress.go @@ -0,0 +1,63 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "sync" + + snappy "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4" +) + +var ( + lz4ReaderPool = sync.Pool{ + New: func() interface{} { + return lz4.NewReader(nil) + }, + } + + gzipReaderPool sync.Pool +) + +func decompress(cc CompressionCodec, data []byte) ([]byte, error) { + switch cc { + case CompressionNone: + return data, nil + case CompressionGZIP: + var ( + err error + reader *gzip.Reader + readerIntf = gzipReaderPool.Get() + ) + if readerIntf != nil { + reader = readerIntf.(*gzip.Reader) + } else { + reader, err = gzip.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, err + } + } + + defer gzipReaderPool.Put(reader) + + if err := reader.Reset(bytes.NewReader(data)); err != nil { + return nil, err 
+ } + + return ioutil.ReadAll(reader) + case CompressionSnappy: + return snappy.Decode(data) + case CompressionLZ4: + reader := lz4ReaderPool.Get().(*lz4.Reader) + defer lz4ReaderPool.Put(reader) + + reader.Reset(bytes.NewReader(data)) + return ioutil.ReadAll(reader) + case CompressionZSTD: + return zstdDecompress(nil, data) + default: + return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)} + } +} diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/Shopify/sarama/delete_groups_request.go new file mode 100644 index 00000000..4ac8bbee --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_groups_request.go @@ -0,0 +1,34 @@ +package sarama + +type DeleteGroupsRequest struct { + Groups []string +} + +func (r *DeleteGroupsRequest) encode(pe packetEncoder) error { + return pe.putStringArray(r.Groups) +} + +func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Groups, err = pd.getStringArray() + return +} + +func (r *DeleteGroupsRequest) key() int16 { + return 42 +} + +func (r *DeleteGroupsRequest) version() int16 { + return 0 +} + +func (r *DeleteGroupsRequest) headerVersion() int16 { + return 1 +} + +func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion { + return V1_1_0_0 +} + +func (r *DeleteGroupsRequest) AddGroup(group string) { + r.Groups = append(r.Groups, group) +} diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/Shopify/sarama/delete_groups_response.go new file mode 100644 index 00000000..5e7b1ed3 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_groups_response.go @@ -0,0 +1,74 @@ +package sarama + +import ( + "time" +) + +type DeleteGroupsResponse struct { + ThrottleTime time.Duration + GroupErrorCodes map[string]KError +} + +func (r *DeleteGroupsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil { + return err + } + for groupID, errorCode := range r.GroupErrorCodes { + if err := pe.putString(groupID); err != nil { + return err + } + pe.putInt16(int16(errorCode)) + } + + return nil +} + +func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) error { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.GroupErrorCodes = make(map[string]KError, n) + for i := 0; i < n; i++ { + groupID, err := pd.getString() + if err != nil { + return err + } + errorCode, err := pd.getInt16() + if err != nil { + return err + } + + r.GroupErrorCodes[groupID] = KError(errorCode) + } + + return nil +} + +func (r *DeleteGroupsResponse) key() int16 { + return 42 +} + +func (r *DeleteGroupsResponse) version() int16 { + return 0 +} + +func (r *DeleteGroupsResponse) headerVersion() int16 { + return 0 +} + +func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion { + return V1_1_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/Shopify/sarama/delete_records_request.go new file mode 100644 index 00000000..dc106b17 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_records_request.go @@ -0,0 +1,130 @@ +package sarama + +import ( + "sort" + "time" +) + +// request message format is: +// [topic] 
timeout(int32) +// where topic is: +// name(string) [partition] +// where partition is: +// id(int32) offset(int64) + +type DeleteRecordsRequest struct { + Topics map[string]*DeleteRecordsRequestTopic + Timeout time.Duration +} + +func (d *DeleteRecordsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(d.Topics)); err != nil { + return err + } + keys := make([]string, 0, len(d.Topics)) + for topic := range d.Topics { + keys = append(keys, topic) + } + sort.Strings(keys) + for _, topic := range keys { + if err := pe.putString(topic); err != nil { + return err + } + if err := d.Topics[topic].encode(pe); err != nil { + return err + } + } + pe.putInt32(int32(d.Timeout / time.Millisecond)) + + return nil +} + +func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + d.Topics = make(map[string]*DeleteRecordsRequestTopic, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + details := new(DeleteRecordsRequestTopic) + if err = details.decode(pd, version); err != nil { + return err + } + d.Topics[topic] = details + } + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + d.Timeout = time.Duration(timeout) * time.Millisecond + + return nil +} + +func (d *DeleteRecordsRequest) key() int16 { + return 21 +} + +func (d *DeleteRecordsRequest) version() int16 { + return 0 +} + +func (d *DeleteRecordsRequest) headerVersion() int16 { + return 1 +} + +func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type DeleteRecordsRequestTopic struct { + PartitionOffsets map[int32]int64 // partition => offset +} + +func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil { + return err + } + keys := make([]int32, 0, len(t.PartitionOffsets)) + for partition := range t.PartitionOffsets { + keys = append(keys, partition) + } + sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) + for _, partition := range keys { + pe.putInt32(partition) + pe.putInt64(t.PartitionOffsets[partition]) + } + return nil +} + +func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.PartitionOffsets = make(map[int32]int64, n) + for i := 0; i < n; i++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + offset, err := pd.getInt64() + if err != nil { + return err + } + t.PartitionOffsets[partition] = offset + } + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/Shopify/sarama/delete_records_response.go new file mode 100644 index 00000000..d530b4c7 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_records_response.go @@ -0,0 +1,162 @@ +package sarama + +import ( + "sort" + "time" +) + +// response message format is: +// throttleMs(int32) [topic] +// where topic is: +// name(string) [partition] +// where partition is: +// id(int32) low_watermark(int64) error_code(int16) + +type DeleteRecordsResponse struct { + Version int16 + ThrottleTime time.Duration + Topics map[string]*DeleteRecordsResponseTopic +} + +func (d *DeleteRecordsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(d.Topics)); err != nil { + return err + } + 
keys := make([]string, 0, len(d.Topics)) + for topic := range d.Topics { + keys = append(keys, topic) + } + sort.Strings(keys) + for _, topic := range keys { + if err := pe.putString(topic); err != nil { + return err + } + if err := d.Topics[topic].encode(pe); err != nil { + return err + } + } + return nil +} + +func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) error { + d.Version = version + + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + d.Topics = make(map[string]*DeleteRecordsResponseTopic, n) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + details := new(DeleteRecordsResponseTopic) + if err = details.decode(pd, version); err != nil { + return err + } + d.Topics[topic] = details + } + } + + return nil +} + +func (d *DeleteRecordsResponse) key() int16 { + return 21 +} + +func (d *DeleteRecordsResponse) version() int16 { + return 0 +} + +func (d *DeleteRecordsResponse) headerVersion() int16 { + return 0 +} + +func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type DeleteRecordsResponseTopic struct { + Partitions map[int32]*DeleteRecordsResponsePartition +} + +func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(t.Partitions)); err != nil { + return err + } + keys := make([]int32, 0, len(t.Partitions)) + for partition := range t.Partitions { + keys = append(keys, partition) + } + sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) + for _, partition := range keys { + pe.putInt32(partition) + if err := t.Partitions[partition].encode(pe); err != nil { + return err + } + } + return nil +} + +func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + if n > 0 { + t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n) + for i := 0; i < n; i++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + details := new(DeleteRecordsResponsePartition) + if err = details.decode(pd, version); err != nil { + return err + } + t.Partitions[partition] = details + } + } + + return nil +} + +type DeleteRecordsResponsePartition struct { + LowWatermark int64 + Err KError +} + +func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error { + pe.putInt64(t.LowWatermark) + pe.putInt16(int16(t.Err)) + return nil +} + +func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error { + lowWatermark, err := pd.getInt64() + if err != nil { + return err + } + t.LowWatermark = lowWatermark + + kErr, err := pd.getInt16() + if err != nil { + return err + } + t.Err = KError(kErr) + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/Shopify/sarama/delete_topics_request.go new file mode 100644 index 00000000..ba6780a8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_topics_request.go @@ -0,0 +1,52 @@ +package sarama + +import "time" + +type DeleteTopicsRequest struct { + Version int16 + Topics []string + Timeout time.Duration +} + +func (d *DeleteTopicsRequest) encode(pe packetEncoder) error { + if err := pe.putStringArray(d.Topics); err != nil { + return err + } + pe.putInt32(int32(d.Timeout / time.Millisecond)) + + return nil +} + 
+func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) { + if d.Topics, err = pd.getStringArray(); err != nil { + return err + } + timeout, err := pd.getInt32() + if err != nil { + return err + } + d.Timeout = time.Duration(timeout) * time.Millisecond + d.Version = version + return nil +} + +func (d *DeleteTopicsRequest) key() int16 { + return 20 +} + +func (d *DeleteTopicsRequest) version() int16 { + return d.Version +} + +func (d *DeleteTopicsRequest) headerVersion() int16 { + return 1 +} + +func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go new file mode 100644 index 00000000..733961a8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/delete_topics_response.go @@ -0,0 +1,82 @@ +package sarama + +import "time" + +type DeleteTopicsResponse struct { + Version int16 + ThrottleTime time.Duration + TopicErrorCodes map[string]KError +} + +func (d *DeleteTopicsResponse) encode(pe packetEncoder) error { + if d.Version >= 1 { + pe.putInt32(int32(d.ThrottleTime / time.Millisecond)) + } + + if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil { + return err + } + for topic, errorCode := range d.TopicErrorCodes { + if err := pe.putString(topic); err != nil { + return err + } + pe.putInt16(int16(errorCode)) + } + + return nil +} + +func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) { + if version >= 1 { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + d.Version = version + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + d.TopicErrorCodes = make(map[string]KError, n) + + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + errorCode, err := pd.getInt16() + if err != nil { + return err + } + + d.TopicErrorCodes[topic] = KError(errorCode) + } + + return nil +} + +func (d *DeleteTopicsResponse) key() int16 { + return 20 +} + +func (d *DeleteTopicsResponse) version() int16 { + return d.Version +} + +func (d *DeleteTopicsResponse) headerVersion() int16 { + return 0 +} + +func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion { + switch d.Version { + case 1: + return V0_11_0_0 + default: + return V0_10_1_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go new file mode 100644 index 00000000..d0c73528 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_configs_request.go @@ -0,0 +1,116 @@ +package sarama + +type DescribeConfigsRequest struct { + Version int16 + Resources []*ConfigResource + IncludeSynonyms bool +} + +type ConfigResource struct { + Type ConfigResourceType + Name string + ConfigNames []string +} + +func (r *DescribeConfigsRequest) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Resources)); err != nil { + return err + } + + for _, c := range r.Resources { + pe.putInt8(int8(c.Type)) + if err := pe.putString(c.Name); err != nil { + return err + } + + if len(c.ConfigNames) == 0 { + pe.putInt32(-1) + continue + } + if err := pe.putStringArray(c.ConfigNames); err != nil { + return err + } + } + + if r.Version >= 1 { + pe.putBool(r.IncludeSynonyms) + } + + return 
nil +} + +func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Resources = make([]*ConfigResource, n) + + for i := 0; i < n; i++ { + r.Resources[i] = &ConfigResource{} + t, err := pd.getInt8() + if err != nil { + return err + } + r.Resources[i].Type = ConfigResourceType(t) + name, err := pd.getString() + if err != nil { + return err + } + r.Resources[i].Name = name + + confLength, err := pd.getArrayLength() + + if err != nil { + return err + } + + if confLength == -1 { + continue + } + + cfnames := make([]string, confLength) + for i := 0; i < confLength; i++ { + s, err := pd.getString() + if err != nil { + return err + } + cfnames[i] = s + } + r.Resources[i].ConfigNames = cfnames + } + r.Version = version + if r.Version >= 1 { + b, err := pd.getBool() + if err != nil { + return err + } + r.IncludeSynonyms = b + } + + return nil +} + +func (r *DescribeConfigsRequest) key() int16 { + return 32 +} + +func (r *DescribeConfigsRequest) version() int16 { + return r.Version +} + +func (r *DescribeConfigsRequest) headerVersion() int16 { + return 1 +} + +func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V1_1_0_0 + case 2: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go new file mode 100644 index 00000000..063ae911 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_configs_response.go @@ -0,0 +1,327 @@ +package sarama + +import ( + "fmt" + "time" +) + +type ConfigSource int8 + +func (s ConfigSource) String() string { + switch s { + case SourceUnknown: + return "Unknown" + case SourceTopic: + return "Topic" + case SourceDynamicBroker: + return "DynamicBroker" + case SourceDynamicDefaultBroker: + return "DynamicDefaultBroker" + case SourceStaticBroker: + return "StaticBroker" + case SourceDefault: + return "Default" + } + return fmt.Sprintf("Source Invalid: %d", int(s)) +} + +const ( + SourceUnknown ConfigSource = iota + SourceTopic + SourceDynamicBroker + SourceDynamicDefaultBroker + SourceStaticBroker + SourceDefault +) + +type DescribeConfigsResponse struct { + Version int16 + ThrottleTime time.Duration + Resources []*ResourceResponse +} + +type ResourceResponse struct { + ErrorCode int16 + ErrorMsg string + Type ConfigResourceType + Name string + Configs []*ConfigEntry +} + +type ConfigEntry struct { + Name string + Value string + ReadOnly bool + Default bool + Source ConfigSource + Sensitive bool + Synonyms []*ConfigSynonym +} + +type ConfigSynonym struct { + ConfigName string + ConfigValue string + Source ConfigSource +} + +func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + if err = pe.putArrayLength(len(r.Resources)); err != nil { + return err + } + + for _, c := range r.Resources { + if err = c.encode(pe, r.Version); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Resources = make([]*ResourceResponse, n) + for i := 0; i < n; i++ { + rr := 
&ResourceResponse{} + if err := rr.decode(pd, version); err != nil { + return err + } + r.Resources[i] = rr + } + + return nil +} + +func (r *DescribeConfigsResponse) key() int16 { + return 32 +} + +func (r *DescribeConfigsResponse) version() int16 { + return r.Version +} + +func (r *DescribeConfigsResponse) headerVersion() int16 { + return 0 +} + +func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V1_0_0_0 + case 2: + return V2_0_0_0 + default: + return V0_11_0_0 + } +} + +func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(r.ErrorCode) + + if err = pe.putString(r.ErrorMsg); err != nil { + return err + } + + pe.putInt8(int8(r.Type)) + + if err = pe.putString(r.Name); err != nil { + return err + } + + if err = pe.putArrayLength(len(r.Configs)); err != nil { + return err + } + + for _, c := range r.Configs { + if err = c.encode(pe, version); err != nil { + return err + } + } + return nil +} + +func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) { + ec, err := pd.getInt16() + if err != nil { + return err + } + r.ErrorCode = ec + + em, err := pd.getString() + if err != nil { + return err + } + r.ErrorMsg = em + + t, err := pd.getInt8() + if err != nil { + return err + } + r.Type = ConfigResourceType(t) + + name, err := pd.getString() + if err != nil { + return err + } + r.Name = name + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Configs = make([]*ConfigEntry, n) + for i := 0; i < n; i++ { + c := &ConfigEntry{} + if err := c.decode(pd, version); err != nil { + return err + } + r.Configs[i] = c + } + return nil +} + +func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) { + if err = pe.putString(r.Name); err != nil { + return err + } + + if err = pe.putString(r.Value); err != nil { + return err + } + + pe.putBool(r.ReadOnly) + + if version <= 0 { + pe.putBool(r.Default) + pe.putBool(r.Sensitive) + } else { + pe.putInt8(int8(r.Source)) + pe.putBool(r.Sensitive) + + if err := pe.putArrayLength(len(r.Synonyms)); err != nil { + return err + } + for _, c := range r.Synonyms { + if err = c.encode(pe, version); err != nil { + return err + } + } + } + + return nil +} + +//https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration +func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) { + if version == 0 { + r.Source = SourceUnknown + } + name, err := pd.getString() + if err != nil { + return err + } + r.Name = name + + value, err := pd.getString() + if err != nil { + return err + } + r.Value = value + + read, err := pd.getBool() + if err != nil { + return err + } + r.ReadOnly = read + + if version == 0 { + defaultB, err := pd.getBool() + if err != nil { + return err + } + r.Default = defaultB + if defaultB { + r.Source = SourceDefault + } + } else { + source, err := pd.getInt8() + if err != nil { + return err + } + r.Source = ConfigSource(source) + r.Default = r.Source == SourceDefault + } + + sensitive, err := pd.getBool() + if err != nil { + return err + } + r.Sensitive = sensitive + + if version > 0 { + n, err := pd.getArrayLength() + if err != nil { + return err + } + r.Synonyms = make([]*ConfigSynonym, n) + + for i := 0; i < n; i++ { + s := &ConfigSynonym{} + if err := s.decode(pd, version); err != nil { + return err + } + r.Synonyms[i] = s + } + } + return nil +} + +func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) { + err = 
pe.putString(c.ConfigName) + if err != nil { + return err + } + + err = pe.putString(c.ConfigValue) + if err != nil { + return err + } + + pe.putInt8(int8(c.Source)) + + return nil +} + +func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error { + name, err := pd.getString() + if err != nil { + return nil + } + c.ConfigName = name + + value, err := pd.getString() + if err != nil { + return nil + } + c.ConfigValue = value + + source, err := pd.getInt8() + if err != nil { + return nil + } + c.Source = ConfigSource(source) + return nil +} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go index 1fb35677..f8962da5 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_request.go +++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go @@ -21,6 +21,10 @@ func (r *DescribeGroupsRequest) version() int16 { return 0 } +func (r *DescribeGroupsRequest) headerVersion() int16 { + return 1 +} + func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go index d2c2071e..bc242e42 100644 --- a/vendor/github.com/Shopify/sarama/describe_groups_response.go +++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go @@ -43,6 +43,10 @@ func (r *DescribeGroupsResponse) version() int16 { return 0 } +func (r *DescribeGroupsResponse) headerVersion() int16 { + return 0 +} + func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } @@ -89,12 +93,13 @@ func (gd *GroupDescription) encode(pe packetEncoder) error { } func (gd *GroupDescription) decode(pd packetDecoder) (err error) { - if kerr, err := pd.getInt16(); err != nil { + kerr, err := pd.getInt16() + if err != nil { return err - } else { - gd.Err = KError(kerr) } + gd.Err = KError(kerr) + if gd.GroupId, err = pd.getString(); err != nil { return } diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go new file mode 100644 index 00000000..c0bf04e0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go @@ -0,0 +1,87 @@ +package sarama + +// DescribeLogDirsRequest is a describe request to get partitions' log size +type DescribeLogDirsRequest struct { + // Version 0 and 1 are equal + // The version number is bumped to indicate that on quota violation brokers send out responses before throttling. 
+ Version int16 + + // If this is an empty array, all topics will be queried + DescribeTopics []DescribeLogDirsRequestTopic +} + +// DescribeLogDirsRequestTopic is a describe request about the log dir of one or more partitions within a Topic +type DescribeLogDirsRequestTopic struct { + Topic string + PartitionIDs []int32 +} + +func (r *DescribeLogDirsRequest) encode(pe packetEncoder) error { + length := len(r.DescribeTopics) + if length == 0 { + // In order to query all topics we must send null + length = -1 + } + + if err := pe.putArrayLength(length); err != nil { + return err + } + + for _, d := range r.DescribeTopics { + if err := pe.putString(d.Topic); err != nil { + return err + } + + if err := pe.putInt32Array(d.PartitionIDs); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeLogDirsRequest) decode(pd packetDecoder, version int16) error { + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == -1 { + n = 0 + } + + topics := make([]DescribeLogDirsRequestTopic, n) + for i := 0; i < n; i++ { + topics[i] = DescribeLogDirsRequestTopic{} + + topic, err := pd.getString() + if err != nil { + return err + } + topics[i].Topic = topic + + pIDs, err := pd.getInt32Array() + if err != nil { + return err + } + topics[i].PartitionIDs = pIDs + } + r.DescribeTopics = topics + + return nil +} + +func (r *DescribeLogDirsRequest) key() int16 { + return 35 +} + +func (r *DescribeLogDirsRequest) version() int16 { + return r.Version +} + +func (r *DescribeLogDirsRequest) headerVersion() int16 { + return 1 +} + +func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion { + return V1_0_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go new file mode 100644 index 00000000..411da38a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go @@ -0,0 +1,229 @@ +package sarama + +import "time" + +type DescribeLogDirsResponse struct { + ThrottleTime time.Duration + + // Version 0 and 1 are equal + // The version number is bumped to indicate that on quota violation brokers send out responses before throttling. 
+ Version int16 + + LogDirs []DescribeLogDirsResponseDirMetadata +} + +func (r *DescribeLogDirsResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + + if err := pe.putArrayLength(len(r.LogDirs)); err != nil { + return err + } + + for _, dir := range r.LogDirs { + if err := dir.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeLogDirsResponse) decode(pd packetDecoder, version int16) error { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + // Decode array of DescribeLogDirsResponseDirMetadata + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.LogDirs = make([]DescribeLogDirsResponseDirMetadata, n) + for i := 0; i < n; i++ { + dir := DescribeLogDirsResponseDirMetadata{} + if err := dir.decode(pd, version); err != nil { + return err + } + r.LogDirs[i] = dir + } + + return nil +} + +func (r *DescribeLogDirsResponse) key() int16 { + return 35 +} + +func (r *DescribeLogDirsResponse) version() int16 { + return r.Version +} + +func (r *DescribeLogDirsResponse) headerVersion() int16 { + return 0 +} + +func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion { + return V1_0_0_0 +} + +type DescribeLogDirsResponseDirMetadata struct { + ErrorCode KError + + // The absolute log directory path + Path string + Topics []DescribeLogDirsResponseTopic +} + +func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder) error { + pe.putInt16(int16(r.ErrorCode)) + + if err := pe.putString(r.Path); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.Topics)); err != nil { + return err + } + for _, topic := range r.Topics { + if err := topic.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeLogDirsResponseDirMetadata) decode(pd packetDecoder, version int16) error { + errCode, err := pd.getInt16() + if err != nil { + return err + } + r.ErrorCode = KError(errCode) + + path, err := pd.getString() + if err != nil { + return err + } + r.Path = path + + // Decode array of DescribeLogDirsResponseTopic + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Topics = make([]DescribeLogDirsResponseTopic, n) + for i := 0; i < n; i++ { + t := DescribeLogDirsResponseTopic{} + + if err := t.decode(pd, version); err != nil { + return err + } + + r.Topics[i] = t + } + + return nil +} + +// DescribeLogDirsResponseTopic contains a topic's partitions descriptions +type DescribeLogDirsResponseTopic struct { + Topic string + Partitions []DescribeLogDirsResponsePartition +} + +func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder) error { + if err := pe.putString(r.Topic); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.Partitions)); err != nil { + return err + } + for _, partition := range r.Partitions { + if err := partition.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeLogDirsResponseTopic) decode(pd packetDecoder, version int16) error { + t, err := pd.getString() + if err != nil { + return err + } + r.Topic = t + + n, err := pd.getArrayLength() + if err != nil { + return err + } + r.Partitions = make([]DescribeLogDirsResponsePartition, n) + for i := 0; i < n; i++ { + p := DescribeLogDirsResponsePartition{} + if err := p.decode(pd, version); err != nil { + return err + } + r.Partitions[i] = p + } + + return nil +} + +// DescribeLogDirsResponsePartition describes a partition's log directory 
+type DescribeLogDirsResponsePartition struct { + PartitionID int32 + + // The size of the log segments of the partition in bytes. + Size int64 + + // The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or + // current replica's LEO (if it is the future log for the partition) + OffsetLag int64 + + // True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of + // the replica in the future. + IsTemporary bool +} + +func (r *DescribeLogDirsResponsePartition) encode(pe packetEncoder) error { + pe.putInt32(r.PartitionID) + pe.putInt64(r.Size) + pe.putInt64(r.OffsetLag) + pe.putBool(r.IsTemporary) + + return nil +} + +func (r *DescribeLogDirsResponsePartition) decode(pd packetDecoder, version int16) error { + pID, err := pd.getInt32() + if err != nil { + return err + } + r.PartitionID = pID + + size, err := pd.getInt64() + if err != nil { + return err + } + r.Size = size + + lag, err := pd.getInt64() + if err != nil { + return err + } + r.OffsetLag = lag + + isTemp, err := pd.getBool() + if err != nil { + return err + } + r.IsTemporary = isTemp + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml index e014316f..57b2d3ca 100644 --- a/vendor/github.com/Shopify/sarama/dev.yml +++ b/vendor/github.com/Shopify/sarama/dev.yml @@ -1,13 +1,10 @@ name: sarama up: - - go: 1.7.3 + - go: + version: '1.14' commands: test: run: make test desc: 'run unit tests' - -packages: - - git@github.com:Shopify/dev-shopify.git - diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go index 7ce3bc0f..025bad61 100644 --- a/vendor/github.com/Shopify/sarama/encoder_decoder.go +++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go @@ -12,6 +12,11 @@ type encoder interface { encode(pe packetEncoder) error } +type encoderWithHeader interface { + encoder + headerVersion() int16 +} + // Encode takes an Encoder and turns it into bytes while potentially recording metrics. 
func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) { if e == nil { diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/Shopify/sarama/end_txn_request.go new file mode 100644 index 00000000..6635425d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/end_txn_request.go @@ -0,0 +1,54 @@ +package sarama + +type EndTxnRequest struct { + TransactionalID string + ProducerID int64 + ProducerEpoch int16 + TransactionResult bool +} + +func (a *EndTxnRequest) encode(pe packetEncoder) error { + if err := pe.putString(a.TransactionalID); err != nil { + return err + } + + pe.putInt64(a.ProducerID) + + pe.putInt16(a.ProducerEpoch) + + pe.putBool(a.TransactionResult) + + return nil +} + +func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) { + if a.TransactionalID, err = pd.getString(); err != nil { + return err + } + if a.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if a.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + if a.TransactionResult, err = pd.getBool(); err != nil { + return err + } + return nil +} + +func (a *EndTxnRequest) key() int16 { + return 26 +} + +func (a *EndTxnRequest) version() int16 { + return 0 +} + +func (r *EndTxnRequest) headerVersion() int16 { + return 1 +} + +func (a *EndTxnRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go new file mode 100644 index 00000000..76397672 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/end_txn_response.go @@ -0,0 +1,48 @@ +package sarama + +import ( + "time" +) + +type EndTxnResponse struct { + ThrottleTime time.Duration + Err KError +} + +func (e *EndTxnResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(e.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(e.Err)) + return nil +} + +func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + e.Err = KError(kerr) + + return nil +} + +func (e *EndTxnResponse) key() int16 { + return 25 +} + +func (e *EndTxnResponse) version() int16 { + return 0 +} + +func (r *EndTxnResponse) headerVersion() int16 { + return 0 +} + +func (e *EndTxnResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go index cc3f623d..ca621b09 100644 --- a/vendor/github.com/Shopify/sarama/errors.go +++ b/vendor/github.com/Shopify/sarama/errors.go @@ -37,6 +37,18 @@ var ErrShuttingDown = errors.New("kafka: message received by producer in process // ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max") +// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing +// a RecordBatch. +var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch") + +// ErrControllerNotAvailable is returned when server didn't give correct controller id. May be kafka server's version +// is lower than 0.10.0.0. 
+var ErrControllerNotAvailable = errors.New("kafka: controller is not available") + +// ErrNoTopicsToUpdateMetadata is returned when Meta.Full is set to false but no specific topics were found to update +// the metadata. +var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata") + // PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example, // if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that. type PacketEncodingError struct { @@ -69,51 +81,135 @@ func (err ConfigurationError) Error() string { // See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes type KError int16 +// MultiError is used to contain multi error. +type MultiError struct { + Errors *[]error +} + +func (mErr MultiError) Error() string { + var errString = "" + for _, err := range *mErr.Errors { + errString += err.Error() + "," + } + return errString +} + +func (mErr MultiError) PrettyError() string { + var errString = "" + for _, err := range *mErr.Errors { + errString += err.Error() + "\n" + } + return errString +} + +// ErrDeleteRecords is the type of error returned when fail to delete the required records +type ErrDeleteRecords struct { + MultiError +} + +func (err ErrDeleteRecords) Error() string { + return "kafka server: failed to delete records " + err.MultiError.Error() +} + +type ErrReassignPartitions struct { + MultiError +} + +func (err ErrReassignPartitions) Error() string { + return fmt.Sprintf("failed to reassign partitions for topic: \n%s", err.MultiError.PrettyError()) +} + // Numeric error codes returned by the Kafka server. const ( - ErrNoError KError = 0 - ErrUnknown KError = -1 - ErrOffsetOutOfRange KError = 1 - ErrInvalidMessage KError = 2 - ErrUnknownTopicOrPartition KError = 3 - ErrInvalidMessageSize KError = 4 - ErrLeaderNotAvailable KError = 5 - ErrNotLeaderForPartition KError = 6 - ErrRequestTimedOut KError = 7 - ErrBrokerNotAvailable KError = 8 - ErrReplicaNotAvailable KError = 9 - ErrMessageSizeTooLarge KError = 10 - ErrStaleControllerEpochCode KError = 11 - ErrOffsetMetadataTooLarge KError = 12 - ErrNetworkException KError = 13 - ErrOffsetsLoadInProgress KError = 14 - ErrConsumerCoordinatorNotAvailable KError = 15 - ErrNotCoordinatorForConsumer KError = 16 - ErrInvalidTopic KError = 17 - ErrMessageSetSizeTooLarge KError = 18 - ErrNotEnoughReplicas KError = 19 - ErrNotEnoughReplicasAfterAppend KError = 20 - ErrInvalidRequiredAcks KError = 21 - ErrIllegalGeneration KError = 22 - ErrInconsistentGroupProtocol KError = 23 - ErrInvalidGroupId KError = 24 - ErrUnknownMemberId KError = 25 - ErrInvalidSessionTimeout KError = 26 - ErrRebalanceInProgress KError = 27 - ErrInvalidCommitOffsetSize KError = 28 - ErrTopicAuthorizationFailed KError = 29 - ErrGroupAuthorizationFailed KError = 30 - ErrClusterAuthorizationFailed KError = 31 - ErrInvalidTimestamp KError = 32 - ErrUnsupportedSASLMechanism KError = 33 - ErrIllegalSASLState KError = 34 - ErrUnsupportedVersion KError = 35 - ErrUnsupportedForMessageFormat KError = 43 + ErrNoError KError = 0 + ErrUnknown KError = -1 + ErrOffsetOutOfRange KError = 1 + ErrInvalidMessage KError = 2 + ErrUnknownTopicOrPartition KError = 3 + ErrInvalidMessageSize KError = 4 + ErrLeaderNotAvailable KError = 5 + ErrNotLeaderForPartition KError = 6 + ErrRequestTimedOut KError = 7 + ErrBrokerNotAvailable KError = 8 + ErrReplicaNotAvailable KError = 9 + ErrMessageSizeTooLarge 
KError = 10 + ErrStaleControllerEpochCode KError = 11 + ErrOffsetMetadataTooLarge KError = 12 + ErrNetworkException KError = 13 + ErrOffsetsLoadInProgress KError = 14 + ErrConsumerCoordinatorNotAvailable KError = 15 + ErrNotCoordinatorForConsumer KError = 16 + ErrInvalidTopic KError = 17 + ErrMessageSetSizeTooLarge KError = 18 + ErrNotEnoughReplicas KError = 19 + ErrNotEnoughReplicasAfterAppend KError = 20 + ErrInvalidRequiredAcks KError = 21 + ErrIllegalGeneration KError = 22 + ErrInconsistentGroupProtocol KError = 23 + ErrInvalidGroupId KError = 24 + ErrUnknownMemberId KError = 25 + ErrInvalidSessionTimeout KError = 26 + ErrRebalanceInProgress KError = 27 + ErrInvalidCommitOffsetSize KError = 28 + ErrTopicAuthorizationFailed KError = 29 + ErrGroupAuthorizationFailed KError = 30 + ErrClusterAuthorizationFailed KError = 31 + ErrInvalidTimestamp KError = 32 + ErrUnsupportedSASLMechanism KError = 33 + ErrIllegalSASLState KError = 34 + ErrUnsupportedVersion KError = 35 + ErrTopicAlreadyExists KError = 36 + ErrInvalidPartitions KError = 37 + ErrInvalidReplicationFactor KError = 38 + ErrInvalidReplicaAssignment KError = 39 + ErrInvalidConfig KError = 40 + ErrNotController KError = 41 + ErrInvalidRequest KError = 42 + ErrUnsupportedForMessageFormat KError = 43 + ErrPolicyViolation KError = 44 + ErrOutOfOrderSequenceNumber KError = 45 + ErrDuplicateSequenceNumber KError = 46 + ErrInvalidProducerEpoch KError = 47 + ErrInvalidTxnState KError = 48 + ErrInvalidProducerIDMapping KError = 49 + ErrInvalidTransactionTimeout KError = 50 + ErrConcurrentTransactions KError = 51 + ErrTransactionCoordinatorFenced KError = 52 + ErrTransactionalIDAuthorizationFailed KError = 53 + ErrSecurityDisabled KError = 54 + ErrOperationNotAttempted KError = 55 + ErrKafkaStorageError KError = 56 + ErrLogDirNotFound KError = 57 + ErrSASLAuthenticationFailed KError = 58 + ErrUnknownProducerID KError = 59 + ErrReassignmentInProgress KError = 60 + ErrDelegationTokenAuthDisabled KError = 61 + ErrDelegationTokenNotFound KError = 62 + ErrDelegationTokenOwnerMismatch KError = 63 + ErrDelegationTokenRequestNotAllowed KError = 64 + ErrDelegationTokenAuthorizationFailed KError = 65 + ErrDelegationTokenExpired KError = 66 + ErrInvalidPrincipalType KError = 67 + ErrNonEmptyGroup KError = 68 + ErrGroupIDNotFound KError = 69 + ErrFetchSessionIDNotFound KError = 70 + ErrInvalidFetchSessionEpoch KError = 71 + ErrListenerNotFound KError = 72 + ErrTopicDeletionDisabled KError = 73 + ErrFencedLeaderEpoch KError = 74 + ErrUnknownLeaderEpoch KError = 75 + ErrUnsupportedCompressionType KError = 76 + ErrStaleBrokerEpoch KError = 77 + ErrOffsetNotAvailable KError = 78 + ErrMemberIdRequired KError = 79 + ErrPreferredLeaderNotAvailable KError = 80 + ErrGroupMaxSizeReached KError = 81 + ErrFencedInstancedId KError = 82 ) func (err KError) Error() string { // Error messages stolen/adapted from - // https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol + // https://kafka.apache.org/protocol#protocol_error_codes switch err { case ErrNoError: return "kafka server: Not an error, why are you printing me?" @@ -189,8 +285,100 @@ func (err KError) Error() string { return "kafka server: Request is not valid given the current SASL state." case ErrUnsupportedVersion: return "kafka server: The version of API is not supported." + case ErrTopicAlreadyExists: + return "kafka server: Topic with this name already exists." + case ErrInvalidPartitions: + return "kafka server: Number of partitions is invalid." 
+ case ErrInvalidReplicationFactor: + return "kafka server: Replication-factor is invalid." + case ErrInvalidReplicaAssignment: + return "kafka server: Replica assignment is invalid." + case ErrInvalidConfig: + return "kafka server: Configuration is invalid." + case ErrNotController: + return "kafka server: This is not the correct controller for this cluster." + case ErrInvalidRequest: + return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details." case ErrUnsupportedForMessageFormat: return "kafka server: The requested operation is not supported by the message format version." + case ErrPolicyViolation: + return "kafka server: Request parameters do not satisfy the configured policy." + case ErrOutOfOrderSequenceNumber: + return "kafka server: The broker received an out of order sequence number." + case ErrDuplicateSequenceNumber: + return "kafka server: The broker received a duplicate sequence number." + case ErrInvalidProducerEpoch: + return "kafka server: Producer attempted an operation with an old epoch." + case ErrInvalidTxnState: + return "kafka server: The producer attempted a transactional operation in an invalid state." + case ErrInvalidProducerIDMapping: + return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id." + case ErrInvalidTransactionTimeout: + return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)." + case ErrConcurrentTransactions: + return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing." + case ErrTransactionCoordinatorFenced: + return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer." + case ErrTransactionalIDAuthorizationFailed: + return "kafka server: Transactional ID authorization failed." + case ErrSecurityDisabled: + return "kafka server: Security features are disabled." + case ErrOperationNotAttempted: + return "kafka server: The broker did not attempt to execute this operation." + case ErrKafkaStorageError: + return "kafka server: Disk error when trying to access log file on the disk." + case ErrLogDirNotFound: + return "kafka server: The specified log directory is not found in the broker config." + case ErrSASLAuthenticationFailed: + return "kafka server: SASL Authentication failed." + case ErrUnknownProducerID: + return "kafka server: The broker could not locate the producer metadata associated with the Producer ID." + case ErrReassignmentInProgress: + return "kafka server: A partition reassignment is in progress." + case ErrDelegationTokenAuthDisabled: + return "kafka server: Delegation Token feature is not enabled." + case ErrDelegationTokenNotFound: + return "kafka server: Delegation Token is not found on server." + case ErrDelegationTokenOwnerMismatch: + return "kafka server: Specified Principal is not valid Owner/Renewer." + case ErrDelegationTokenRequestNotAllowed: + return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels." + case ErrDelegationTokenAuthorizationFailed: + return "kafka server: Delegation Token authorization failed." + case ErrDelegationTokenExpired: + return "kafka server: Delegation Token is expired." 
+ case ErrInvalidPrincipalType: + return "kafka server: Supplied principalType is not supported." + case ErrNonEmptyGroup: + return "kafka server: The group is not empty." + case ErrGroupIDNotFound: + return "kafka server: The group id does not exist." + case ErrFetchSessionIDNotFound: + return "kafka server: The fetch session ID was not found." + case ErrInvalidFetchSessionEpoch: + return "kafka server: The fetch session epoch is invalid." + case ErrListenerNotFound: + return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed." + case ErrTopicDeletionDisabled: + return "kafka server: Topic deletion is disabled." + case ErrFencedLeaderEpoch: + return "kafka server: The leader epoch in the request is older than the epoch on the broker." + case ErrUnknownLeaderEpoch: + return "kafka server: The leader epoch in the request is newer than the epoch on the broker." + case ErrUnsupportedCompressionType: + return "kafka server: The requesting client does not support the compression type of given partition." + case ErrStaleBrokerEpoch: + return "kafka server: Broker epoch has changed" + case ErrOffsetNotAvailable: + return "kafka server: The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing" + case ErrMemberIdRequired: + return "kafka server: The group member needs to have a valid member id before actually entering a consumer group" + case ErrPreferredLeaderNotAvailable: + return "kafka server: The preferred leader was not available" + case ErrGroupMaxSizeReached: + return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members." + case ErrFencedInstancedId: + return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id." } return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err) diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go index ae701a3f..f893aeff 100644 --- a/vendor/github.com/Shopify/sarama/fetch_request.go +++ b/vendor/github.com/Shopify/sarama/fetch_request.go @@ -1,37 +1,84 @@ package sarama type fetchRequestBlock struct { - fetchOffset int64 - maxBytes int32 + Version int16 + currentLeaderEpoch int32 + fetchOffset int64 + logStartOffset int64 + maxBytes int32 } -func (b *fetchRequestBlock) encode(pe packetEncoder) error { +func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error { + b.Version = version + if b.Version >= 9 { + pe.putInt32(b.currentLeaderEpoch) + } pe.putInt64(b.fetchOffset) + if b.Version >= 5 { + pe.putInt64(b.logStartOffset) + } pe.putInt32(b.maxBytes) return nil } -func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) { +func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) { + b.Version = version + if b.Version >= 9 { + if b.currentLeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + } if b.fetchOffset, err = pd.getInt64(); err != nil { return err } + if b.Version >= 5 { + if b.logStartOffset, err = pd.getInt64(); err != nil { + return err + } + } if b.maxBytes, err = pd.getInt32(); err != nil { return err } return nil } +// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. 
See +// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at +// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes type FetchRequest struct { - MaxWaitTime int32 - MinBytes int32 - Version int16 - blocks map[string]map[int32]*fetchRequestBlock + MaxWaitTime int32 + MinBytes int32 + MaxBytes int32 + Version int16 + Isolation IsolationLevel + SessionID int32 + SessionEpoch int32 + blocks map[string]map[int32]*fetchRequestBlock + forgotten map[string][]int32 + RackID string } +type IsolationLevel int8 + +const ( + ReadUncommitted IsolationLevel = iota + ReadCommitted +) + func (r *FetchRequest) encode(pe packetEncoder) (err error) { pe.putInt32(-1) // replica ID is always -1 for clients pe.putInt32(r.MaxWaitTime) pe.putInt32(r.MinBytes) + if r.Version >= 3 { + pe.putInt32(r.MaxBytes) + } + if r.Version >= 4 { + pe.putInt8(int8(r.Isolation)) + } + if r.Version >= 7 { + pe.putInt32(r.SessionID) + pe.putInt32(r.SessionEpoch) + } err = pe.putArrayLength(len(r.blocks)) if err != nil { return err @@ -47,17 +94,44 @@ func (r *FetchRequest) encode(pe packetEncoder) (err error) { } for partition, block := range blocks { pe.putInt32(partition) - err = block.encode(pe) + err = block.encode(pe, r.Version) + if err != nil { + return err + } + } + } + if r.Version >= 7 { + err = pe.putArrayLength(len(r.forgotten)) + if err != nil { + return err + } + for topic, partitions := range r.forgotten { + err = pe.putString(topic) if err != nil { return err } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for _, partition := range partitions { + pe.putInt32(partition) + } + } + } + if r.Version >= 11 { + err = pe.putString(r.RackID) + if err != nil { + return err } } + return nil } func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { r.Version = version + if _, err = pd.getInt32(); err != nil { return err } @@ -67,6 +141,28 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { if r.MinBytes, err = pd.getInt32(); err != nil { return err } + if r.Version >= 3 { + if r.MaxBytes, err = pd.getInt32(); err != nil { + return err + } + } + if r.Version >= 4 { + isolation, err := pd.getInt8() + if err != nil { + return err + } + r.Isolation = IsolationLevel(isolation) + } + if r.Version >= 7 { + r.SessionID, err = pd.getInt32() + if err != nil { + return err + } + r.SessionEpoch, err = pd.getInt32() + if err != nil { + return err + } + } topicCount, err := pd.getArrayLength() if err != nil { return err @@ -91,12 +187,47 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { return err } fetchBlock := &fetchRequestBlock{} - if err = fetchBlock.decode(pd); err != nil { - return nil + if err = fetchBlock.decode(pd, r.Version); err != nil { + return err } r.blocks[topic][partition] = fetchBlock } } + + if r.Version >= 7 { + forgottenCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.forgotten = make(map[string][]int32) + for i := 0; i < forgottenCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.forgotten[topic] = make([]int32, partitionCount) + + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + r.forgotten[topic][j] = partition + } + } + } + + if r.Version >= 11 { + r.RackID, err = pd.getString() + if err != nil { + return 
err + } + } + return nil } @@ -108,14 +239,34 @@ func (r *FetchRequest) version() int16 { return r.Version } +func (r *FetchRequest) headerVersion() int16 { + return 1 +} + func (r *FetchRequest) requiredVersion() KafkaVersion { switch r.Version { + case 0: + return MinVersion case 1: return V0_9_0_0 case 2: return V0_10_0_0 + case 3: + return V0_10_1_0 + case 4, 5: + return V0_11_0_0 + case 6: + return V1_0_0_0 + case 7: + return V1_1_0_0 + case 8: + return V2_0_0_0 + case 9, 10: + return V2_1_0_0 + case 11: + return V2_3_0_0 default: - return minVersion + return MaxVersion } } @@ -124,13 +275,21 @@ func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int r.blocks = make(map[string]map[int32]*fetchRequestBlock) } + if r.Version >= 7 && r.forgotten == nil { + r.forgotten = make(map[string][]int32) + } + if r.blocks[topic] == nil { r.blocks[topic] = make(map[int32]*fetchRequestBlock) } tmp := new(fetchRequestBlock) + tmp.Version = r.Version tmp.maxBytes = maxBytes tmp.fetchOffset = fetchOffset + if r.Version >= 9 { + tmp.currentLeaderEpoch = int32(-1) + } r.blocks[topic][partitionID] = tmp } diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go index b56b166c..ca6d7883 100644 --- a/vendor/github.com/Shopify/sarama/fetch_response.go +++ b/vendor/github.com/Shopify/sarama/fetch_response.go @@ -1,14 +1,47 @@ package sarama -import "time" +import ( + "sort" + "time" +) + +type AbortedTransaction struct { + ProducerID int64 + FirstOffset int64 +} + +func (t *AbortedTransaction) decode(pd packetDecoder) (err error) { + if t.ProducerID, err = pd.getInt64(); err != nil { + return err + } + + if t.FirstOffset, err = pd.getInt64(); err != nil { + return err + } + + return nil +} + +func (t *AbortedTransaction) encode(pe packetEncoder) (err error) { + pe.putInt64(t.ProducerID) + pe.putInt64(t.FirstOffset) + + return nil +} type FetchResponseBlock struct { - Err KError - HighWaterMarkOffset int64 - MsgSet MessageSet + Err KError + HighWaterMarkOffset int64 + LastStableOffset int64 + LogStartOffset int64 + AbortedTransactions []*AbortedTransaction + PreferredReadReplica int32 + Records *Records // deprecated: use FetchResponseBlock.RecordsSet + RecordsSet []*Records + Partial bool } -func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) { +func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { tmp, err := pd.getInt16() if err != nil { return err @@ -20,37 +53,182 @@ func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) { return err } - msgSetSize, err := pd.getInt32() + if version >= 4 { + b.LastStableOffset, err = pd.getInt64() + if err != nil { + return err + } + + if version >= 5 { + b.LogStartOffset, err = pd.getInt64() + if err != nil { + return err + } + } + + numTransact, err := pd.getArrayLength() + if err != nil { + return err + } + + if numTransact >= 0 { + b.AbortedTransactions = make([]*AbortedTransaction, numTransact) + } + + for i := 0; i < numTransact; i++ { + transact := new(AbortedTransaction) + if err = transact.decode(pd); err != nil { + return err + } + b.AbortedTransactions[i] = transact + } + } + + if version >= 11 { + b.PreferredReadReplica, err = pd.getInt32() + if err != nil { + return err + } + } + + recordsSize, err := pd.getInt32() if err != nil { return err } - msgSetDecoder, err := pd.getSubset(int(msgSetSize)) + recordsDecoder, err := pd.getSubset(int(recordsSize)) if err != nil { return err } - err = 
(&b.MsgSet).decode(msgSetDecoder) - return err + b.RecordsSet = []*Records{} + + for recordsDecoder.remaining() > 0 { + records := &Records{} + if err := records.decode(recordsDecoder); err != nil { + // If we have at least one decoded records, this is not an error + if err == ErrInsufficientData { + if len(b.RecordsSet) == 0 { + b.Partial = true + } + break + } + return err + } + + partial, err := records.isPartial() + if err != nil { + return err + } + + n, err := records.numRecords() + if err != nil { + return err + } + + if n > 0 || (partial && len(b.RecordsSet) == 0) { + b.RecordsSet = append(b.RecordsSet, records) + + if b.Records == nil { + b.Records = records + } + } + + overflow, err := records.isOverflow() + if err != nil { + return err + } + + if partial || overflow { + break + } + } + + return nil +} + +func (b *FetchResponseBlock) numRecords() (int, error) { + sum := 0 + + for _, records := range b.RecordsSet { + count, err := records.numRecords() + if err != nil { + return 0, err + } + + sum += count + } + + return sum, nil +} + +func (b *FetchResponseBlock) isPartial() (bool, error) { + if b.Partial { + return true, nil + } + + if len(b.RecordsSet) == 1 { + return b.RecordsSet[0].isPartial() + } + + return false, nil } -func (b *FetchResponseBlock) encode(pe packetEncoder) (err error) { +func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) { pe.putInt16(int16(b.Err)) pe.putInt64(b.HighWaterMarkOffset) + if version >= 4 { + pe.putInt64(b.LastStableOffset) + + if version >= 5 { + pe.putInt64(b.LogStartOffset) + } + + if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil { + return err + } + for _, transact := range b.AbortedTransactions { + if err = transact.encode(pe); err != nil { + return err + } + } + } + + if version >= 11 { + pe.putInt32(b.PreferredReadReplica) + } + pe.push(&lengthField{}) - err = b.MsgSet.encode(pe) - if err != nil { - return err + for _, records := range b.RecordsSet { + err = records.encode(pe) + if err != nil { + return err + } } return pe.pop() } +func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction { + // I can't find any doc that guarantee the field `fetchResponse.AbortedTransactions` is ordered + // plus Java implementation use a PriorityQueue based on `FirstOffset`. 
I guess we have to order it ourself + at := b.AbortedTransactions + sort.Slice( + at, + func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset }, + ) + return at +} + type FetchResponse struct { - Blocks map[string]map[int32]*FetchResponseBlock - ThrottleTime time.Duration - Version int16 // v1 requires 0.9+, v2 requires 0.10+ + Blocks map[string]map[int32]*FetchResponseBlock + ThrottleTime time.Duration + ErrorCode int16 + SessionID int32 + Version int16 + LogAppendTime bool + Timestamp time.Time } func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) { @@ -64,6 +242,17 @@ func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) { r.ThrottleTime = time.Duration(throttle) * time.Millisecond } + if r.Version >= 7 { + r.ErrorCode, err = pd.getInt16() + if err != nil { + return err + } + r.SessionID, err = pd.getInt32() + if err != nil { + return err + } + } + numTopics, err := pd.getArrayLength() if err != nil { return err @@ -90,7 +279,7 @@ func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) { } block := new(FetchResponseBlock) - err = block.decode(pd) + err = block.decode(pd, version) if err != nil { return err } @@ -106,6 +295,11 @@ func (r *FetchResponse) encode(pe packetEncoder) (err error) { pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) } + if r.Version >= 7 { + pe.putInt16(r.ErrorCode) + pe.putInt32(r.SessionID) + } + err = pe.putArrayLength(len(r.Blocks)) if err != nil { return err @@ -124,12 +318,11 @@ func (r *FetchResponse) encode(pe packetEncoder) (err error) { for id, block := range partitions { pe.putInt32(id) - err = block.encode(pe) + err = block.encode(pe, r.Version) if err != nil { return err } } - } return nil } @@ -142,14 +335,34 @@ func (r *FetchResponse) version() int16 { return r.Version } +func (r *FetchResponse) headerVersion() int16 { + return 0 +} + func (r *FetchResponse) requiredVersion() KafkaVersion { switch r.Version { + case 0: + return MinVersion case 1: return V0_9_0_0 case 2: return V0_10_0_0 + case 3: + return V0_10_1_0 + case 4, 5: + return V0_11_0_0 + case 6: + return V1_0_0_0 + case 7: + return V1_1_0_0 + case 8: + return V2_0_0_0 + case 9, 10: + return V2_1_0_0 + case 11: + return V2_3_0_0 default: - return minVersion + return MaxVersion } } @@ -182,7 +395,7 @@ func (r *FetchResponse) AddError(topic string, partition int32, err KError) { frb.Err = err } -func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { +func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock { if r.Blocks == nil { r.Blocks = make(map[string]map[int32]*FetchResponseBlock) } @@ -196,6 +409,11 @@ func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Enc frb = new(FetchResponseBlock) partitions[partition] = frb } + + return frb +} + +func encodeKV(key, value Encoder) ([]byte, []byte) { var kb []byte var vb []byte if key != nil { @@ -204,7 +422,125 @@ func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Enc if value != nil { vb, _ = value.Encode() } - msg := &Message{Key: kb, Value: vb} + + return kb, vb +} + +func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) { + frb := r.getOrCreateBlock(topic, partition) + kb, vb := encodeKV(key, value) + if r.LogAppendTime { + timestamp = r.Timestamp + } + msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: 
timestamp, Version: version} msgBlock := &MessageBlock{Msg: msg, Offset: offset} - frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock) + if len(frb.RecordsSet) == 0 { + records := newLegacyRecords(&MessageSet{}) + frb.RecordsSet = []*Records{&records} + } + set := frb.RecordsSet[0].MsgSet + set.Messages = append(set.Messages, msgBlock) +} + +func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) { + frb := r.getOrCreateBlock(topic, partition) + kb, vb := encodeKV(key, value) + if len(frb.RecordsSet) == 0 { + records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp}) + frb.RecordsSet = []*Records{&records} + } + batch := frb.RecordsSet[0].RecordBatch + rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)} + batch.addRecord(rec) +} + +// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp +// But instead of appending 1 record to a batch, it append a new batch containing 1 record to the fetchResponse +// Since transaction are handled on batch level (the whole batch is either committed or aborted), use this to test transactions +func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) { + frb := r.getOrCreateBlock(topic, partition) + kb, vb := encodeKV(key, value) + + records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp}) + batch := &RecordBatch{ + Version: 2, + LogAppendTime: r.LogAppendTime, + FirstTimestamp: timestamp, + MaxTimestamp: r.Timestamp, + FirstOffset: offset, + LastOffsetDelta: 0, + ProducerID: producerID, + IsTransactional: isTransactional, + } + rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)} + batch.addRecord(rec) + records.RecordBatch = batch + + frb.RecordsSet = append(frb.RecordsSet, &records) +} + +func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) { + frb := r.getOrCreateBlock(topic, partition) + + // batch + batch := &RecordBatch{ + Version: 2, + LogAppendTime: r.LogAppendTime, + FirstTimestamp: timestamp, + MaxTimestamp: r.Timestamp, + FirstOffset: offset, + LastOffsetDelta: 0, + ProducerID: producerID, + IsTransactional: true, + Control: true, + } + + // records + records := newDefaultRecords(nil) + records.RecordBatch = batch + + // record + crAbort := ControlRecord{ + Version: 0, + Type: recordType, + } + crKey := &realEncoder{raw: make([]byte, 4)} + crValue := &realEncoder{raw: make([]byte, 6)} + crAbort.encode(crKey, crValue) + rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)} + batch.addRecord(rec) + + frb.RecordsSet = append(frb.RecordsSet, &records) +} + +func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { + r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0) +} + +func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) { + r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{}) +} + +func (r *FetchResponse) 
AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) { + r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{}) +} + +func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) { + // define controlRecord key and value + r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{}) +} + +func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) { + frb := r.getOrCreateBlock(topic, partition) + if len(frb.RecordsSet) == 0 { + records := newDefaultRecords(&RecordBatch{Version: 2}) + frb.RecordsSet = []*Records{&records} + } + batch := frb.RecordsSet[0].RecordBatch + batch.LastOffsetDelta = offset +} + +func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) { + frb := r.getOrCreateBlock(topic, partition) + frb.LastStableOffset = offset } diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/Shopify/sarama/find_coordinator_request.go new file mode 100644 index 00000000..597bcbf7 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/find_coordinator_request.go @@ -0,0 +1,65 @@ +package sarama + +type CoordinatorType int8 + +const ( + CoordinatorGroup CoordinatorType = iota + CoordinatorTransaction +) + +type FindCoordinatorRequest struct { + Version int16 + CoordinatorKey string + CoordinatorType CoordinatorType +} + +func (f *FindCoordinatorRequest) encode(pe packetEncoder) error { + if err := pe.putString(f.CoordinatorKey); err != nil { + return err + } + + if f.Version >= 1 { + pe.putInt8(int8(f.CoordinatorType)) + } + + return nil +} + +func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) { + if f.CoordinatorKey, err = pd.getString(); err != nil { + return err + } + + if version >= 1 { + f.Version = version + coordinatorType, err := pd.getInt8() + if err != nil { + return err + } + + f.CoordinatorType = CoordinatorType(coordinatorType) + } + + return nil +} + +func (f *FindCoordinatorRequest) key() int16 { + return 10 +} + +func (f *FindCoordinatorRequest) version() int16 { + return f.Version +} + +func (r *FindCoordinatorRequest) headerVersion() int16 { + return 1 +} + +func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion { + switch f.Version { + case 1: + return V0_11_0_0 + default: + return V0_8_2_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/Shopify/sarama/find_coordinator_response.go new file mode 100644 index 00000000..83a648ad --- /dev/null +++ b/vendor/github.com/Shopify/sarama/find_coordinator_response.go @@ -0,0 +1,96 @@ +package sarama + +import ( + "time" +) + +var NoNode = &Broker{id: -1, addr: ":-1"} + +type FindCoordinatorResponse struct { + Version int16 + ThrottleTime time.Duration + Err KError + ErrMsg *string + Coordinator *Broker +} + +func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) { + if version >= 1 { + f.Version = version + + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + f.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + f.Err = KError(tmp) + + if version >= 1 { + if f.ErrMsg, err = pd.getNullableString(); err != nil { + return err + } + } + + 
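The two find_coordinator files above wire up API key 10, which maps a consumer group id or a transactional id to the broker currently acting as its coordinator. Below is a minimal caller-side sketch, not taken from this patch; it assumes only the exported types shown here, sarama's public Broker.FindCoordinator helper, and the canonical github.com/Shopify/sarama import path.

package example

import (
	"log"

	"github.com/Shopify/sarama"
)

// findTxnCoordinator asks an already-connected broker which broker hosts the
// transaction coordinator for the given transactional id. Passing
// CoordinatorGroup with a group id instead locates the group coordinator.
func findTxnCoordinator(broker *sarama.Broker, txnID string) {
	req := &sarama.FindCoordinatorRequest{
		Version:         1, // v1 adds CoordinatorType, throttle time and an error message
		CoordinatorKey:  txnID,
		CoordinatorType: sarama.CoordinatorTransaction,
	}
	resp, err := broker.FindCoordinator(req)
	if err != nil {
		log.Println("FindCoordinator request failed:", err)
		return
	}
	if resp.Err != sarama.ErrNoError || resp.Coordinator == nil {
		log.Println("broker returned:", resp.Err)
		return
	}
	log.Println("coordinator is at", resp.Coordinator.Addr())
}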
coordinator := new(Broker) + // The version is hardcoded to 0, as version 1 of the Broker-decode + // contains the rack-field which is not present in the FindCoordinatorResponse. + if err := coordinator.decode(pd, 0); err != nil { + return err + } + if coordinator.addr == ":0" { + return nil + } + f.Coordinator = coordinator + + return nil +} + +func (f *FindCoordinatorResponse) encode(pe packetEncoder) error { + if f.Version >= 1 { + pe.putInt32(int32(f.ThrottleTime / time.Millisecond)) + } + + pe.putInt16(int16(f.Err)) + + if f.Version >= 1 { + if err := pe.putNullableString(f.ErrMsg); err != nil { + return err + } + } + + coordinator := f.Coordinator + if coordinator == nil { + coordinator = NoNode + } + if err := coordinator.encode(pe, 0); err != nil { + return err + } + return nil +} + +func (f *FindCoordinatorResponse) key() int16 { + return 10 +} + +func (f *FindCoordinatorResponse) version() int16 { + return f.Version +} + +func (r *FindCoordinatorResponse) headerVersion() int16 { + return 0 +} + +func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion { + switch f.Version { + case 1: + return V0_11_0_0 + default: + return V0_8_2_0 + } +} diff --git a/vendor/github.com/Shopify/sarama/go.mod b/vendor/github.com/Shopify/sarama/go.mod new file mode 100644 index 00000000..1dca1cc3 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/go.mod @@ -0,0 +1,34 @@ +module github.com/Shopify/sarama + +go 1.13 + +require ( + github.com/Shopify/toxiproxy v2.1.4+incompatible + github.com/davecgh/go-spew v1.1.1 + github.com/eapache/go-resiliency v1.2.0 + github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 + github.com/eapache/queue v1.1.0 + github.com/fortytw2/leaktest v1.3.0 + github.com/frankban/quicktest v1.7.2 // indirect + github.com/golang/snappy v0.0.1 // indirect + github.com/google/go-cmp v0.4.0 // indirect + github.com/hashicorp/go-uuid v1.0.2 // indirect + github.com/jcmturner/gofork v1.0.0 // indirect + github.com/klauspost/compress v1.9.8 + github.com/kr/pretty v0.2.0 // indirect + github.com/pierrec/lz4 v2.4.1+incompatible + github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 + github.com/stretchr/testify v1.4.0 + github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c + github.com/xdg/stringprep v1.0.0 // indirect + golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 // indirect + golang.org/x/net v0.0.0-20200202094626-16171245cfb2 + golang.org/x/text v0.3.2 // indirect + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect + gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect + gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect + gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect + gopkg.in/jcmturner/gokrb5.v7 v7.5.0 + gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect + gopkg.in/yaml.v2 v2.2.8 // indirect +) diff --git a/vendor/github.com/Shopify/sarama/go.sum b/vendor/github.com/Shopify/sarama/go.sum new file mode 100644 index 00000000..06ec3280 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/go.sum @@ -0,0 +1,81 @@ +github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk= +github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA= +github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pierrec/lz4 v2.4.1+incompatible h1:mFe7ttWaflA46Mhqh+jUfjp2qTbPYxLB2/OyBppH9dg= +github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= 
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= +github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 h1:+ELyKg6m8UBf0nPFSqD0mi7zUfwPyXo23HNjMnXPz7w= +golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= +gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= +gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= +gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg= +gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= 
+gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= +gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go new file mode 100644 index 00000000..98be0f50 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go @@ -0,0 +1,257 @@ +package sarama + +import ( + "encoding/asn1" + "encoding/binary" + "fmt" + "io" + "strings" + "time" + + "gopkg.in/jcmturner/gokrb5.v7/asn1tools" + "gopkg.in/jcmturner/gokrb5.v7/gssapi" + "gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype" + "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage" + "gopkg.in/jcmturner/gokrb5.v7/messages" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +const ( + TOK_ID_KRB_AP_REQ = 256 + GSS_API_GENERIC_TAG = 0x60 + KRB5_USER_AUTH = 1 + KRB5_KEYTAB_AUTH = 2 + GSS_API_INITIAL = 1 + GSS_API_VERIFY = 2 + GSS_API_FINISH = 3 +) + +type GSSAPIConfig struct { + AuthType int + KeyTabPath string + KerberosConfigPath string + ServiceName string + Username string + Password string + Realm string +} + +type GSSAPIKerberosAuth struct { + Config *GSSAPIConfig + ticket messages.Ticket + encKey types.EncryptionKey + NewKerberosClientFunc func(config *GSSAPIConfig) (KerberosClient, error) + step int +} + +type KerberosClient interface { + Login() error + GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) + Domain() string + CName() types.PrincipalName + Destroy() +} + +/* +* +* Appends length in big endian before payload, and send it to kafka +* + */ + +func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) { + length := len(payload) + finalPackage := make([]byte, length+4) //4 byte length header + payload + copy(finalPackage[4:], payload) + binary.BigEndian.PutUint32(finalPackage, uint32(length)) + bytes, err := broker.conn.Write(finalPackage) + if err != nil { + return bytes, err + } + return bytes, nil +} + +/* +* +* Read length (4 bytes) and then read the payload +* + */ + +func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) { + bytesRead := 0 + lengthInBytes := make([]byte, 4) + bytes, err := io.ReadFull(broker.conn, lengthInBytes) + if err != nil { + return nil, bytesRead, err + } + bytesRead += bytes + payloadLength := binary.BigEndian.Uint32(lengthInBytes) + payloadBytes := make([]byte, payloadLength) // buffer for read.. 
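The writePackage and readPackage helpers in gssapi_kerberos.go frame every SASL/GSSAPI payload with a 4-byte big-endian length prefix. The following is a self-contained stdlib sketch of that framing, equivalent in behaviour but independent of the Broker type; it is illustrative only and not part of the vendored code.

package example

import (
	"encoding/binary"
	"io"
)

// frame prepends the 4-byte big-endian length header that writePackage adds
// before sending a payload on the broker connection.
func frame(payload []byte) []byte {
	out := make([]byte, 4+len(payload))
	binary.BigEndian.PutUint32(out, uint32(len(payload)))
	copy(out[4:], payload)
	return out
}

// unframe reads one length header and then exactly that many payload bytes,
// mirroring readPackage.
func unframe(r io.Reader) ([]byte, error) {
	var hdr [4]byte
	if _, err := io.ReadFull(r, hdr[:]); err != nil {
		return nil, err
	}
	payload := make([]byte, binary.BigEndian.Uint32(hdr[:]))
	if _, err := io.ReadFull(r, payload); err != nil {
		return nil, err
	}
	return payload, nil
}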
+ bytes, err = io.ReadFull(broker.conn, payloadBytes) // read bytes + if err != nil { + return payloadBytes, bytesRead, err + } + bytesRead += bytes + return payloadBytes, bytesRead, nil +} + +func (krbAuth *GSSAPIKerberosAuth) newAuthenticatorChecksum() []byte { + a := make([]byte, 24) + flags := []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf} + binary.LittleEndian.PutUint32(a[:4], 16) + for _, i := range flags { + f := binary.LittleEndian.Uint32(a[20:24]) + f |= uint32(i) + binary.LittleEndian.PutUint32(a[20:24], f) + } + return a +} + +/* +* +* Construct Kerberos AP_REQ package, conforming to RFC-4120 +* https://tools.ietf.org/html/rfc4120#page-84 +* + */ +func (krbAuth *GSSAPIKerberosAuth) createKrb5Token( + domain string, cname types.PrincipalName, + ticket messages.Ticket, + sessionKey types.EncryptionKey) ([]byte, error) { + auth, err := types.NewAuthenticator(domain, cname) + if err != nil { + return nil, err + } + auth.Cksum = types.Checksum{ + CksumType: chksumtype.GSSAPI, + Checksum: krbAuth.newAuthenticatorChecksum(), + } + APReq, err := messages.NewAPReq( + ticket, + sessionKey, + auth, + ) + if err != nil { + return nil, err + } + aprBytes := make([]byte, 2) + binary.BigEndian.PutUint16(aprBytes, TOK_ID_KRB_AP_REQ) + tb, err := APReq.Marshal() + if err != nil { + return nil, err + } + aprBytes = append(aprBytes, tb...) + return aprBytes, nil +} + +/* +* +* Append the GSS-API header to the payload, conforming to RFC-2743 +* Section 3.1, Mechanism-Independent Token Format +* +* https://tools.ietf.org/html/rfc2743#page-81 +* +* GSSAPIHeader + +* + */ +func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) { + oidBytes, err := asn1.Marshal(gssapi.OID(gssapi.OIDKRB5)) + if err != nil { + return nil, err + } + tkoLengthBytes := asn1tools.MarshalLengthBytes(len(oidBytes) + len(payload)) + GSSHeader := append([]byte{GSS_API_GENERIC_TAG}, tkoLengthBytes...) + GSSHeader = append(GSSHeader, oidBytes...) + GSSPackage := append(GSSHeader, payload...) + return GSSPackage, nil +} + +func (krbAuth *GSSAPIKerberosAuth) initSecContext(bytes []byte, kerberosClient KerberosClient) ([]byte, error) { + switch krbAuth.step { + case GSS_API_INITIAL: + aprBytes, err := krbAuth.createKrb5Token( + kerberosClient.Domain(), + kerberosClient.CName(), + krbAuth.ticket, + krbAuth.encKey) + if err != nil { + return nil, err + } + krbAuth.step = GSS_API_VERIFY + return krbAuth.appendGSSAPIHeader(aprBytes) + case GSS_API_VERIFY: + wrapTokenReq := gssapi.WrapToken{} + if err := wrapTokenReq.Unmarshal(bytes, true); err != nil { + return nil, err + } + // Validate response. 
+ isValid, err := wrapTokenReq.Verify(krbAuth.encKey, keyusage.GSSAPI_ACCEPTOR_SEAL) + if !isValid { + return nil, err + } + + wrapTokenResponse, err := gssapi.NewInitiatorWrapToken(wrapTokenReq.Payload, krbAuth.encKey) + if err != nil { + return nil, err + } + krbAuth.step = GSS_API_FINISH + return wrapTokenResponse.Marshal() + } + return nil, nil +} + +/* This does the handshake for authorization */ +func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error { + kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config) + if err != nil { + Logger.Printf("Kerberos client error: %s", err) + return err + } + + err = kerberosClient.Login() + if err != nil { + Logger.Printf("Kerberos client error: %s", err) + return err + } + // Construct SPN using serviceName and host + // SPN format: / + + host := strings.SplitN(broker.addr, ":", 2)[0] // Strip port part + spn := fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host) + + ticket, encKey, err := kerberosClient.GetServiceTicket(spn) + + if err != nil { + Logger.Printf("Error getting Kerberos service ticket : %s", err) + return err + } + krbAuth.ticket = ticket + krbAuth.encKey = encKey + krbAuth.step = GSS_API_INITIAL + var receivedBytes []byte = nil + defer kerberosClient.Destroy() + for { + packBytes, err := krbAuth.initSecContext(receivedBytes, kerberosClient) + if err != nil { + Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err) + return err + } + requestTime := time.Now() + bytesWritten, err := krbAuth.writePackage(broker, packBytes) + if err != nil { + Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err) + return err + } + broker.updateOutgoingCommunicationMetrics(bytesWritten) + if krbAuth.step == GSS_API_VERIFY { + bytesRead := 0 + receivedBytes, bytesRead, err = krbAuth.readPackage(broker) + requestLatency := time.Since(requestTime) + broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency) + if err != nil { + Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err) + return err + } + } else if krbAuth.step == GSS_API_FINISH { + return nil + } + } +} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go index ce49c473..e9d9af19 100644 --- a/vendor/github.com/Shopify/sarama/heartbeat_request.go +++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go @@ -42,6 +42,10 @@ func (r *HeartbeatRequest) version() int16 { return 0 } +func (r *HeartbeatRequest) headerVersion() int16 { + return 1 +} + func (r *HeartbeatRequest) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go index 3c51163a..577ab72e 100644 --- a/vendor/github.com/Shopify/sarama/heartbeat_response.go +++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go @@ -10,11 +10,11 @@ func (r *HeartbeatResponse) encode(pe packetEncoder) error { } func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error { - if kerr, err := pd.getInt16(); err != nil { + kerr, err := pd.getInt16() + if err != nil { return err - } else { - r.Err = KError(kerr) } + r.Err = KError(kerr) return nil } @@ -27,6 +27,10 @@ func (r *HeartbeatResponse) version() int16 { return 0 } +func (r *HeartbeatResponse) headerVersion() int16 { + return 0 +} + func (r *HeartbeatResponse) requiredVersion() KafkaVersion { 
return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go new file mode 100644 index 00000000..68944439 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/init_producer_id_request.go @@ -0,0 +1,47 @@ +package sarama + +import "time" + +type InitProducerIDRequest struct { + TransactionalID *string + TransactionTimeout time.Duration +} + +func (i *InitProducerIDRequest) encode(pe packetEncoder) error { + if err := pe.putNullableString(i.TransactionalID); err != nil { + return err + } + pe.putInt32(int32(i.TransactionTimeout / time.Millisecond)) + + return nil +} + +func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) { + if i.TransactionalID, err = pd.getNullableString(); err != nil { + return err + } + + timeout, err := pd.getInt32() + if err != nil { + return err + } + i.TransactionTimeout = time.Duration(timeout) * time.Millisecond + + return nil +} + +func (i *InitProducerIDRequest) key() int16 { + return 22 +} + +func (i *InitProducerIDRequest) version() int16 { + return 0 +} + +func (i *InitProducerIDRequest) headerVersion() int16 { + return 1 +} + +func (i *InitProducerIDRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go new file mode 100644 index 00000000..3e1242bf --- /dev/null +++ b/vendor/github.com/Shopify/sarama/init_producer_id_response.go @@ -0,0 +1,59 @@ +package sarama + +import "time" + +type InitProducerIDResponse struct { + ThrottleTime time.Duration + Err KError + ProducerID int64 + ProducerEpoch int16 +} + +func (i *InitProducerIDResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(i.ThrottleTime / time.Millisecond)) + pe.putInt16(int16(i.Err)) + pe.putInt64(i.ProducerID) + pe.putInt16(i.ProducerEpoch) + + return nil +} + +func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + kerr, err := pd.getInt16() + if err != nil { + return err + } + i.Err = KError(kerr) + + if i.ProducerID, err = pd.getInt64(); err != nil { + return err + } + + if i.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + return nil +} + +func (i *InitProducerIDResponse) key() int16 { + return 22 +} + +func (i *InitProducerIDResponse) version() int16 { + return 0 +} + +func (i *InitProducerIDResponse) headerVersion() int16 { + return 0 +} + +func (i *InitProducerIDResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go index 656db456..3734e82e 100644 --- a/vendor/github.com/Shopify/sarama/join_group_request.go +++ b/vendor/github.com/Shopify/sarama/join_group_request.go @@ -1,11 +1,38 @@ package sarama +type GroupProtocol struct { + Name string + Metadata []byte +} + +func (p *GroupProtocol) decode(pd packetDecoder) (err error) { + p.Name, err = pd.getString() + if err != nil { + return err + } + p.Metadata, err = pd.getBytes() + return err +} + +func (p *GroupProtocol) encode(pe packetEncoder) (err error) { + if err := pe.putString(p.Name); err != nil { + return err + } + if err := pe.putBytes(p.Metadata); err != 
nil { + return err + } + return nil +} + type JoinGroupRequest struct { - GroupId string - SessionTimeout int32 - MemberId string - ProtocolType string - GroupProtocols map[string][]byte + Version int16 + GroupId string + SessionTimeout int32 + RebalanceTimeout int32 + MemberId string + ProtocolType string + GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols + OrderedGroupProtocols []*GroupProtocol } func (r *JoinGroupRequest) encode(pe packetEncoder) error { @@ -13,6 +40,9 @@ func (r *JoinGroupRequest) encode(pe packetEncoder) error { return err } pe.putInt32(r.SessionTimeout) + if r.Version >= 1 { + pe.putInt32(r.RebalanceTimeout) + } if err := pe.putString(r.MemberId); err != nil { return err } @@ -20,22 +50,39 @@ func (r *JoinGroupRequest) encode(pe packetEncoder) error { return err } - if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil { - return err - } - for name, metadata := range r.GroupProtocols { - if err := pe.putString(name); err != nil { + if len(r.GroupProtocols) > 0 { + if len(r.OrderedGroupProtocols) > 0 { + return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"} + } + + if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil { return err } - if err := pe.putBytes(metadata); err != nil { + for name, metadata := range r.GroupProtocols { + if err := pe.putString(name); err != nil { + return err + } + if err := pe.putBytes(metadata); err != nil { + return err + } + } + } else { + if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil { return err } + for _, protocol := range r.OrderedGroupProtocols { + if err := protocol.encode(pe); err != nil { + return err + } + } } return nil } func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.GroupId, err = pd.getString(); err != nil { return } @@ -44,6 +91,12 @@ func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) { return } + if version >= 1 { + if r.RebalanceTimeout, err = pd.getInt32(); err != nil { + return err + } + } + if r.MemberId, err = pd.getString(); err != nil { return } @@ -62,16 +115,12 @@ func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) { r.GroupProtocols = make(map[string][]byte) for i := 0; i < n; i++ { - name, err := pd.getString() - if err != nil { + protocol := &GroupProtocol{} + if err := protocol.decode(pd); err != nil { return err } - metadata, err := pd.getBytes() - if err != nil { - return err - } - - r.GroupProtocols[name] = metadata + r.GroupProtocols[protocol.Name] = protocol.Metadata + r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol) } return nil @@ -82,19 +131,29 @@ func (r *JoinGroupRequest) key() int16 { } func (r *JoinGroupRequest) version() int16 { - return 0 + return r.Version } -func (r *JoinGroupRequest) requiredVersion() KafkaVersion { - return V0_9_0_0 +func (r *JoinGroupRequest) headerVersion() int16 { + return 1 } -func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) { - if r.GroupProtocols == nil { - r.GroupProtocols = make(map[string][]byte) +func (r *JoinGroupRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 2: + return V0_11_0_0 + case 1: + return V0_10_1_0 + default: + return V0_9_0_0 } +} - r.GroupProtocols[name] = metadata +func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) { + r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{ + Name: name, + Metadata: metadata, 
+ }) } func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error { diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go index 94c7a7fd..54b0a45c 100644 --- a/vendor/github.com/Shopify/sarama/join_group_response.go +++ b/vendor/github.com/Shopify/sarama/join_group_response.go @@ -1,6 +1,8 @@ package sarama type JoinGroupResponse struct { + Version int16 + ThrottleTime int32 Err KError GenerationId int32 GroupProtocol string @@ -22,6 +24,9 @@ func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata } func (r *JoinGroupResponse) encode(pe packetEncoder) error { + if r.Version >= 2 { + pe.putInt32(r.ThrottleTime) + } pe.putInt16(int16(r.Err)) pe.putInt32(r.GenerationId) @@ -53,12 +58,21 @@ func (r *JoinGroupResponse) encode(pe packetEncoder) error { } func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) { - if kerr, err := pd.getInt16(); err != nil { + r.Version = version + + if version >= 2 { + if r.ThrottleTime, err = pd.getInt32(); err != nil { + return + } + } + + kerr, err := pd.getInt16() + if err != nil { return err - } else { - r.Err = KError(kerr) } + r.Err = KError(kerr) + if r.GenerationId, err = pd.getInt32(); err != nil { return } @@ -106,9 +120,20 @@ func (r *JoinGroupResponse) key() int16 { } func (r *JoinGroupResponse) version() int16 { + return r.Version +} + +func (r *JoinGroupResponse) headerVersion() int16 { return 0 } func (r *JoinGroupResponse) requiredVersion() KafkaVersion { - return V0_9_0_0 + switch r.Version { + case 2: + return V0_11_0_0 + case 1: + return V0_10_1_0 + default: + return V0_9_0_0 + } } diff --git a/vendor/github.com/Shopify/sarama/kerberos_client.go b/vendor/github.com/Shopify/sarama/kerberos_client.go new file mode 100644 index 00000000..91b998f5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/kerberos_client.go @@ -0,0 +1,51 @@ +package sarama + +import ( + krb5client "gopkg.in/jcmturner/gokrb5.v7/client" + krb5config "gopkg.in/jcmturner/gokrb5.v7/config" + "gopkg.in/jcmturner/gokrb5.v7/keytab" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +type KerberosGoKrb5Client struct { + krb5client.Client +} + +func (c *KerberosGoKrb5Client) Domain() string { + return c.Credentials.Domain() +} + +func (c *KerberosGoKrb5Client) CName() types.PrincipalName { + return c.Credentials.CName() +} + +/* +* +* Create kerberos client used to obtain TGT and TGS tokens +* used gokrb5 library, which is a pure go kerberos client with +* some GSS-API capabilities, and SPNEGO support. Kafka does not use SPNEGO +* it uses pure Kerberos 5 solution (RFC-4121 and RFC-4120). 
+* + */ +func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) { + cfg, err := krb5config.Load(config.KerberosConfigPath) + if err != nil { + return nil, err + } + return createClient(config, cfg) +} + +func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) { + var client *krb5client.Client + if config.AuthType == KRB5_KEYTAB_AUTH { + kt, err := keytab.Load(config.KeyTabPath) + if err != nil { + return nil, err + } + client = krb5client.NewClientWithKeytab(config.Username, config.Realm, kt, cfg) + } else { + client = krb5client.NewClientWithPassword(config.Username, + config.Realm, config.Password, cfg) + } + return &KerberosGoKrb5Client{*client}, nil +} diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go index e1774274..d7789b68 100644 --- a/vendor/github.com/Shopify/sarama/leave_group_request.go +++ b/vendor/github.com/Shopify/sarama/leave_group_request.go @@ -35,6 +35,10 @@ func (r *LeaveGroupRequest) version() int16 { return 0 } +func (r *LeaveGroupRequest) headerVersion() int16 { + return 1 +} + func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go index bd4a34f4..25f8d5eb 100644 --- a/vendor/github.com/Shopify/sarama/leave_group_response.go +++ b/vendor/github.com/Shopify/sarama/leave_group_response.go @@ -10,11 +10,11 @@ func (r *LeaveGroupResponse) encode(pe packetEncoder) error { } func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) { - if kerr, err := pd.getInt16(); err != nil { + kerr, err := pd.getInt16() + if err != nil { return err - } else { - r.Err = KError(kerr) } + r.Err = KError(kerr) return nil } @@ -27,6 +27,10 @@ func (r *LeaveGroupResponse) version() int16 { return 0 } +func (r *LeaveGroupResponse) headerVersion() int16 { + return 0 +} + func (r *LeaveGroupResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go index 70078be5..7d864f6b 100644 --- a/vendor/github.com/Shopify/sarama/length_field.go +++ b/vendor/github.com/Shopify/sarama/length_field.go @@ -1,10 +1,40 @@ package sarama -import "encoding/binary" +import ( + "encoding/binary" + "sync" +) // LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths. 
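NewKerberosClient and createClient above choose between keytab and password authentication based on GSSAPIConfig.AuthType. A hypothetical configuration sketch follows, using only the exported names introduced in gssapi_kerberos.go and kerberos_client.go; the file paths, principal and realm are placeholders, not values from this patch.

package example

import "github.com/Shopify/sarama"

// newKeytabClient builds a KerberosClient from a keytab. For password-based
// login, set AuthType to sarama.KRB5_USER_AUTH and fill in Password instead.
func newKeytabClient() (sarama.KerberosClient, error) {
	cfg := &sarama.GSSAPIConfig{
		AuthType:           sarama.KRB5_KEYTAB_AUTH,
		KeyTabPath:         "/etc/security/kafka.keytab", // placeholder path
		KerberosConfigPath: "/etc/krb5.conf",             // placeholder path
		ServiceName:        "kafka",
		Username:           "kafka-client", // placeholder principal
		Realm:              "EXAMPLE.COM",  // placeholder realm
	}
	return sarama.NewKerberosClient(cfg)
}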
type lengthField struct { startOffset int + length int32 +} + +var lengthFieldPool = sync.Pool{} + +func acquireLengthField() *lengthField { + val := lengthFieldPool.Get() + if val != nil { + return val.(*lengthField) + } + return &lengthField{} +} + +func releaseLengthField(m *lengthField) { + lengthFieldPool.Put(m) +} + +func (l *lengthField) decode(pd packetDecoder) error { + var err error + l.length, err = pd.getInt32() + if err != nil { + return err + } + if l.length > int32(pd.remaining()) { + return ErrInsufficientData + } + return nil } func (l *lengthField) saveOffset(in int) { @@ -21,7 +51,47 @@ func (l *lengthField) run(curOffset int, buf []byte) error { } func (l *lengthField) check(curOffset int, buf []byte) error { - if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) { + if int32(curOffset-l.startOffset-4) != l.length { + return PacketDecodingError{"length field invalid"} + } + + return nil +} + +type varintLengthField struct { + startOffset int + length int64 +} + +func (l *varintLengthField) decode(pd packetDecoder) error { + var err error + l.length, err = pd.getVarint() + return err +} + +func (l *varintLengthField) saveOffset(in int) { + l.startOffset = in +} + +func (l *varintLengthField) adjustLength(currOffset int) int { + oldFieldSize := l.reserveLength() + l.length = int64(currOffset - l.startOffset - oldFieldSize) + + return l.reserveLength() - oldFieldSize +} + +func (l *varintLengthField) reserveLength() int { + var tmp [binary.MaxVarintLen64]byte + return binary.PutVarint(tmp[:], l.length) +} + +func (l *varintLengthField) run(curOffset int, buf []byte) error { + binary.PutVarint(buf[l.startOffset:], l.length) + return nil +} + +func (l *varintLengthField) check(curOffset int, buf []byte) error { + if int64(curOffset-l.startOffset-l.reserveLength()) != l.length { return PacketDecodingError{"length field invalid"} } diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go index 3b16abf7..ed44cc27 100644 --- a/vendor/github.com/Shopify/sarama/list_groups_request.go +++ b/vendor/github.com/Shopify/sarama/list_groups_request.go @@ -19,6 +19,10 @@ func (r *ListGroupsRequest) version() int16 { return 0 } +func (r *ListGroupsRequest) headerVersion() int16 { + return 1 +} + func (r *ListGroupsRequest) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go index 3a84f9b6..777bae7e 100644 --- a/vendor/github.com/Shopify/sarama/list_groups_response.go +++ b/vendor/github.com/Shopify/sarama/list_groups_response.go @@ -24,12 +24,13 @@ func (r *ListGroupsResponse) encode(pe packetEncoder) error { } func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error { - if kerr, err := pd.getInt16(); err != nil { + kerr, err := pd.getInt16() + if err != nil { return err - } else { - r.Err = KError(kerr) } + r.Err = KError(kerr) + n, err := pd.getArrayLength() if err != nil { return err @@ -63,6 +64,10 @@ func (r *ListGroupsResponse) version() int16 { return 0 } +func (r *ListGroupsResponse) headerVersion() int16 { + return 0 +} + func (r *ListGroupsResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go 
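lengthField and varintLengthField above implement sarama's reserve-then-backfill pattern: push reserves room for a size field, the body is encoded, and pop writes the final size into the reserved slot (the pe.push(&lengthField{}) / pe.pop() pair used in the fetch response encoder earlier in this patch). A stripped-down sketch of the fixed 4-byte variant, independent of the unexported packetEncoder and purely illustrative:

package example

import "encoding/binary"

// appendLengthPrefixed reserves four bytes, lets encodeBody append the payload,
// then back-fills the reserved slot with the payload size, which is the job
// lengthField performs for sarama's packetEncoder. The varint variant differs
// only in that the reserved width depends on the final value.
func appendLengthPrefixed(buf []byte, encodeBody func([]byte) []byte) []byte {
	start := len(buf)
	buf = append(buf, 0, 0, 0, 0) // reserved length slot
	buf = encodeBody(buf)
	binary.BigEndian.PutUint32(buf[start:], uint32(len(buf)-start-4))
	return buf
}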
new file mode 100644 index 00000000..c1ffa9ba --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go @@ -0,0 +1,98 @@ +package sarama + +type ListPartitionReassignmentsRequest struct { + TimeoutMs int32 + blocks map[string][]int32 + Version int16 +} + +func (r *ListPartitionReassignmentsRequest) encode(pe packetEncoder) error { + pe.putInt32(r.TimeoutMs) + + pe.putCompactArrayLength(len(r.blocks)) + + for topic, partitions := range r.blocks { + if err := pe.putCompactString(topic); err != nil { + return err + } + + if err := pe.putCompactInt32Array(partitions); err != nil { + return err + } + + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *ListPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.TimeoutMs, err = pd.getInt32(); err != nil { + return err + } + + topicCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if topicCount > 0 { + r.blocks = make(map[string][]int32) + for i := 0; i < topicCount; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + partitionCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make([]int32, partitionCount) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + r.blocks[topic][j] = partition + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return +} + +func (r *ListPartitionReassignmentsRequest) key() int16 { + return 46 +} + +func (r *ListPartitionReassignmentsRequest) version() int16 { + return r.Version +} + +func (r *ListPartitionReassignmentsRequest) headerVersion() int16 { + return 2 +} + +func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion { + return V2_4_0_0 +} + +func (r *ListPartitionReassignmentsRequest) AddBlock(topic string, partitionIDs []int32) { + if r.blocks == nil { + r.blocks = make(map[string][]int32) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = partitionIDs + } +} diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go new file mode 100644 index 00000000..4baa6a08 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go @@ -0,0 +1,169 @@ +package sarama + +type PartitionReplicaReassignmentsStatus struct { + Replicas []int32 + AddingReplicas []int32 + RemovingReplicas []int32 +} + +func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error { + if err := pe.putCompactInt32Array(b.Replicas); err != nil { + return err + } + if err := pe.putCompactInt32Array(b.AddingReplicas); err != nil { + return err + } + if err := pe.putCompactInt32Array(b.RemovingReplicas); err != nil { + return err + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) { + if b.Replicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + + if b.AddingReplicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + + if b.RemovingReplicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return err +} + +type 
ListPartitionReassignmentsResponse struct { + Version int16 + ThrottleTimeMs int32 + ErrorCode KError + ErrorMessage *string + TopicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus +} + +func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition int32, replicas, addingReplicas, removingReplicas []int32) { + if r.TopicStatus == nil { + r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus) + } + partitions := r.TopicStatus[topic] + if partitions == nil { + partitions = make(map[int32]*PartitionReplicaReassignmentsStatus) + r.TopicStatus[topic] = partitions + } + + partitions[partition] = &PartitionReplicaReassignmentsStatus{Replicas: replicas, AddingReplicas: addingReplicas, RemovingReplicas: removingReplicas} +} + +func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error { + pe.putInt32(r.ThrottleTimeMs) + pe.putInt16(int16(r.ErrorCode)) + if err := pe.putNullableCompactString(r.ErrorMessage); err != nil { + return err + } + + pe.putCompactArrayLength(len(r.TopicStatus)) + for topic, partitions := range r.TopicStatus { + if err := pe.putCompactString(topic); err != nil { + return err + } + pe.putCompactArrayLength(len(partitions)) + for partition, block := range partitions { + pe.putInt32(partition) + + if err := block.encode(pe); err != nil { + return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { + return err + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.ErrorCode = KError(kerr) + + if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil { + return err + } + + numTopics, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus, numTopics) + for i := 0; i < numTopics; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + + ongoingPartitionReassignments, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.TopicStatus[topic] = make(map[int32]*PartitionReplicaReassignmentsStatus, ongoingPartitionReassignments) + + for j := 0; j < ongoingPartitionReassignments; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + + block := &PartitionReplicaReassignmentsStatus{} + if err := block.decode(pd); err != nil { + return err + } + r.TopicStatus[topic][partition] = block + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return nil +} + +func (r *ListPartitionReassignmentsResponse) key() int16 { + return 46 +} + +func (r *ListPartitionReassignmentsResponse) version() int16 { + return r.Version +} + +func (r *ListPartitionReassignmentsResponse) headerVersion() int16 { + return 1 +} + +func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion { + return V2_4_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go index 327c5fa2..e48566b3 100644 --- a/vendor/github.com/Shopify/sarama/message.go +++ b/vendor/github.com/Shopify/sarama/message.go @@ -1,51 +1,77 @@ package sarama import ( - "bytes" - "compress/gzip" "fmt" - "io/ioutil" "time" +) - 
"github.com/eapache/go-xerial-snappy" - "github.com/pierrec/lz4" +const ( + //CompressionNone no compression + CompressionNone CompressionCodec = iota + //CompressionGZIP compression using GZIP + CompressionGZIP + //CompressionSnappy compression using snappy + CompressionSnappy + //CompressionLZ4 compression using LZ4 + CompressionLZ4 + //CompressionZSTD compression using ZSTD + CompressionZSTD + + // The lowest 3 bits contain the compression codec used for the message + compressionCodecMask int8 = 0x07 + + // Bit 3 set for "LogAppend" timestamps + timestampTypeMask = 0x08 + + // CompressionLevelDefault is the constant to use in CompressionLevel + // to have the default compression level for any codec. The value is picked + // that we don't use any existing compression levels. + CompressionLevelDefault = -1000 ) // CompressionCodec represents the various compression codecs recognized by Kafka in messages. type CompressionCodec int8 -// only the last two bits are really used -const compressionCodecMask int8 = 0x03 - -const ( - CompressionNone CompressionCodec = 0 - CompressionGZIP CompressionCodec = 1 - CompressionSnappy CompressionCodec = 2 - CompressionLZ4 CompressionCodec = 3 -) +func (cc CompressionCodec) String() string { + return []string{ + "none", + "gzip", + "snappy", + "lz4", + "zstd", + }[int(cc)] +} +//Message is a kafka message type type Message struct { - Codec CompressionCodec // codec used to compress the message contents - Key []byte // the message key, may be nil - Value []byte // the message contents - Set *MessageSet // the message set a message might wrap - Version int8 // v1 requires Kafka 0.10 - Timestamp time.Time // the timestamp of the message (version 1+ only) + Codec CompressionCodec // codec used to compress the message contents + CompressionLevel int // compression level + LogAppendTime bool // the used timestamp is LogAppendTime + Key []byte // the message key, may be nil + Value []byte // the message contents + Set *MessageSet // the message set a message might wrap + Version int8 // v1 requires Kafka 0.10 + Timestamp time.Time // the timestamp of the message (version 1+ only) compressedCache []byte compressedSize int // used for computing the compression ratio metrics } func (m *Message) encode(pe packetEncoder) error { - pe.push(&crc32Field{}) + pe.push(newCRC32Field(crcIEEE)) pe.putInt8(m.Version) attributes := int8(m.Codec) & compressionCodecMask + if m.LogAppendTime { + attributes |= timestampTypeMask + } pe.putInt8(attributes) if m.Version >= 1 { - pe.putInt64(m.Timestamp.UnixNano() / int64(time.Millisecond)) + if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil { + return err + } } err := pe.putBytes(m.Key) @@ -59,39 +85,11 @@ func (m *Message) encode(pe packetEncoder) error { payload = m.compressedCache m.compressedCache = nil } else if m.Value != nil { - switch m.Codec { - case CompressionNone: - payload = m.Value - case CompressionGZIP: - var buf bytes.Buffer - writer := gzip.NewWriter(&buf) - if _, err = writer.Write(m.Value); err != nil { - return err - } - if err = writer.Close(); err != nil { - return err - } - m.compressedCache = buf.Bytes() - payload = m.compressedCache - case CompressionSnappy: - tmp := snappy.Encode(m.Value) - m.compressedCache = tmp - payload = m.compressedCache - case CompressionLZ4: - var buf bytes.Buffer - writer := lz4.NewWriter(&buf) - if _, err = writer.Write(m.Value); err != nil { - return err - } - if err = writer.Close(); err != nil { - return err - } - m.compressedCache = buf.Bytes() - 
payload = m.compressedCache - - default: - return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)} + payload, err = compress(m.Codec, m.CompressionLevel, m.Value) + if err != nil { + return err } + m.compressedCache = payload // Keep in mind the compressed payload size for metric gathering m.compressedSize = len(payload) } @@ -104,7 +102,10 @@ func (m *Message) encode(pe packetEncoder) error { } func (m *Message) decode(pd packetDecoder) (err error) { - err = pd.push(&crc32Field{}) + crc32Decoder := acquireCrc32Field(crcIEEE) + defer releaseCrc32Field(crc32Decoder) + + err = pd.push(crc32Decoder) if err != nil { return err } @@ -114,18 +115,21 @@ func (m *Message) decode(pd packetDecoder) (err error) { return err } + if m.Version > 1 { + return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)} + } + attribute, err := pd.getInt8() if err != nil { return err } m.Codec = CompressionCodec(attribute & compressionCodecMask) + m.LogAppendTime = attribute&timestampTypeMask == timestampTypeMask - if m.Version >= 1 { - millis, err := pd.getInt64() - if err != nil { + if m.Version == 1 { + if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil { return err } - m.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) } m.Key, err = pd.getBytes() @@ -145,50 +149,24 @@ func (m *Message) decode(pd packetDecoder) (err error) { switch m.Codec { case CompressionNone: // nothing to do - case CompressionGZIP: + default: if m.Value == nil { break } - reader, err := gzip.NewReader(bytes.NewReader(m.Value)) + + m.Value, err = decompress(m.Codec, m.Value) if err != nil { return err } - if m.Value, err = ioutil.ReadAll(reader); err != nil { - return err - } - if err := m.decodeSet(); err != nil { - return err - } - case CompressionSnappy: - if m.Value == nil { - break - } - if m.Value, err = snappy.Decode(m.Value); err != nil { - return err - } if err := m.decodeSet(); err != nil { return err } - case CompressionLZ4: - if m.Value == nil { - break - } - reader := lz4.NewReader(bytes.NewReader(m.Value)) - if m.Value, err = ioutil.ReadAll(reader); err != nil { - return err - } - if err := m.decodeSet(); err != nil { - return err - } - - default: - return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)} } return pd.pop() } -// decodes a message set from a previousy encoded bulk-message +// decodes a message set from a previously encoded bulk-message func (m *Message) decodeSet() (err error) { pd := realDecoder{raw: m.Value} m.Set = &MessageSet{} diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go index f028784e..6523ec2f 100644 --- a/vendor/github.com/Shopify/sarama/message_set.go +++ b/vendor/github.com/Shopify/sarama/message_set.go @@ -29,7 +29,10 @@ func (msb *MessageBlock) decode(pd packetDecoder) (err error) { return err } - if err = pd.push(&lengthField{}); err != nil { + lengthDecoder := acquireLengthField() + defer releaseLengthField(lengthDecoder) + + if err = pd.push(lengthDecoder); err != nil { return err } @@ -47,6 +50,7 @@ func (msb *MessageBlock) decode(pd packetDecoder) (err error) { type MessageSet struct { PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock + OverflowMessage bool // whether the set on the wire contained an overflow message Messages []*MessageBlock } @@ -64,6 +68,19 @@ func (ms *MessageSet) decode(pd packetDecoder) (err error) { ms.Messages = 
nil for pd.remaining() > 0 { + magic, err := magicValue(pd) + if err != nil { + if err == ErrInsufficientData { + ms.PartialTrailingMessage = true + return nil + } + return err + } + + if magic > 1 { + return nil + } + msb := new(MessageBlock) err = msb.decode(pd) switch err { @@ -72,7 +89,12 @@ func (ms *MessageSet) decode(pd packetDecoder) (err error) { case ErrInsufficientData: // As an optimization the server is allowed to return a partial message at the // end of the message set. Clients should handle this case. So we just ignore such things. - ms.PartialTrailingMessage = true + if msb.Offset == -1 { + // This is an overflow message caused by chunked down conversion + ms.OverflowMessage = true + } else { + ms.PartialTrailingMessage = true + } return nil default: return err diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go index 9a26b55f..e835f5a9 100644 --- a/vendor/github.com/Shopify/sarama/metadata_request.go +++ b/vendor/github.com/Shopify/sarama/metadata_request.go @@ -1,40 +1,58 @@ package sarama type MetadataRequest struct { - Topics []string + Version int16 + Topics []string + AllowAutoTopicCreation bool } func (r *MetadataRequest) encode(pe packetEncoder) error { - err := pe.putArrayLength(len(r.Topics)) - if err != nil { - return err + if r.Version < 0 || r.Version > 5 { + return PacketEncodingError{"invalid or unsupported MetadataRequest version field"} } - - for i := range r.Topics { - err = pe.putString(r.Topics[i]) + if r.Version == 0 || len(r.Topics) > 0 { + err := pe.putArrayLength(len(r.Topics)) if err != nil { return err } + + for i := range r.Topics { + err = pe.putString(r.Topics[i]) + if err != nil { + return err + } + } + } else { + pe.putInt32(-1) + } + if r.Version > 3 { + pe.putBool(r.AllowAutoTopicCreation) } return nil } func (r *MetadataRequest) decode(pd packetDecoder, version int16) error { - topicCount, err := pd.getArrayLength() + r.Version = version + size, err := pd.getInt32() if err != nil { return err } - if topicCount == 0 { - return nil + if size > 0 { + r.Topics = make([]string, size) + for i := range r.Topics { + topic, err := pd.getString() + if err != nil { + return err + } + r.Topics[i] = topic + } } - - r.Topics = make([]string, topicCount) - for i := range r.Topics { - topic, err := pd.getString() + if r.Version > 3 { + autoCreation, err := pd.getBool() if err != nil { return err } - r.Topics[i] = topic + r.AllowAutoTopicCreation = autoCreation } return nil } @@ -44,9 +62,24 @@ func (r *MetadataRequest) key() int16 { } func (r *MetadataRequest) version() int16 { - return 0 + return r.Version +} + +func (r *MetadataRequest) headerVersion() int16 { + return 1 } func (r *MetadataRequest) requiredVersion() KafkaVersion { - return minVersion + switch r.Version { + case 1: + return V0_10_0_0 + case 2: + return V0_10_1_0 + case 3, 4: + return V0_11_0_0 + case 5: + return V1_0_0_0 + default: + return MinVersion + } } diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go index f9d6a427..0bb8702c 100644 --- a/vendor/github.com/Shopify/sarama/metadata_response.go +++ b/vendor/github.com/Shopify/sarama/metadata_response.go @@ -1,14 +1,15 @@ package sarama type PartitionMetadata struct { - Err KError - ID int32 - Leader int32 - Replicas []int32 - Isr []int32 + Err KError + ID int32 + Leader int32 + Replicas []int32 + Isr []int32 + OfflineReplicas []int32 
} -func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) { +func (pm *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) { tmp, err := pd.getInt16() if err != nil { return err @@ -35,10 +36,17 @@ func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) { return err } + if version >= 5 { + pm.OfflineReplicas, err = pd.getInt32Array() + if err != nil { + return err + } + } + return nil } -func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) { +func (pm *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) { pe.putInt16(int16(pm.Err)) pe.putInt32(pm.ID) pe.putInt32(pm.Leader) @@ -53,16 +61,24 @@ func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) { return err } + if version >= 5 { + err = pe.putInt32Array(pm.OfflineReplicas) + if err != nil { + return err + } + } + return nil } type TopicMetadata struct { Err KError Name string + IsInternal bool // Only valid for Version >= 1 Partitions []*PartitionMetadata } -func (tm *TopicMetadata) decode(pd packetDecoder) (err error) { +func (tm *TopicMetadata) decode(pd packetDecoder, version int16) (err error) { tmp, err := pd.getInt16() if err != nil { return err @@ -74,6 +90,13 @@ func (tm *TopicMetadata) decode(pd packetDecoder) (err error) { return err } + if version >= 1 { + tm.IsInternal, err = pd.getBool() + if err != nil { + return err + } + } + n, err := pd.getArrayLength() if err != nil { return err @@ -81,7 +104,7 @@ func (tm *TopicMetadata) decode(pd packetDecoder) (err error) { tm.Partitions = make([]*PartitionMetadata, n) for i := 0; i < n; i++ { tm.Partitions[i] = new(PartitionMetadata) - err = tm.Partitions[i].decode(pd) + err = tm.Partitions[i].decode(pd, version) if err != nil { return err } @@ -90,7 +113,7 @@ func (tm *TopicMetadata) decode(pd packetDecoder) (err error) { return nil } -func (tm *TopicMetadata) encode(pe packetEncoder) (err error) { +func (tm *TopicMetadata) encode(pe packetEncoder, version int16) (err error) { pe.putInt16(int16(tm.Err)) err = pe.putString(tm.Name) @@ -98,13 +121,17 @@ func (tm *TopicMetadata) encode(pe packetEncoder) (err error) { return err } + if version >= 1 { + pe.putBool(tm.IsInternal) + } + err = pe.putArrayLength(len(tm.Partitions)) if err != nil { return err } for _, pm := range tm.Partitions { - err = pm.encode(pe) + err = pm.encode(pe, version) if err != nil { return err } @@ -114,11 +141,24 @@ func (tm *TopicMetadata) encode(pe packetEncoder) (err error) { } type MetadataResponse struct { - Brokers []*Broker - Topics []*TopicMetadata + Version int16 + ThrottleTimeMs int32 + Brokers []*Broker + ClusterID *string + ControllerID int32 + Topics []*TopicMetadata } func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if version >= 3 { + r.ThrottleTimeMs, err = pd.getInt32() + if err != nil { + return err + } + } + n, err := pd.getArrayLength() if err != nil { return err @@ -127,12 +167,28 @@ func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { r.Brokers = make([]*Broker, n) for i := 0; i < n; i++ { r.Brokers[i] = new(Broker) - err = r.Brokers[i].decode(pd) + err = r.Brokers[i].decode(pd, version) if err != nil { return err } } + if version >= 2 { + r.ClusterID, err = pd.getNullableString() + if err != nil { + return err + } + } + + if version >= 1 { + r.ControllerID, err = pd.getInt32() + if err != nil { + return err + } + } else { + r.ControllerID = -1 + } + n, err = pd.getArrayLength() if err != nil { return err @@ -141,7 +197,7 
@@ func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { r.Topics = make([]*TopicMetadata, n) for i := 0; i < n; i++ { r.Topics[i] = new(TopicMetadata) - err = r.Topics[i].decode(pd) + err = r.Topics[i].decode(pd, version) if err != nil { return err } @@ -151,23 +207,38 @@ func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { } func (r *MetadataResponse) encode(pe packetEncoder) error { + if r.Version >= 3 { + pe.putInt32(r.ThrottleTimeMs) + } + err := pe.putArrayLength(len(r.Brokers)) if err != nil { return err } for _, broker := range r.Brokers { - err = broker.encode(pe) + err = broker.encode(pe, r.Version) if err != nil { return err } } + if r.Version >= 2 { + err := pe.putNullableString(r.ClusterID) + if err != nil { + return err + } + } + + if r.Version >= 1 { + pe.putInt32(r.ControllerID) + } + err = pe.putArrayLength(len(r.Topics)) if err != nil { return err } for _, tm := range r.Topics { - err = tm.encode(pe) + err = tm.encode(pe, r.Version) if err != nil { return err } @@ -181,11 +252,26 @@ func (r *MetadataResponse) key() int16 { } func (r *MetadataResponse) version() int16 { + return r.Version +} + +func (r *MetadataResponse) headerVersion() int16 { return 0 } func (r *MetadataResponse) requiredVersion() KafkaVersion { - return minVersion + switch r.Version { + case 1: + return V0_10_0_0 + case 2: + return V0_10_1_0 + case 3, 4: + return V0_11_0_0 + case 5: + return V1_0_0_0 + default: + return MinVersion + } } // testing API @@ -214,7 +300,7 @@ foundTopic: return tmatch } -func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) { +func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) { tmatch := r.AddTopic(topic, ErrNoError) var pmatch *PartitionMetadata @@ -234,6 +320,6 @@ foundPartition: pmatch.Leader = brokerID pmatch.Replicas = replicas pmatch.Isr = isr + pmatch.OfflineReplicas = offline pmatch.Err = err - } diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go index 4869708e..90e5a87f 100644 --- a/vendor/github.com/Shopify/sarama/metrics.go +++ b/vendor/github.com/Shopify/sarama/metrics.go @@ -28,14 +28,6 @@ func getMetricNameForBroker(name string, broker *Broker) string { return fmt.Sprintf(name+"-for-broker-%d", broker.ID()) } -func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter { - return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r) -} - -func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram { - return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r) -} - func getMetricNameForTopic(name string, topic string) string { // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy // cf. 
KAFKA-1902 and KAFKA-2337 diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go index 0734d34f..ff5a68ae 100644 --- a/vendor/github.com/Shopify/sarama/mockbroker.go +++ b/vendor/github.com/Shopify/sarama/mockbroker.go @@ -18,7 +18,9 @@ const ( expectationTimeout = 500 * time.Millisecond ) -type requestHandlerFunc func(req *request) (res encoder) +type GSSApiHandlerFunc func([]byte) []byte + +type requestHandlerFunc func(req *request) (res encoderWithHeader) // RequestNotifierFunc is invoked when a mock broker processes a request successfully // and will provides the number of bytes read and written. @@ -49,18 +51,19 @@ type RequestNotifierFunc func(bytesRead, bytesWritten int) // It is not necessary to prefix message length or correlation ID to your // response bytes, the server does that automatically as a convenience. type MockBroker struct { - brokerID int32 - port int32 - closing chan none - stopper chan none - expectations chan encoder - listener net.Listener - t TestReporter - latency time.Duration - handler requestHandlerFunc - notifier RequestNotifierFunc - history []RequestResponse - lock sync.Mutex + brokerID int32 + port int32 + closing chan none + stopper chan none + expectations chan encoderWithHeader + listener net.Listener + t TestReporter + latency time.Duration + handler requestHandlerFunc + notifier RequestNotifierFunc + history []RequestResponse + lock sync.Mutex + gssApiHandler GSSApiHandlerFunc } // RequestResponse represents a Request/Response pair processed by MockBroker. @@ -80,7 +83,7 @@ func (b *MockBroker) SetLatency(latency time.Duration) { // and uses the found MockResponse instance to generate an appropriate reply. // If the request type is not found in the map then nothing is sent. func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) { - b.setHandler(func(req *request) (res encoder) { + b.setHandler(func(req *request) (res encoderWithHeader) { reqTypeName := reflect.TypeOf(req.body).Elem().Name() mockResponse := handlerMap[reqTypeName] if mockResponse == nil { @@ -173,7 +176,44 @@ func (b *MockBroker) serverLoop() { Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err) } -func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) { +func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) { + b.gssApiHandler = handler +} + +func (b *MockBroker) readToBytes(r io.Reader) ([]byte, error) { + var ( + bytesRead int + lengthBytes = make([]byte, 4) + ) + + if _, err := io.ReadFull(r, lengthBytes); err != nil { + return nil, err + } + + bytesRead += len(lengthBytes) + length := int32(binary.BigEndian.Uint32(lengthBytes)) + + if length <= 4 || length > MaxRequestSize { + return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)} + } + + encodedReq := make([]byte, length) + if _, err := io.ReadFull(r, encodedReq); err != nil { + return nil, err + } + + bytesRead += len(encodedReq) + + fullBytes := append(lengthBytes, encodedReq...) 
+ + return fullBytes, nil +} + +func (b *MockBroker) isGSSAPI(buffer []byte) bool { + return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04}) +} + +func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.WaitGroup) { defer wg.Done() defer func() { _ = conn.Close() @@ -191,65 +231,110 @@ func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) } }() - resHeader := make([]byte, 8) + var bytesWritten int + var bytesRead int for { - req, bytesRead, err := decodeRequest(conn) + buffer, err := b.readToBytes(conn) if err != nil { - Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req)) + Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer)) b.serverError(err) break } - if b.latency > 0 { - time.Sleep(b.latency) - } - - b.lock.Lock() - res := b.handler(req) - b.history = append(b.history, RequestResponse{req.body, res}) - b.lock.Unlock() + bytesWritten = 0 + if !b.isGSSAPI(buffer) { + req, br, err := decodeRequest(bytes.NewReader(buffer)) + bytesRead = br + if err != nil { + Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req)) + b.serverError(err) + break + } - if res == nil { - Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req)) - continue - } - Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res) + if b.latency > 0 { + time.Sleep(b.latency) + } - encodedRes, err := encode(res, nil) - if err != nil { - b.serverError(err) - break - } - if len(encodedRes) == 0 { b.lock.Lock() - if b.notifier != nil { - b.notifier(bytesRead, 0) - } + res := b.handler(req) + b.history = append(b.history, RequestResponse{req.body, res}) b.lock.Unlock() - continue - } - binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4)) - binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID)) - if _, err = conn.Write(resHeader); err != nil { - b.serverError(err) - break - } - if _, err = conn.Write(encodedRes); err != nil { - b.serverError(err) - break + if res == nil { + Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req)) + continue + } + Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res) + + encodedRes, err := encode(res, nil) + if err != nil { + b.serverError(err) + break + } + if len(encodedRes) == 0 { + b.lock.Lock() + if b.notifier != nil { + b.notifier(bytesRead, 0) + } + b.lock.Unlock() + continue + } + + resHeader := b.encodeHeader(res.headerVersion(), req.correlationID, uint32(len(encodedRes))) + if _, err = conn.Write(resHeader); err != nil { + b.serverError(err) + break + } + if _, err = conn.Write(encodedRes); err != nil { + b.serverError(err) + break + } + bytesWritten = len(resHeader) + len(encodedRes) + } else { + // GSSAPI is not part of kafka protocol, but is supported for authentication proposes. 
+ // Don't support history for this kind of request as is only used for test GSSAPI authentication mechanism + b.lock.Lock() + res := b.gssApiHandler(buffer) + b.lock.Unlock() + if res == nil { + Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(buffer)) + continue + } + if _, err = conn.Write(res); err != nil { + b.serverError(err) + break + } + bytesWritten = len(res) } b.lock.Lock() if b.notifier != nil { - b.notifier(bytesRead, len(resHeader)+len(encodedRes)) + b.notifier(bytesRead, bytesWritten) } b.lock.Unlock() } Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err) } -func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) { +func (b *MockBroker) encodeHeader(headerVersion int16, correlationId int32, payloadLength uint32) []byte { + headerLength := uint32(8) + + if headerVersion >= 1 { + headerLength = 9 + } + + resHeader := make([]byte, headerLength) + binary.BigEndian.PutUint32(resHeader, payloadLength+headerLength-4) + binary.BigEndian.PutUint32(resHeader[4:], uint32(correlationId)) + + if headerVersion >= 1 { + binary.PutUvarint(resHeader[8:], 0) + } + + return resHeader +} + +func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) { select { case res, ok := <-b.expectations: if !ok { @@ -288,6 +373,15 @@ func NewMockBroker(t TestReporter, brokerID int32) *MockBroker { // NewMockBrokerAddr behaves like newMockBroker but listens on the address you give // it rather than just some ephemeral port. func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker { + listener, err := net.Listen("tcp", addr) + if err != nil { + t.Fatal(err) + } + return NewMockBrokerListener(t, brokerID, listener) +} + +// NewMockBrokerListener behaves like newMockBrokerAddr but accepts connections on the listener specified. 
+func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker { var err error broker := &MockBroker{ @@ -295,14 +389,11 @@ func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker stopper: make(chan none), t: t, brokerID: brokerID, - expectations: make(chan encoder, 512), + expectations: make(chan encoderWithHeader, 512), + listener: listener, } broker.handler = broker.defaultRequestHandler - broker.listener, err = net.Listen("tcp", addr) - if err != nil { - t.Fatal(err) - } Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String()) _, portStr, err := net.SplitHostPort(broker.listener.Addr().String()) if err != nil { @@ -319,6 +410,6 @@ func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker return broker } -func (b *MockBroker) Returns(e encoder) { +func (b *MockBroker) Returns(e encoderWithHeader) { b.expectations <- e } diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/Shopify/sarama/mockkerberos.go new file mode 100644 index 00000000..d36649d8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/mockkerberos.go @@ -0,0 +1,123 @@ +package sarama + +import ( + "encoding/binary" + "encoding/hex" + + "gopkg.in/jcmturner/gokrb5.v7/credentials" + "gopkg.in/jcmturner/gokrb5.v7/gssapi" + "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage" + "gopkg.in/jcmturner/gokrb5.v7/messages" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +type KafkaGSSAPIHandler struct { + client *MockKerberosClient + badResponse bool + badKeyChecksum bool +} + +func (h *KafkaGSSAPIHandler) MockKafkaGSSAPI(buffer []byte) []byte { + // Default payload used for verify + err := h.client.Login() // Mock client construct keys when login + if err != nil { + return nil + } + if h.badResponse { // Returns trash + return []byte{0x00, 0x00, 0x00, 0x01, 0xAD} + } + + var pack = gssapi.WrapToken{ + Flags: KRB5_USER_AUTH, + EC: 12, + RRC: 0, + SndSeqNum: 3398292281, + Payload: []byte{0x11, 0x00}, // 1100 + } + // Compute checksum + if h.badKeyChecksum { + pack.CheckSum = []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF} + } else { + err = pack.SetCheckSum(h.client.ASRep.DecryptedEncPart.Key, keyusage.GSSAPI_ACCEPTOR_SEAL) + if err != nil { + return nil + } + } + + packBytes, err := pack.Marshal() + if err != nil { + return nil + } + lenBytes := len(packBytes) + response := make([]byte, lenBytes+4) + copy(response[4:], packBytes) + binary.BigEndian.PutUint32(response, uint32(lenBytes)) + return response +} + +type MockKerberosClient struct { + asRepBytes string + ASRep messages.ASRep + credentials *credentials.Credentials + mockError error + errorStage string +} + +func (c *MockKerberosClient) Login() error { + if c.errorStage == "login" && c.mockError != nil { + return c.mockError + } + c.asRepBytes = "6b8202e9308202e5a003020105a10302010ba22b30293027a103020113a220041e301c301aa003020112a1131b114" + + "558414d504c452e434f4d636c69656e74a30d1b0b4558414d504c452e434f4da4133011a003020101a10a30081b06636c69656e7" + + "4a5820156618201523082014ea003020105a10d1b0b4558414d504c452e434f4da220301ea003020102a11730151b066b7262746" + + "7741b0b4558414d504c452e434f4da382011430820110a003020112a103020101a28201020481ffdb9891175d106818e61008c51" + + "d0b3462bca92f3bf9d4cfa82de4c4d7aff9994ec87c573e3a3d54dcb2bb79618c76f2bf4a3d006f90d5bdbd049bc18f48be39203" + + "549ca02acaf63f292b12404f9b74c34b83687119d8f56552ccc0c50ebee2a53bb114c1b4619bb1d5d31f0f49b4d40a08a9b4c046" + + 
"2e1398d0b648be1c0e50c552ad16e1d8d8e74263dd0bf0ec591e4797dfd40a9a1be4ae830d03a306e053fd7586fef84ffc5e4a83" + + "7c3122bf3e6a40fe87e84019f6283634461b955712b44a5f7386c278bff94ec2c2dc0403247e29c2450e853471ceababf9b8911f" + + "997f2e3010b046d2c49eb438afb0f4c210821e80d4ffa4c9521eb895dcd68610b3feaa682012c30820128a003020112a282011f0" + + "482011bce73cbce3f1dd17661c412005f0f2257c756fe8e98ff97e6ec24b7bab66e5fd3a3827aeeae4757af0c6e892948122d8b2" + + "03c8df48df0ef5d142d0e416d688f11daa0fcd63d96bdd431d02b8e951c664eeff286a2be62383d274a04016d5f0e141da58cb86" + + "331de64063062f4f885e8e9ce5b181ca2fdc67897c5995e0ae1ae0c171a64493ff7bd91bc6d89cd4fce1e2b3ea0a10e34b0d5eda" + + "aa38ee727b50c5632ed1d2f2b457908e616178d0d80b72af209fb8ac9dbaa1768fa45931392b36b6d8c12400f8ded2efaa0654d0" + + "da1db966e8b5aab4706c800f95d559664646041fdb38b411c62fc0fbe0d25083a28562b0e1c8df16e62e9d5626b0addee489835f" + + "eedb0f26c05baa596b69b17f47920aa64b29dc77cfcc97ba47885" + apRepBytes, err := hex.DecodeString(c.asRepBytes) + if err != nil { + return err + } + err = c.ASRep.Unmarshal(apRepBytes) + if err != nil { + return err + } + c.credentials = credentials.New("client", "EXAMPLE.COM").WithPassword("qwerty") + _, err = c.ASRep.DecryptEncPart(c.credentials) + if err != nil { + return err + } + return nil +} + +func (c *MockKerberosClient) GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) { + if c.errorStage == "service_ticket" && c.mockError != nil { + return messages.Ticket{}, types.EncryptionKey{}, c.mockError + } + return c.ASRep.Ticket, c.ASRep.DecryptedEncPart.Key, nil +} + +func (c *MockKerberosClient) Domain() string { + return "EXAMPLE.COM" +} +func (c *MockKerberosClient) CName() types.PrincipalName { + var p = types.PrincipalName{ + NameType: KRB5_USER_AUTH, + NameString: []string{ + "kafka", + "kafka", + }, + } + return p +} +func (c *MockKerberosClient) Destroy() { + // Do nothing. +} diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go index a2031420..2560da8a 100644 --- a/vendor/github.com/Shopify/sarama/mockresponses.go +++ b/vendor/github.com/Shopify/sarama/mockresponses.go @@ -2,6 +2,7 @@ package sarama import ( "fmt" + "strings" ) // TestReporter has methods matching go's testing.T to avoid importing @@ -17,20 +18,20 @@ type TestReporter interface { // allows generating a response based on a request body. MockResponses are used // to program behavior of MockBroker in tests. type MockResponse interface { - For(reqBody versionedDecoder) (res encoder) + For(reqBody versionedDecoder) (res encoderWithHeader) } // MockWrapper is a mock response builder that returns a particular concrete // response regardless of the actual request passed to the `For` method. 
type MockWrapper struct { - res encoder + res encoderWithHeader } -func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) { +func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoderWithHeader) { return mw.res } -func NewMockWrapper(res encoder) *MockWrapper { +func NewMockWrapper(res encoderWithHeader) *MockWrapper { return &MockWrapper{res: res} } @@ -49,7 +50,7 @@ func NewMockSequence(responses ...interface{}) *MockSequence { switch res := res.(type) { case MockResponse: ms.responses[i] = res - case encoder: + case encoderWithHeader: ms.responses[i] = NewMockWrapper(res) default: panic(fmt.Sprintf("Unexpected response type: %T", res)) @@ -58,7 +59,7 @@ func NewMockSequence(responses ...interface{}) *MockSequence { return ms } -func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) { +func (mc *MockSequence) For(reqBody versionedDecoder) (res encoderWithHeader) { res = mc.responses[0].For(reqBody) if len(mc.responses) > 1 { mc.responses = mc.responses[1:] @@ -66,11 +67,75 @@ func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) { return res } +type MockListGroupsResponse struct { + groups map[string]string + t TestReporter +} + +func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse { + return &MockListGroupsResponse{ + groups: make(map[string]string), + t: t, + } +} + +func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { + request := reqBody.(*ListGroupsRequest) + _ = request + response := &ListGroupsResponse{ + Groups: m.groups, + } + return response +} + +func (m *MockListGroupsResponse) AddGroup(groupID, protocolType string) *MockListGroupsResponse { + m.groups[groupID] = protocolType + return m +} + +type MockDescribeGroupsResponse struct { + groups map[string]*GroupDescription + t TestReporter +} + +func NewMockDescribeGroupsResponse(t TestReporter) *MockDescribeGroupsResponse { + return &MockDescribeGroupsResponse{ + t: t, + groups: make(map[string]*GroupDescription), + } +} + +func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, description *GroupDescription) *MockDescribeGroupsResponse { + m.groups[groupID] = description + return m +} + +func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { + request := reqBody.(*DescribeGroupsRequest) + + response := &DescribeGroupsResponse{} + for _, requestedGroup := range request.Groups { + if group, ok := m.groups[requestedGroup]; ok { + response.Groups = append(response.Groups, group) + } else { + // Mimic real kafka - if a group doesn't exist, return + // an entry with state "Dead" + response.Groups = append(response.Groups, &GroupDescription{ + GroupId: requestedGroup, + State: "Dead", + }) + } + } + + return response +} + // MockMetadataResponse is a `MetadataResponse` builder. 
type MockMetadataResponse struct { - leaders map[string]map[int32]int32 - brokers map[string]int32 - t TestReporter + controllerID int32 + leaders map[string]map[int32]int32 + brokers map[string]int32 + t TestReporter } func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse { @@ -96,23 +161,39 @@ func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMet return mmr } -func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder { +func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResponse { + mmr.controllerID = brokerID + return mmr +} + +func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader { metadataRequest := reqBody.(*MetadataRequest) - metadataResponse := &MetadataResponse{} + metadataResponse := &MetadataResponse{ + Version: metadataRequest.version(), + ControllerID: mmr.controllerID, + } for addr, brokerID := range mmr.brokers { metadataResponse.AddBroker(addr, brokerID) } + + // Generate set of replicas + replicas := []int32{} + offlineReplicas := []int32{} + for _, brokerID := range mmr.brokers { + replicas = append(replicas, brokerID) + } + if len(metadataRequest.Topics) == 0 { for topic, partitions := range mmr.leaders { for partition, brokerID := range partitions { - metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) + metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError) } } return metadataResponse } for _, topic := range metadataRequest.Topics { for partition, brokerID := range mmr.leaders[topic] { - metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) + metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError) } } return metadataResponse @@ -122,6 +203,7 @@ func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder { type MockOffsetResponse struct { offsets map[string]map[int32]map[int64]int64 t TestReporter + version int16 } func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { @@ -131,6 +213,11 @@ func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { } } +func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse { + mor.version = version + return mor +} + func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse { partitions := mor.offsets[topic] if partitions == nil { @@ -146,9 +233,9 @@ func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, of return mor } -func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder { +func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader { offsetRequest := reqBody.(*OffsetRequest) - offsetResponse := &OffsetResponse{} + offsetResponse := &OffsetResponse{Version: mor.version} for topic, partitions := range offsetRequest.blocks { for partition, block := range partitions { offset := mor.getOffset(topic, partition, block.time) @@ -180,6 +267,7 @@ type MockFetchResponse struct { highWaterMarks map[string]map[int32]int64 t TestReporter batchSize int + version int16 } func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { @@ -191,6 +279,11 @@ func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { } } +func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse { + mfr.version = version + return mfr +} + func (mfr *MockFetchResponse) SetMessage(topic 
string, partition int32, offset int64, msg Encoder) *MockFetchResponse { partitions := mfr.messages[topic] if partitions == nil { @@ -216,9 +309,11 @@ func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, of return mfr } -func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder { +func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { fetchRequest := reqBody.(*FetchRequest) - res := &FetchResponse{} + res := &FetchResponse{ + Version: mfr.version, + } for topic, partitions := range fetchRequest.blocks { for partition, block := range partitions { initialOffset := block.fetchOffset @@ -298,7 +393,7 @@ func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *M return mr } -func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*ConsumerMetadataRequest) group := req.ConsumerGroup res := &ConsumerMetadataResponse{} @@ -312,6 +407,60 @@ func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder { return res } +// MockFindCoordinatorResponse is a `FindCoordinatorResponse` builder. +type MockFindCoordinatorResponse struct { + groupCoordinators map[string]interface{} + transCoordinators map[string]interface{} + t TestReporter +} + +func NewMockFindCoordinatorResponse(t TestReporter) *MockFindCoordinatorResponse { + return &MockFindCoordinatorResponse{ + groupCoordinators: make(map[string]interface{}), + transCoordinators: make(map[string]interface{}), + t: t, + } +} + +func (mr *MockFindCoordinatorResponse) SetCoordinator(coordinatorType CoordinatorType, group string, broker *MockBroker) *MockFindCoordinatorResponse { + switch coordinatorType { + case CoordinatorGroup: + mr.groupCoordinators[group] = broker + case CoordinatorTransaction: + mr.transCoordinators[group] = broker + } + return mr +} + +func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, group string, kerror KError) *MockFindCoordinatorResponse { + switch coordinatorType { + case CoordinatorGroup: + mr.groupCoordinators[group] = kerror + case CoordinatorTransaction: + mr.transCoordinators[group] = kerror + } + return mr +} + +func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*FindCoordinatorRequest) + res := &FindCoordinatorResponse{} + var v interface{} + switch req.CoordinatorType { + case CoordinatorGroup: + v = mr.groupCoordinators[req.CoordinatorKey] + case CoordinatorTransaction: + v = mr.transCoordinators[req.CoordinatorKey] + } + switch v := v.(type) { + case *MockBroker: + res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()} + case KError: + res.Err = v + } + return res +} + // MockOffsetCommitResponse is a `OffsetCommitResponse` builder. type MockOffsetCommitResponse struct { errors map[string]map[string]map[int32]KError @@ -340,7 +489,7 @@ func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int3 return mr } -func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*OffsetCommitRequest) group := req.ConsumerGroup res := &OffsetCommitResponse{} @@ -370,14 +519,20 @@ func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int3 // MockProduceResponse is a `ProduceResponse` builder. 
type MockProduceResponse struct { - errors map[string]map[int32]KError - t TestReporter + version int16 + errors map[string]map[int32]KError + t TestReporter } func NewMockProduceResponse(t TestReporter) *MockProduceResponse { return &MockProduceResponse{t: t} } +func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse { + mr.version = version + return mr +} + func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse { if mr.errors == nil { mr.errors = make(map[string]map[int32]KError) @@ -391,10 +546,12 @@ func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KE return mr } -func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*ProduceRequest) - res := &ProduceResponse{} - for topic, partitions := range req.msgSets { + res := &ProduceResponse{ + Version: mr.version, + } + for topic, partitions := range req.records { for partition := range partitions { res.AddTopicPartition(topic, partition, mr.getError(topic, partition)) } @@ -417,6 +574,7 @@ func (mr *MockProduceResponse) getError(topic string, partition int32) KError { // MockOffsetFetchResponse is a `OffsetFetchResponse` builder. type MockOffsetFetchResponse struct { offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock + error KError t TestReporter } @@ -438,18 +596,526 @@ func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int3 partitions = make(map[int32]*OffsetFetchResponseBlock) topics[topic] = partitions } - partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror} + partitions[partition] = &OffsetFetchResponseBlock{offset, 0, metadata, kerror} return mr } -func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockOffsetFetchResponse) SetError(kerror KError) *MockOffsetFetchResponse { + mr.error = kerror + return mr +} + +func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*OffsetFetchRequest) group := req.ConsumerGroup - res := &OffsetFetchResponse{} + res := &OffsetFetchResponse{Version: req.Version} + for topic, partitions := range mr.offsets[group] { for partition, block := range partitions { res.AddBlock(topic, partition, block) } } + + if res.Version >= 2 { + res.Err = mr.error + } + return res +} + +type MockCreateTopicsResponse struct { + t TestReporter +} + +func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse { + return &MockCreateTopicsResponse{t: t} +} + +func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*CreateTopicsRequest) + res := &CreateTopicsResponse{ + Version: req.Version, + } + res.TopicErrors = make(map[string]*TopicError) + + for topic := range req.TopicDetails { + if res.Version >= 1 && strings.HasPrefix(topic, "_") { + msg := "insufficient permissions to create topic with reserved prefix" + res.TopicErrors[topic] = &TopicError{ + Err: ErrTopicAuthorizationFailed, + ErrMsg: &msg, + } + continue + } + res.TopicErrors[topic] = &TopicError{Err: ErrNoError} + } + return res +} + +type MockDeleteTopicsResponse struct { + t TestReporter +} + +func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse { + return &MockDeleteTopicsResponse{t: t} +} + +func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DeleteTopicsRequest) + res := 
&DeleteTopicsResponse{} + res.TopicErrorCodes = make(map[string]KError) + + for _, topic := range req.Topics { + res.TopicErrorCodes[topic] = ErrNoError + } + res.Version = req.Version + return res +} + +type MockCreatePartitionsResponse struct { + t TestReporter +} + +func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsResponse { + return &MockCreatePartitionsResponse{t: t} +} + +func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*CreatePartitionsRequest) + res := &CreatePartitionsResponse{} + res.TopicPartitionErrors = make(map[string]*TopicPartitionError) + + for topic := range req.TopicPartitions { + if strings.HasPrefix(topic, "_") { + msg := "insufficient permissions to create partition on topic with reserved prefix" + res.TopicPartitionErrors[topic] = &TopicPartitionError{ + Err: ErrTopicAuthorizationFailed, + ErrMsg: &msg, + } + continue + } + res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError} + } return res } + +type MockAlterPartitionReassignmentsResponse struct { + t TestReporter +} + +func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartitionReassignmentsResponse { + return &MockAlterPartitionReassignmentsResponse{t: t} +} + +func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*AlterPartitionReassignmentsRequest) + _ = req + res := &AlterPartitionReassignmentsResponse{} + return res +} + +type MockListPartitionReassignmentsResponse struct { + t TestReporter +} + +func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitionReassignmentsResponse { + return &MockListPartitionReassignmentsResponse{t: t} +} + +func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*ListPartitionReassignmentsRequest) + _ = req + res := &ListPartitionReassignmentsResponse{} + + for topic, partitions := range req.blocks { + for _, partition := range partitions { + res.AddBlock(topic, partition, []int32{0}, []int32{1}, []int32{2}) + } + } + + return res +} + +type MockDeleteRecordsResponse struct { + t TestReporter +} + +func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse { + return &MockDeleteRecordsResponse{t: t} +} + +func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DeleteRecordsRequest) + res := &DeleteRecordsResponse{} + res.Topics = make(map[string]*DeleteRecordsResponseTopic) + + for topic, deleteRecordRequestTopic := range req.Topics { + partitions := make(map[int32]*DeleteRecordsResponsePartition) + for partition := range deleteRecordRequestTopic.PartitionOffsets { + partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError} + } + res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions} + } + return res +} + +type MockDescribeConfigsResponse struct { + t TestReporter +} + +func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse { + return &MockDescribeConfigsResponse{t: t} +} + +func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DescribeConfigsRequest) + res := &DescribeConfigsResponse{ + Version: req.Version, + } + + includeSynonyms := (req.Version > 0) + includeSource := (req.Version > 0) + + for _, r := range req.Resources { + var configEntries []*ConfigEntry + switch r.Type { + case BrokerResource: + configEntries = 
append(configEntries, + &ConfigEntry{ + Name: "min.insync.replicas", + Value: "2", + ReadOnly: false, + Default: false, + }, + ) + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Configs: configEntries, + }) + case BrokerLoggerResource: + configEntries = append(configEntries, + &ConfigEntry{ + Name: "kafka.controller.KafkaController", + Value: "DEBUG", + ReadOnly: false, + Default: false, + }, + ) + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Configs: configEntries, + }) + case TopicResource: + maxMessageBytes := &ConfigEntry{Name: "max.message.bytes", + Value: "1000000", + ReadOnly: false, + Default: !includeSource, + Sensitive: false, + } + if includeSource { + maxMessageBytes.Source = SourceDefault + } + if includeSynonyms { + maxMessageBytes.Synonyms = []*ConfigSynonym{ + { + ConfigName: "max.message.bytes", + ConfigValue: "500000", + }, + } + } + retentionMs := &ConfigEntry{Name: "retention.ms", + Value: "5000", + ReadOnly: false, + Default: false, + Sensitive: false, + } + if includeSynonyms { + retentionMs.Synonyms = []*ConfigSynonym{ + { + ConfigName: "log.retention.ms", + ConfigValue: "2500", + }, + } + } + password := &ConfigEntry{Name: "password", + Value: "12345", + ReadOnly: false, + Default: false, + Sensitive: true, + } + configEntries = append( + configEntries, maxMessageBytes, retentionMs, password) + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Configs: configEntries, + }) + } + } + return res +} + +type MockDescribeConfigsResponseWithErrorCode struct { + t TestReporter +} + +func NewMockDescribeConfigsResponseWithErrorCode(t TestReporter) *MockDescribeConfigsResponseWithErrorCode { + return &MockDescribeConfigsResponseWithErrorCode{t: t} +} + +func (mr *MockDescribeConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DescribeConfigsRequest) + res := &DescribeConfigsResponse{ + Version: req.Version, + } + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Type: r.Type, + ErrorCode: 83, + ErrorMsg: "", + }) + } + return res +} + +type MockAlterConfigsResponse struct { + t TestReporter +} + +func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse { + return &MockAlterConfigsResponse{t: t} +} + +func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*AlterConfigsRequest) + res := &AlterConfigsResponse{} + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &AlterConfigsResourceResponse{Name: r.Name, + Type: r.Type, + ErrorMsg: "", + }) + } + return res +} + +type MockAlterConfigsResponseWithErrorCode struct { + t TestReporter +} + +func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsResponseWithErrorCode { + return &MockAlterConfigsResponseWithErrorCode{t: t} +} + +func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*AlterConfigsRequest) + res := &AlterConfigsResponse{} + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ + Name: r.Name, + Type: r.Type, + ErrorCode: 83, + ErrorMsg: "", + }) + } + return res +} + +type MockCreateAclsResponse struct { + t TestReporter +} + +func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse { + return &MockCreateAclsResponse{t: t} +} + +func (mr *MockCreateAclsResponse) For(reqBody 
versionedDecoder) encoderWithHeader { + req := reqBody.(*CreateAclsRequest) + res := &CreateAclsResponse{} + + for range req.AclCreations { + res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrNoError}) + } + return res +} + +type MockListAclsResponse struct { + t TestReporter +} + +func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse { + return &MockListAclsResponse{t: t} +} + +func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DescribeAclsRequest) + res := &DescribeAclsResponse{} + res.Err = ErrNoError + acl := &ResourceAcls{} + if req.ResourceName != nil { + acl.Resource.ResourceName = *req.ResourceName + } + acl.Resource.ResourcePatternType = req.ResourcePatternTypeFilter + acl.Resource.ResourceType = req.ResourceType + + host := "*" + if req.Host != nil { + host = *req.Host + } + + principal := "User:test" + if req.Principal != nil { + principal = *req.Principal + } + + permissionType := req.PermissionType + if permissionType == AclPermissionAny { + permissionType = AclPermissionAllow + } + + acl.Acls = append(acl.Acls, &Acl{Operation: req.Operation, PermissionType: permissionType, Host: host, Principal: principal}) + res.ResourceAcls = append(res.ResourceAcls, acl) + res.Version = int16(req.Version) + return res +} + +type MockSaslAuthenticateResponse struct { + t TestReporter + kerror KError + saslAuthBytes []byte +} + +func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateResponse { + return &MockSaslAuthenticateResponse{t: t} +} + +func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader { + res := &SaslAuthenticateResponse{} + res.Err = msar.kerror + res.SaslAuthBytes = msar.saslAuthBytes + return res +} + +func (msar *MockSaslAuthenticateResponse) SetError(kerror KError) *MockSaslAuthenticateResponse { + msar.kerror = kerror + return msar +} + +func (msar *MockSaslAuthenticateResponse) SetAuthBytes(saslAuthBytes []byte) *MockSaslAuthenticateResponse { + msar.saslAuthBytes = saslAuthBytes + return msar +} + +type MockDeleteAclsResponse struct { + t TestReporter +} + +type MockSaslHandshakeResponse struct { + enabledMechanisms []string + kerror KError + t TestReporter +} + +func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse { + return &MockSaslHandshakeResponse{t: t} +} + +func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader { + res := &SaslHandshakeResponse{} + res.Err = mshr.kerror + res.EnabledMechanisms = mshr.enabledMechanisms + return res +} + +func (mshr *MockSaslHandshakeResponse) SetError(kerror KError) *MockSaslHandshakeResponse { + mshr.kerror = kerror + return mshr +} + +func (mshr *MockSaslHandshakeResponse) SetEnabledMechanisms(enabledMechanisms []string) *MockSaslHandshakeResponse { + mshr.enabledMechanisms = enabledMechanisms + return mshr +} + +func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse { + return &MockDeleteAclsResponse{t: t} +} + +func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DeleteAclsRequest) + res := &DeleteAclsResponse{} + + for range req.Filters { + response := &FilterResponse{Err: ErrNoError} + response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError}) + res.FilterResponses = append(res.FilterResponses, response) + } + res.Version = int16(req.Version) + return res +} + +type MockDeleteGroupsResponse struct { + deletedGroups []string +} 
+ +func NewMockDeleteGroupsRequest(t TestReporter) *MockDeleteGroupsResponse { + return &MockDeleteGroupsResponse{} +} + +func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDeleteGroupsResponse { + m.deletedGroups = groups + return m +} + +func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &DeleteGroupsResponse{ + GroupErrorCodes: map[string]KError{}, + } + for _, group := range m.deletedGroups { + resp.GroupErrorCodes[group] = ErrNoError + } + return resp +} + +type MockDescribeLogDirsResponse struct { + t TestReporter + logDirs []DescribeLogDirsResponseDirMetadata +} + +func NewMockDescribeLogDirsResponse(t TestReporter) *MockDescribeLogDirsResponse { + return &MockDescribeLogDirsResponse{t: t} +} + +func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath string, topicPartitions map[string]int) *MockDescribeLogDirsResponse { + topics := []DescribeLogDirsResponseTopic{} + for topic := range topicPartitions { + partitions := []DescribeLogDirsResponsePartition{} + for i := 0; i < topicPartitions[topic]; i++ { + partitions = append(partitions, DescribeLogDirsResponsePartition{ + PartitionID: int32(i), + IsTemporary: false, + OffsetLag: int64(0), + Size: int64(1234), + }) + } + topics = append(topics, DescribeLogDirsResponseTopic{ + Topic: topic, + Partitions: partitions, + }) + } + logDir := DescribeLogDirsResponseDirMetadata{ + ErrorCode: ErrNoError, + Path: logDirPath, + Topics: topics, + } + m.logDirs = []DescribeLogDirsResponseDirMetadata{logDir} + return m +} + +func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &DescribeLogDirsResponse{ + LogDirs: m.logDirs, + } + return resp +} diff --git a/vendor/github.com/Shopify/sarama/mocks/async_producer.go b/vendor/github.com/Shopify/sarama/mocks/async_producer.go index d1d9ba41..cbd911c6 100644 --- a/vendor/github.com/Shopify/sarama/mocks/async_producer.go +++ b/vendor/github.com/Shopify/sarama/mocks/async_producer.go @@ -33,7 +33,7 @@ func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer { } mp := &AsyncProducer{ t: t, - closed: make(chan struct{}, 0), + closed: make(chan struct{}), expectations: make([]*producerExpectation, 0), input: make(chan *sarama.ProducerMessage, config.ChannelBufferSize), successes: make(chan *sarama.ProducerMessage, config.ChannelBufferSize), @@ -44,6 +44,7 @@ func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer { defer func() { close(mp.successes) close(mp.errors) + close(mp.closed) }() for msg := range mp.input { @@ -86,8 +87,6 @@ func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer { mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations)) } mp.l.Unlock() - - close(mp.closed) }() return mp @@ -147,7 +146,7 @@ func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndSucceed(cf ValueChecke mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf}) } -// ExpectInputWithCheckerFunctionAndSucceed sets an expectation on the mock producer that a message +// ExpectInputWithCheckerFunctionAndFail sets an expectation on the mock producer that a message // will be provided on the input channel. The mock producer will first call the given function to // check the message value. 
If an error is returned it will be made available on the Errors channel // otherwise the mock will handle the message as if it failed to produce successfully. This means diff --git a/vendor/github.com/Shopify/sarama/mocks/consumer.go b/vendor/github.com/Shopify/sarama/mocks/consumer.go index a524190a..451eb08d 100644 --- a/vendor/github.com/Shopify/sarama/mocks/consumer.go +++ b/vendor/github.com/Shopify/sarama/mocks/consumer.go @@ -20,7 +20,7 @@ type Consumer struct { // NewConsumer returns a new mock Consumer instance. The t argument should // be the *testing.T instance of your test method. An error will be written to it if -// an expectation is violated. The config argument is currently unused and can be set to nil. +// an expectation is violated. The config argument can be set to nil. func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer { if config == nil { config = sarama.NewConfig() @@ -63,13 +63,13 @@ func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) return pc, nil } -// Topics returns a list of topics, as registered with SetMetadata +// Topics returns a list of topics, as registered with SetTopicMetadata func (c *Consumer) Topics() ([]string, error) { c.l.Lock() defer c.l.Unlock() if c.metadata == nil { - c.t.Errorf("Unexpected call to Topics. Initialize the mock's topic metadata with SetMetadata.") + c.t.Errorf("Unexpected call to Topics. Initialize the mock's topic metadata with SetTopicMetadata.") return nil, sarama.ErrOutOfBrokers } @@ -80,13 +80,13 @@ func (c *Consumer) Topics() ([]string, error) { return result, nil } -// Partitions returns the list of parititons for the given topic, as registered with SetMetadata +// Partitions returns the list of parititons for the given topic, as registered with SetTopicMetadata func (c *Consumer) Partitions(topic string) ([]int32, error) { c.l.Lock() defer c.l.Unlock() if c.metadata == nil { - c.t.Errorf("Unexpected call to Partitions. Initialize the mock's topic metadata with SetMetadata.") + c.t.Errorf("Unexpected call to Partitions. Initialize the mock's topic metadata with SetTopicMetadata.") return nil, sarama.ErrOutOfBrokers } if c.metadata[topic] == nil { @@ -178,6 +178,7 @@ func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset // Errors and Messages channel, you should specify what values will be provided on these // channels using YieldMessage and YieldError. 
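// Illustrative sketch only, not part of the patch: the mock Consumer flow the
// corrected comments refer to, where topic metadata must be primed with
// SetTopicMetadata before Topics()/Partitions() return anything other than
// sarama.ErrOutOfBrokers. Topic and partition values are invented.
package mocks_test

import (
	"testing"

	"github.com/Shopify/sarama/mocks"
)

func TestMockConsumerMetadata(t *testing.T) {
	c := mocks.NewConsumer(t, nil) // nil config falls back to sarama.NewConfig()

	// Register two partitions for one topic; without this call the mock
	// reports an expectation violation on Topics()/Partitions().
	c.SetTopicMetadata(map[string][]int32{"events": {0, 1}})

	if topics, err := c.Topics(); err != nil || len(topics) != 1 {
		t.Fatalf("unexpected topics %v (err %v)", topics, err)
	}
	if parts, err := c.Partitions("events"); err != nil || len(parts) != 2 {
		t.Fatalf("unexpected partitions %v (err %v)", parts, err)
	}
}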
type PartitionConsumer struct { + highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG l sync.Mutex t ErrorReporter topic string @@ -189,7 +190,6 @@ type PartitionConsumer struct { consumed bool errorsShouldBeDrained bool messagesShouldBeDrained bool - highWaterMarkOffset int64 } /////////////////////////////////////////////////// @@ -244,7 +244,7 @@ func (pc *PartitionConsumer) Close() error { wg.Add(1) go func() { defer wg.Done() - for _ = range pc.messages { + for range pc.messages { // drain } }() diff --git a/vendor/github.com/Shopify/sarama/mocks/mocks.go b/vendor/github.com/Shopify/sarama/mocks/mocks.go index 4adb838d..1e1d9f1c 100644 --- a/vendor/github.com/Shopify/sarama/mocks/mocks.go +++ b/vendor/github.com/Shopify/sarama/mocks/mocks.go @@ -15,8 +15,6 @@ package mocks import ( "errors" - - "github.com/Shopify/sarama" ) // ErrorReporter is a simple interface that includes the testing.T methods we use to report @@ -41,8 +39,3 @@ type producerExpectation struct { Result error CheckFunction ValueChecker } - -type consumerExpectation struct { - Err error - Msg *sarama.ConsumerMessage -} diff --git a/vendor/github.com/Shopify/sarama/mocks/sync_producer.go b/vendor/github.com/Shopify/sarama/mocks/sync_producer.go index 2ac7b5c3..3f4986e2 100644 --- a/vendor/github.com/Shopify/sarama/mocks/sync_producer.go +++ b/vendor/github.com/Shopify/sarama/mocks/sync_producer.go @@ -46,28 +46,27 @@ func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int3 expectation := sp.expectations[0] sp.expectations = sp.expectations[1:] if expectation.CheckFunction != nil { - if val, err := msg.Value.Encode(); err != nil { + val, err := msg.Value.Encode() + if err != nil { sp.t.Errorf("Input message encoding failed: %s", err.Error()) return -1, -1, err - } else { - err := expectation.CheckFunction(val) - if err != nil { - sp.t.Errorf("Check function returned an error: %s", err.Error()) - return -1, -1, err - } + } + + errCheck := expectation.CheckFunction(val) + if errCheck != nil { + sp.t.Errorf("Check function returned an error: %s", errCheck.Error()) + return -1, -1, errCheck } } if expectation.Result == errProduceSuccess { sp.lastOffset++ msg.Offset = sp.lastOffset return 0, msg.Offset, nil - } else { - return -1, -1, expectation.Result } - } else { - sp.t.Errorf("No more expectation set on this mock producer to handle the input message.") - return -1, -1, errOutOfExpectations + return -1, -1, expectation.Result } + sp.t.Errorf("No more expectation set on this mock producer to handle the input message.") + return -1, -1, errOutOfExpectations } // SendMessages corresponds with the SendMessages method of sarama's SyncProducer implementation. 
@@ -79,20 +78,30 @@ func (sp *SyncProducer) SendMessages(msgs []*sarama.ProducerMessage) error { defer sp.l.Unlock() if len(sp.expectations) >= len(msgs) { - expectations := sp.expectations[0 : len(msgs)-1] + expectations := sp.expectations[0:len(msgs)] sp.expectations = sp.expectations[len(msgs):] - for _, expectation := range expectations { + for i, expectation := range expectations { + if expectation.CheckFunction != nil { + val, err := msgs[i].Value.Encode() + if err != nil { + sp.t.Errorf("Input message encoding failed: %s", err.Error()) + return err + } + errCheck := expectation.CheckFunction(val) + if errCheck != nil { + sp.t.Errorf("Check function returned an error: %s", errCheck.Error()) + return errCheck + } + } if expectation.Result != errProduceSuccess { return expectation.Result } - } return nil - } else { - sp.t.Errorf("Insufficient expectations set on this mock producer to handle the input messages.") - return errOutOfExpectations } + sp.t.Errorf("Insufficient expectations set on this mock producer to handle the input messages.") + return errOutOfExpectations } // Close corresponds with the Close method of sarama's SyncProducer implementation. @@ -123,7 +132,7 @@ func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndSucceed(cf ValueC sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf}) } -// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be +// ExpectSendMessageWithCheckerFunctionAndFail sets an expectation on the mock producer that SendMessage will be // called. The mock producer will first call the given function to check the message value. // It will cascade the error of the function, if any, or handle the message as if it failed // to produce successfully, i.e. by returning the provided error. diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go index b21ea634..9931cade 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_request.go +++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go @@ -1,5 +1,7 @@ package sarama +import "errors" + // ReceiveTime is a special value for the timestamp field of Offset Commit Requests which // tells the broker to set the timestamp to the time at which the request was received. // The timestamp is only used if message version 1 is used, which requires kafka 0.8.2. 
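// Illustrative sketch only, not part of the patch: the checker-function
// expectations on the mock SyncProducer, including the SendMessages path that
// (after this change) applies each queued checker to the matching message.
// Topic names and payloads are invented for the example.
package mocks_test

import (
	"fmt"
	"testing"

	"github.com/Shopify/sarama"
	"github.com/Shopify/sarama/mocks"
)

func TestSyncProducerBatchCheckers(t *testing.T) {
	sp := mocks.NewSyncProducer(t, nil)
	defer sp.Close() // Close fails the test if expectations are left over

	notEmpty := func(val []byte) error {
		if len(val) == 0 {
			return fmt.Errorf("empty payload")
		}
		return nil
	}

	// One expectation per message, consumed in order by SendMessages.
	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(notEmpty)
	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(notEmpty)

	msgs := []*sarama.ProducerMessage{
		{Topic: "events", Value: sarama.StringEncoder("a")},
		{Topic: "events", Value: sarama.StringEncoder("b")},
	}
	if err := sp.SendMessages(msgs); err != nil {
		t.Fatal(err)
	}
}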
@@ -50,12 +52,14 @@ type OffsetCommitRequest struct { // - 0 (kafka 0.8.1 and later) // - 1 (kafka 0.8.2 and later) // - 2 (kafka 0.9.0 and later) + // - 3 (kafka 0.11.0 and later) + // - 4 (kafka 2.0.0 and later) Version int16 blocks map[string]map[int32]*offsetCommitRequestBlock } func (r *OffsetCommitRequest) encode(pe packetEncoder) error { - if r.Version < 0 || r.Version > 2 { + if r.Version < 0 || r.Version > 4 { return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"} } @@ -166,14 +170,22 @@ func (r *OffsetCommitRequest) version() int16 { return r.Version } +func (r *OffsetCommitRequest) headerVersion() int16 { + return 1 +} + func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: return V0_8_2_0 case 2: return V0_9_0_0 + case 3: + return V0_11_0_0 + case 4: + return V2_0_0_0 default: - return minVersion + return MinVersion } } @@ -188,3 +200,15 @@ func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset i r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata} } + +func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) { + partitions := r.blocks[topic] + if partitions == nil { + return 0, "", errors.New("no such offset") + } + block := partitions[partitionID] + if block == nil { + return 0, "", errors.New("no such offset") + } + return block.offset, block.metadata, nil +} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go index 7f277e77..342260ef 100644 --- a/vendor/github.com/Shopify/sarama/offset_commit_response.go +++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go @@ -1,7 +1,9 @@ package sarama type OffsetCommitResponse struct { - Errors map[string]map[int32]KError + Version int16 + ThrottleTimeMs int32 + Errors map[string]map[int32]KError } func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) { @@ -17,6 +19,9 @@ func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KE } func (r *OffsetCommitResponse) encode(pe packetEncoder) error { + if r.Version >= 3 { + pe.putInt32(r.ThrottleTimeMs) + } if err := pe.putArrayLength(len(r.Errors)); err != nil { return err } @@ -36,6 +41,15 @@ func (r *OffsetCommitResponse) encode(pe packetEncoder) error { } func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if version >= 3 { + r.ThrottleTimeMs, err = pd.getInt32() + if err != nil { + return err + } + } + numTopics, err := pd.getArrayLength() if err != nil || numTopics == 0 { return err @@ -77,9 +91,24 @@ func (r *OffsetCommitResponse) key() int16 { } func (r *OffsetCommitResponse) version() int16 { + return r.Version +} + +func (r *OffsetCommitResponse) headerVersion() int16 { return 0 } func (r *OffsetCommitResponse) requiredVersion() KafkaVersion { - return minVersion + switch r.Version { + case 1: + return V0_8_2_0 + case 2: + return V0_9_0_0 + case 3: + return V0_11_0_0 + case 4: + return V2_0_0_0 + default: + return MinVersion + } } diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go index b19fe79b..51e9faa3 100644 --- a/vendor/github.com/Shopify/sarama/offset_fetch_request.go +++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go @@ -1,28 +1,33 @@ package sarama type 
OffsetFetchRequest struct { - ConsumerGroup string Version int16 + ConsumerGroup string partitions map[string][]int32 } func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) { - if r.Version < 0 || r.Version > 1 { + if r.Version < 0 || r.Version > 5 { return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"} } if err = pe.putString(r.ConsumerGroup); err != nil { return err } - if err = pe.putArrayLength(len(r.partitions)); err != nil { - return err - } - for topic, partitions := range r.partitions { - if err = pe.putString(topic); err != nil { + + if r.Version >= 2 && r.partitions == nil { + pe.putInt32(-1) + } else { + if err = pe.putArrayLength(len(r.partitions)); err != nil { return err } - if err = pe.putInt32Array(partitions); err != nil { - return err + for topic, partitions := range r.partitions { + if err = pe.putString(topic); err != nil { + return err + } + if err = pe.putInt32Array(partitions); err != nil { + return err + } } } return nil @@ -37,7 +42,7 @@ func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) if err != nil { return err } - if partitionCount == 0 { + if (partitionCount == 0 && version < 2) || partitionCount < 0 { return nil } r.partitions = make(map[string][]int32) @@ -63,12 +68,30 @@ func (r *OffsetFetchRequest) version() int16 { return r.Version } +func (r *OffsetFetchRequest) headerVersion() int16 { + return 1 +} + func (r *OffsetFetchRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: return V0_8_2_0 + case 2: + return V0_10_2_0 + case 3: + return V0_11_0_0 + case 4: + return V2_0_0_0 + case 5: + return V2_1_0_0 default: - return minVersion + return MinVersion + } +} + +func (r *OffsetFetchRequest) ZeroPartitions() { + if r.partitions == nil && r.Version >= 2 { + r.partitions = make(map[string][]int32) } } diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go index 323220ea..9c64e070 100644 --- a/vendor/github.com/Shopify/sarama/offset_fetch_response.go +++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go @@ -1,17 +1,25 @@ package sarama type OffsetFetchResponseBlock struct { - Offset int64 - Metadata string - Err KError + Offset int64 + LeaderEpoch int32 + Metadata string + Err KError } -func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) { +func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) { b.Offset, err = pd.getInt64() if err != nil { return err } + if version >= 5 { + b.LeaderEpoch, err = pd.getInt32() + if err != nil { + return err + } + } + b.Metadata, err = pd.getString() if err != nil { return err @@ -26,9 +34,13 @@ func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) { return nil } -func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) { +func (b *OffsetFetchResponseBlock) encode(pe packetEncoder, version int16) (err error) { pe.putInt64(b.Offset) + if version >= 5 { + pe.putInt32(b.LeaderEpoch) + } + err = pe.putString(b.Metadata) if err != nil { return err @@ -40,10 +52,17 @@ func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) { } type OffsetFetchResponse struct { - Blocks map[string]map[int32]*OffsetFetchResponseBlock + Version int16 + ThrottleTimeMs int32 + Blocks map[string]map[int32]*OffsetFetchResponseBlock + Err KError } func (r *OffsetFetchResponse) encode(pe packetEncoder) error { + if r.Version >= 3 { + 
pe.putInt32(r.ThrottleTimeMs) + } + if err := pe.putArrayLength(len(r.Blocks)); err != nil { return err } @@ -56,53 +75,75 @@ func (r *OffsetFetchResponse) encode(pe packetEncoder) error { } for partition, block := range partitions { pe.putInt32(partition) - if err := block.encode(pe); err != nil { + if err := block.encode(pe, r.Version); err != nil { return err } } } + if r.Version >= 2 { + pe.putInt16(int16(r.Err)) + } return nil } func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) { - numTopics, err := pd.getArrayLength() - if err != nil || numTopics == 0 { - return err - } - - r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics) - for i := 0; i < numTopics; i++ { - name, err := pd.getString() - if err != nil { - return err - } + r.Version = version - numBlocks, err := pd.getArrayLength() + if version >= 3 { + r.ThrottleTimeMs, err = pd.getInt32() if err != nil { return err } + } - if numBlocks == 0 { - r.Blocks[name] = nil - continue - } - r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks) + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } - for j := 0; j < numBlocks; j++ { - id, err := pd.getInt32() + if numTopics > 0 { + r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() if err != nil { return err } - block := new(OffsetFetchResponseBlock) - err = block.decode(pd) + numBlocks, err := pd.getArrayLength() if err != nil { return err } - r.Blocks[name][id] = block + + if numBlocks == 0 { + r.Blocks[name] = nil + continue + } + r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(OffsetFetchResponseBlock) + err = block.decode(pd, version) + if err != nil { + return err + } + r.Blocks[name][id] = block + } } } + if version >= 2 { + kerr, err := pd.getInt16() + if err != nil { + return err + } + r.Err = KError(kerr) + } + return nil } @@ -111,11 +152,28 @@ func (r *OffsetFetchResponse) key() int16 { } func (r *OffsetFetchResponse) version() int16 { + return r.Version +} + +func (r *OffsetFetchResponse) headerVersion() int16 { return 0 } func (r *OffsetFetchResponse) requiredVersion() KafkaVersion { - return minVersion + switch r.Version { + case 1: + return V0_8_2_0 + case 2: + return V0_10_2_0 + case 3: + return V0_11_0_0 + case 4: + return V2_0_0_0 + case 5: + return V2_1_0_0 + default: + return MinVersion + } } func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock { diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go index 5e15cdaf..19408729 100644 --- a/vendor/github.com/Shopify/sarama/offset_manager.go +++ b/vendor/github.com/Shopify/sarama/offset_manager.go @@ -25,27 +25,49 @@ type offsetManager struct { client Client conf *Config group string + ticker *time.Ticker - lock sync.Mutex - poms map[string]map[int32]*partitionOffsetManager - boms map[*Broker]*brokerOffsetManager + memberID string + generation int32 + + broker *Broker + brokerLock sync.RWMutex + + poms map[string]map[int32]*partitionOffsetManager + pomsLock sync.RWMutex + + closeOnce sync.Once + closing chan none + closed chan none } // NewOffsetManagerFromClient creates a new OffsetManager from the given client. 
// It is still necessary to call Close() on the underlying client when finished with the partition manager. func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) { + return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client) +} + +func newOffsetManagerFromClient(group, memberID string, generation int32, client Client) (*offsetManager, error) { // Check that we are not dealing with a closed Client before processing any other arguments if client.Closed() { return nil, ErrClosedClient } + conf := client.Config() om := &offsetManager{ client: client, - conf: client.Config(), + conf: conf, group: group, + ticker: time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval), poms: make(map[string]map[int32]*partitionOffsetManager), - boms: make(map[*Broker]*brokerOffsetManager), + + memberID: memberID, + generation: generation, + + closing: make(chan none), + closed: make(chan none), } + go withRecover(om.mainLoop) return om, nil } @@ -56,8 +78,8 @@ func (om *offsetManager) ManagePartition(topic string, partition int32) (Partiti return nil, err } - om.lock.Lock() - defer om.lock.Unlock() + om.pomsLock.Lock() + defer om.pomsLock.Unlock() topicManagers := om.poms[topic] if topicManagers == nil { @@ -74,53 +96,319 @@ func (om *offsetManager) ManagePartition(topic string, partition int32) (Partiti } func (om *offsetManager) Close() error { + om.closeOnce.Do(func() { + // exit the mainLoop + close(om.closing) + <-om.closed + + // mark all POMs as closed + om.asyncClosePOMs() + + // flush one last time + for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ { + om.flushToBroker() + if om.releasePOMs(false) == 0 { + break + } + } + + om.releasePOMs(true) + om.brokerLock.Lock() + om.broker = nil + om.brokerLock.Unlock() + }) return nil } -func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager { - om.lock.Lock() - defer om.lock.Unlock() +func (om *offsetManager) computeBackoff(retries int) time.Duration { + if om.conf.Metadata.Retry.BackoffFunc != nil { + return om.conf.Metadata.Retry.BackoffFunc(retries, om.conf.Metadata.Retry.Max) + } else { + return om.conf.Metadata.Retry.Backoff + } +} + +func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, string, error) { + broker, err := om.coordinator() + if err != nil { + if retries <= 0 { + return 0, "", err + } + return om.fetchInitialOffset(topic, partition, retries-1) + } + + req := new(OffsetFetchRequest) + req.Version = 1 + req.ConsumerGroup = om.group + req.AddPartition(topic, partition) + + resp, err := broker.FetchOffset(req) + if err != nil { + if retries <= 0 { + return 0, "", err + } + om.releaseCoordinator(broker) + return om.fetchInitialOffset(topic, partition, retries-1) + } + + block := resp.GetBlock(topic, partition) + if block == nil { + return 0, "", ErrIncompleteResponse + } + + switch block.Err { + case ErrNoError: + return block.Offset, block.Metadata, nil + case ErrNotCoordinatorForConsumer: + if retries <= 0 { + return 0, "", block.Err + } + om.releaseCoordinator(broker) + return om.fetchInitialOffset(topic, partition, retries-1) + case ErrOffsetsLoadInProgress: + if retries <= 0 { + return 0, "", block.Err + } + backoff := om.computeBackoff(retries) + select { + case <-om.closing: + return 0, "", block.Err + case <-time.After(backoff): + } + return om.fetchInitialOffset(topic, partition, retries-1) + default: + return 0, "", block.Err + } +} + +func (om *offsetManager) coordinator() (*Broker, error) 
{ + om.brokerLock.RLock() + broker := om.broker + om.brokerLock.RUnlock() + + if broker != nil { + return broker, nil + } + + om.brokerLock.Lock() + defer om.brokerLock.Unlock() - bom := om.boms[broker] - if bom == nil { - bom = om.newBrokerOffsetManager(broker) - om.boms[broker] = bom + if broker := om.broker; broker != nil { + return broker, nil } - bom.refs++ + if err := om.client.RefreshCoordinator(om.group); err != nil { + return nil, err + } - return bom + broker, err := om.client.Coordinator(om.group) + if err != nil { + return nil, err + } + + om.broker = broker + return broker, nil } -func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) { - om.lock.Lock() - defer om.lock.Unlock() +func (om *offsetManager) releaseCoordinator(b *Broker) { + om.brokerLock.Lock() + if om.broker == b { + om.broker = nil + } + om.brokerLock.Unlock() +} - bom.refs-- +func (om *offsetManager) mainLoop() { + defer om.ticker.Stop() + defer close(om.closed) - if bom.refs == 0 { - close(bom.updateSubscriptions) - if om.boms[bom.broker] == bom { - delete(om.boms, bom.broker) + for { + select { + case <-om.ticker.C: + om.flushToBroker() + om.releasePOMs(false) + case <-om.closing: + return } } } -func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) { - om.lock.Lock() - defer om.lock.Unlock() +// flushToBroker is ignored if auto-commit offsets is disabled +func (om *offsetManager) flushToBroker() { + if !om.conf.Consumer.Offsets.AutoCommit.Enable { + return + } - delete(om.boms, bom.broker) + req := om.constructRequest() + if req == nil { + return + } + + broker, err := om.coordinator() + if err != nil { + om.handleError(err) + return + } + + resp, err := broker.CommitOffset(req) + if err != nil { + om.handleError(err) + om.releaseCoordinator(broker) + _ = broker.Close() + return + } + + om.handleResponse(broker, req, resp) } -func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) { - om.lock.Lock() - defer om.lock.Unlock() +func (om *offsetManager) constructRequest() *OffsetCommitRequest { + var r *OffsetCommitRequest + var perPartitionTimestamp int64 + if om.conf.Consumer.Offsets.Retention == 0 { + perPartitionTimestamp = ReceiveTime + r = &OffsetCommitRequest{ + Version: 1, + ConsumerGroup: om.group, + ConsumerID: om.memberID, + ConsumerGroupGeneration: om.generation, + } + } else { + r = &OffsetCommitRequest{ + Version: 2, + RetentionTime: int64(om.conf.Consumer.Offsets.Retention / time.Millisecond), + ConsumerGroup: om.group, + ConsumerID: om.memberID, + ConsumerGroupGeneration: om.generation, + } + } + + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() - delete(om.poms[pom.topic], pom.partition) - if len(om.poms[pom.topic]) == 0 { - delete(om.poms, pom.topic) + for _, topicManagers := range om.poms { + for _, pom := range topicManagers { + pom.lock.Lock() + if pom.dirty { + r.AddBlock(pom.topic, pom.partition, pom.offset, perPartitionTimestamp, pom.metadata) + } + pom.lock.Unlock() + } } + + if len(r.blocks) > 0 { + return r + } + + return nil +} + +func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest, resp *OffsetCommitResponse) { + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + for _, topicManagers := range om.poms { + for _, pom := range topicManagers { + if req.blocks[pom.topic] == nil || req.blocks[pom.topic][pom.partition] == nil { + continue + } + + var err KError + var ok bool + + if resp.Errors[pom.topic] == nil { + pom.handleError(ErrIncompleteResponse) + continue + } + if err, ok = 
resp.Errors[pom.topic][pom.partition]; !ok { + pom.handleError(ErrIncompleteResponse) + continue + } + + switch err { + case ErrNoError: + block := req.blocks[pom.topic][pom.partition] + pom.updateCommitted(block.offset, block.metadata) + case ErrNotLeaderForPartition, ErrLeaderNotAvailable, + ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer: + // not a critical error, we just need to redispatch + om.releaseCoordinator(broker) + case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize: + // nothing we can do about this, just tell the user and carry on + pom.handleError(err) + case ErrOffsetsLoadInProgress: + // nothing wrong but we didn't commit, we'll get it next time round + case ErrUnknownTopicOrPartition: + // let the user know *and* try redispatching - if topic-auto-create is + // enabled, redispatching should trigger a metadata req and create the + // topic; if not then re-dispatching won't help, but we've let the user + // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706) + fallthrough + default: + // dunno, tell the user and try redispatching + pom.handleError(err) + om.releaseCoordinator(broker) + } + } + } +} + +func (om *offsetManager) handleError(err error) { + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + for _, topicManagers := range om.poms { + for _, pom := range topicManagers { + pom.handleError(err) + } + } +} + +func (om *offsetManager) asyncClosePOMs() { + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + for _, topicManagers := range om.poms { + for _, pom := range topicManagers { + pom.AsyncClose() + } + } +} + +// Releases/removes closed POMs once they are clean (or when forced) +func (om *offsetManager) releasePOMs(force bool) (remaining int) { + om.pomsLock.Lock() + defer om.pomsLock.Unlock() + + for topic, topicManagers := range om.poms { + for partition, pom := range topicManagers { + pom.lock.Lock() + releaseDue := pom.done && (force || !pom.dirty) + pom.lock.Unlock() + + if releaseDue { + pom.release() + + delete(om.poms[topic], partition) + if len(om.poms[topic]) == 0 { + delete(om.poms, topic) + } + } + } + remaining += len(om.poms[topic]) + } + return +} + +func (om *offsetManager) findPOM(topic string, partition int32) *partitionOffsetManager { + om.pomsLock.RLock() + defer om.pomsLock.RUnlock() + + if partitions, ok := om.poms[topic]; ok { + if pom, ok := partitions[partition]; ok { + return pom + } + } + return nil } // Partition Offset Manager @@ -151,6 +439,13 @@ type PartitionOffsetManager interface { // message twice, and your processing should ideally be idempotent. MarkOffset(offset int64, metadata string) + // ResetOffset resets to the provided offset, alongside a metadata string that + // represents the state of the partition consumer at that point in time. Reset + // acts as a counterpart to MarkOffset, the difference being that it allows to + // reset an offset to an earlier or smaller value, where MarkOffset only + // allows incrementing the offset. cf MarkOffset for more details. + ResetOffset(offset int64, metadata string) + // Errors returns a read channel of errors that occur during offset management, if // enabled. By default, errors are logged and not returned over this channel. 
If // you want to implement any custom error handling, set your config's @@ -180,138 +475,26 @@ type partitionOffsetManager struct { offset int64 metadata string dirty bool - clean sync.Cond - broker *brokerOffsetManager + done bool - errors chan *ConsumerError - rebalance chan none - dying chan none + releaseOnce sync.Once + errors chan *ConsumerError } func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) { - pom := &partitionOffsetManager{ + offset, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max) + if err != nil { + return nil, err + } + + return &partitionOffsetManager{ parent: om, topic: topic, partition: partition, errors: make(chan *ConsumerError, om.conf.ChannelBufferSize), - rebalance: make(chan none, 1), - dying: make(chan none), - } - pom.clean.L = &pom.lock - - if err := pom.selectBroker(); err != nil { - return nil, err - } - - if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil { - return nil, err - } - - pom.broker.updateSubscriptions <- pom - - go withRecover(pom.mainLoop) - - return pom, nil -} - -func (pom *partitionOffsetManager) mainLoop() { - for { - select { - case <-pom.rebalance: - if err := pom.selectBroker(); err != nil { - pom.handleError(err) - pom.rebalance <- none{} - } else { - pom.broker.updateSubscriptions <- pom - } - case <-pom.dying: - if pom.broker != nil { - select { - case <-pom.rebalance: - case pom.broker.updateSubscriptions <- pom: - } - pom.parent.unrefBrokerOffsetManager(pom.broker) - } - pom.parent.abandonPartitionOffsetManager(pom) - close(pom.errors) - return - } - } -} - -func (pom *partitionOffsetManager) selectBroker() error { - if pom.broker != nil { - pom.parent.unrefBrokerOffsetManager(pom.broker) - pom.broker = nil - } - - var broker *Broker - var err error - - if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil { - return err - } - - if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil { - return err - } - - pom.broker = pom.parent.refBrokerOffsetManager(broker) - return nil -} - -func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error { - request := new(OffsetFetchRequest) - request.Version = 1 - request.ConsumerGroup = pom.parent.group - request.AddPartition(pom.topic, pom.partition) - - response, err := pom.broker.broker.FetchOffset(request) - if err != nil { - return err - } - - block := response.GetBlock(pom.topic, pom.partition) - if block == nil { - return ErrIncompleteResponse - } - - switch block.Err { - case ErrNoError: - pom.offset = block.Offset - pom.metadata = block.Metadata - return nil - case ErrNotCoordinatorForConsumer: - if retries <= 0 { - return block.Err - } - if err := pom.selectBroker(); err != nil { - return err - } - return pom.fetchInitialOffset(retries - 1) - case ErrOffsetsLoadInProgress: - if retries <= 0 { - return block.Err - } - time.Sleep(pom.parent.conf.Metadata.Retry.Backoff) - return pom.fetchInitialOffset(retries - 1) - default: - return block.Err - } -} - -func (pom *partitionOffsetManager) handleError(err error) { - cErr := &ConsumerError{ - Topic: pom.topic, - Partition: pom.partition, - Err: err, - } - - if pom.parent.conf.Consumer.Return.Errors { - pom.errors <- cErr - } else { - Logger.Println(cErr) - } + offset: offset, + metadata: metadata, + }, nil } func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError { @@ -329,13 +512,23 @@ func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata 
string) { } } +func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if offset <= pom.offset { + pom.offset = offset + pom.metadata = metadata + pom.dirty = true + } +} + func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) { pom.lock.Lock() defer pom.lock.Unlock() if pom.offset == offset && pom.metadata == metadata { pom.dirty = false - pom.clean.Signal() } } @@ -351,16 +544,9 @@ func (pom *partitionOffsetManager) NextOffset() (int64, string) { } func (pom *partitionOffsetManager) AsyncClose() { - go func() { - pom.lock.Lock() - defer pom.lock.Unlock() - - for pom.dirty { - pom.clean.Wait() - } - - close(pom.dying) - }() + pom.lock.Lock() + pom.done = true + pom.lock.Unlock() } func (pom *partitionOffsetManager) Close() error { @@ -377,166 +563,22 @@ func (pom *partitionOffsetManager) Close() error { return nil } -// Broker Offset Manager - -type brokerOffsetManager struct { - parent *offsetManager - broker *Broker - timer *time.Ticker - updateSubscriptions chan *partitionOffsetManager - subscriptions map[*partitionOffsetManager]none - refs int -} - -func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager { - bom := &brokerOffsetManager{ - parent: om, - broker: broker, - timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval), - updateSubscriptions: make(chan *partitionOffsetManager), - subscriptions: make(map[*partitionOffsetManager]none), - } - - go withRecover(bom.mainLoop) - - return bom -} - -func (bom *brokerOffsetManager) mainLoop() { - for { - select { - case <-bom.timer.C: - if len(bom.subscriptions) > 0 { - bom.flushToBroker() - } - case s, ok := <-bom.updateSubscriptions: - if !ok { - bom.timer.Stop() - return - } - if _, ok := bom.subscriptions[s]; ok { - delete(bom.subscriptions, s) - } else { - bom.subscriptions[s] = none{} - } - } - } -} - -func (bom *brokerOffsetManager) flushToBroker() { - request := bom.constructRequest() - if request == nil { - return - } - - response, err := bom.broker.CommitOffset(request) - - if err != nil { - bom.abort(err) - return - } - - for s := range bom.subscriptions { - if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil { - continue - } - - var err KError - var ok bool - - if response.Errors[s.topic] == nil { - s.handleError(ErrIncompleteResponse) - delete(bom.subscriptions, s) - s.rebalance <- none{} - continue - } - if err, ok = response.Errors[s.topic][s.partition]; !ok { - s.handleError(ErrIncompleteResponse) - delete(bom.subscriptions, s) - s.rebalance <- none{} - continue - } - - switch err { - case ErrNoError: - block := request.blocks[s.topic][s.partition] - s.updateCommitted(block.offset, block.metadata) - case ErrNotLeaderForPartition, ErrLeaderNotAvailable, - ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer: - // not a critical error, we just need to redispatch - delete(bom.subscriptions, s) - s.rebalance <- none{} - case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize: - // nothing we can do about this, just tell the user and carry on - s.handleError(err) - case ErrOffsetsLoadInProgress: - // nothing wrong but we didn't commit, we'll get it next time round - break - case ErrUnknownTopicOrPartition: - // let the user know *and* try redispatching - if topic-auto-create is - // enabled, redispatching should trigger a metadata request and create the - // topic; if not then re-dispatching won't help, but we've let the user - // know and it 
shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706) - fallthrough - default: - // dunno, tell the user and try redispatching - s.handleError(err) - delete(bom.subscriptions, s) - s.rebalance <- none{} - } +func (pom *partitionOffsetManager) handleError(err error) { + cErr := &ConsumerError{ + Topic: pom.topic, + Partition: pom.partition, + Err: err, } -} -func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest { - var r *OffsetCommitRequest - var perPartitionTimestamp int64 - if bom.parent.conf.Consumer.Offsets.Retention == 0 { - perPartitionTimestamp = ReceiveTime - r = &OffsetCommitRequest{ - Version: 1, - ConsumerGroup: bom.parent.group, - ConsumerGroupGeneration: GroupGenerationUndefined, - } + if pom.parent.conf.Consumer.Return.Errors { + pom.errors <- cErr } else { - r = &OffsetCommitRequest{ - Version: 2, - RetentionTime: int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond), - ConsumerGroup: bom.parent.group, - ConsumerGroupGeneration: GroupGenerationUndefined, - } - - } - - for s := range bom.subscriptions { - s.lock.Lock() - if s.dirty { - r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata) - } - s.lock.Unlock() - } - - if len(r.blocks) > 0 { - return r + Logger.Println(cErr) } - - return nil } -func (bom *brokerOffsetManager) abort(err error) { - _ = bom.broker.Close() // we don't care about the error this might return, we already have one - bom.parent.abandonBroker(bom) - - for pom := range bom.subscriptions { - pom.handleError(err) - pom.rebalance <- none{} - } - - for s := range bom.updateSubscriptions { - if _, ok := bom.subscriptions[s]; !ok { - s.handleError(err) - s.rebalance <- none{} - } - } - - bom.subscriptions = make(map[*partitionOffsetManager]none) +func (pom *partitionOffsetManager) release() { + pom.releaseOnce.Do(func() { + close(pom.errors) + }) } diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go index 6c269601..c0b3305f 100644 --- a/vendor/github.com/Shopify/sarama/offset_request.go +++ b/vendor/github.com/Shopify/sarama/offset_request.go @@ -6,7 +6,7 @@ type offsetRequestBlock struct { } func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error { - pe.putInt64(int64(b.time)) + pe.putInt64(b.time) if version == 0 { pe.putInt32(b.maxOffsets) } @@ -27,12 +27,20 @@ func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) } type OffsetRequest struct { - Version int16 - blocks map[string]map[int32]*offsetRequestBlock + Version int16 + replicaID int32 + isReplicaIDSet bool + blocks map[string]map[int32]*offsetRequestBlock } func (r *OffsetRequest) encode(pe packetEncoder) error { - pe.putInt32(-1) // replica ID is always -1 for clients + if r.isReplicaIDSet { + pe.putInt32(r.replicaID) + } else { + // default replica ID is always -1 for clients + pe.putInt32(-1) + } + err := pe.putArrayLength(len(r.blocks)) if err != nil { return err @@ -59,10 +67,14 @@ func (r *OffsetRequest) encode(pe packetEncoder) error { func (r *OffsetRequest) decode(pd packetDecoder, version int16) error { r.Version = version - // Ignore replica ID - if _, err := pd.getInt32(); err != nil { + replicaID, err := pd.getInt32() + if err != nil { return err } + if replicaID >= 0 { + r.SetReplicaID(replicaID) + } + blockCount, err := pd.getArrayLength() if err != nil { return err @@ -104,13 +116,29 @@ func (r *OffsetRequest) version() int16 { return r.Version } +func (r 
*OffsetRequest) headerVersion() int16 { + return 1 +} + func (r *OffsetRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: return V0_10_1_0 default: - return minVersion + return MinVersion + } +} + +func (r *OffsetRequest) SetReplicaID(id int32) { + r.replicaID = id + r.isReplicaIDSet = true +} + +func (r *OffsetRequest) ReplicaID() int32 { + if r.isReplicaIDSet { + return r.replicaID } + return -1 } func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) { diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go index 9a9cfe96..ead3ebbc 100644 --- a/vendor/github.com/Shopify/sarama/offset_response.go +++ b/vendor/github.com/Shopify/sarama/offset_response.go @@ -150,12 +150,16 @@ func (r *OffsetResponse) version() int16 { return r.Version } +func (r *OffsetResponse) headerVersion() int16 { + return 0 +} + func (r *OffsetResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: return V0_10_1_0 default: - return minVersion + return MinVersion } } diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go index 28670c0e..ed00ba35 100644 --- a/vendor/github.com/Shopify/sarama/packet_decoder.go +++ b/vendor/github.com/Shopify/sarama/packet_decoder.go @@ -9,11 +9,22 @@ type packetDecoder interface { getInt16() (int16, error) getInt32() (int32, error) getInt64() (int64, error) + getVarint() (int64, error) + getUVarint() (uint64, error) getArrayLength() (int, error) + getCompactArrayLength() (int, error) + getBool() (bool, error) + getEmptyTaggedFieldArray() (int, error) // Collections getBytes() ([]byte, error) + getVarintBytes() ([]byte, error) + getRawBytes(length int) ([]byte, error) getString() (string, error) + getNullableString() (*string, error) + getCompactString() (string, error) + getCompactNullableString() (*string, error) + getCompactInt32Array() ([]int32, error) getInt32Array() ([]int32, error) getInt64Array() ([]int64, error) getStringArray() ([]string, error) @@ -21,6 +32,8 @@ type packetDecoder interface { // Subsets remaining() int getSubset(length int) (packetDecoder, error) + peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset + peekInt8(offset int) (int8, error) // similar to peek, but just one byte // Stacks, see PushDecoder push(in pushDecoder) error @@ -43,3 +56,12 @@ type pushDecoder interface { // of data from the saved offset, and verify it based on the data between the saved offset and curOffset. check(curOffset int, buf []byte) error } + +// dynamicPushDecoder extends the interface of pushDecoder for uses cases where the length of the +// fields itself is unknown until its value was decoded (for instance varint encoded length +// fields). 
+// During push, dynamicPushDecoder.decode() method will be called instead of reserveLength() +type dynamicPushDecoder interface { + pushDecoder + decoder +} diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go index 27a10f6d..50c735c0 100644 --- a/vendor/github.com/Shopify/sarama/packet_encoder.go +++ b/vendor/github.com/Shopify/sarama/packet_encoder.go @@ -11,15 +11,26 @@ type packetEncoder interface { putInt16(in int16) putInt32(in int32) putInt64(in int64) + putVarint(in int64) + putUVarint(in uint64) + putCompactArrayLength(in int) putArrayLength(in int) error + putBool(in bool) // Collections putBytes(in []byte) error + putVarintBytes(in []byte) error putRawBytes(in []byte) error + putCompactString(in string) error + putNullableCompactString(in *string) error putString(in string) error + putNullableString(in *string) error putStringArray(in []string) error + putCompactInt32Array(in []int32) error + putNullableCompactInt32Array(in []int32) error putInt32Array(in []int32) error putInt64Array(in []int64) error + putEmptyTaggedFieldArray() // Provide the current offset to record the batch size metric offset() int @@ -48,3 +59,14 @@ type pushEncoder interface { // of data to the saved offset, based on the data between the saved offset and curOffset. run(curOffset int, buf []byte) error } + +// dynamicPushEncoder extends the interface of pushEncoder for uses cases where the length of the +// fields itself is unknown until its value was computed (for instance varint encoded length +// fields). +type dynamicPushEncoder interface { + pushEncoder + + // Called during pop() to adjust the length of the field. + // It should return the difference in bytes between the last computed length and current length. + adjustLength(currOffset int) int +} diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go index d24199da..6a708e72 100644 --- a/vendor/github.com/Shopify/sarama/partitioner.go +++ b/vendor/github.com/Shopify/sarama/partitioner.go @@ -22,11 +22,51 @@ type Partitioner interface { RequiresConsistency() bool } +// DynamicConsistencyPartitioner can optionally be implemented by Partitioners +// in order to allow more flexibility than is originally allowed by the +// RequiresConsistency method in the Partitioner interface. This allows +// partitioners to require consistency sometimes, but not all times. It's useful +// for, e.g., the HashPartitioner, which does not require consistency if the +// message key is nil. +type DynamicConsistencyPartitioner interface { + Partitioner + + // MessageRequiresConsistency is similar to Partitioner.RequiresConsistency, + // but takes in the message being partitioned so that the partitioner can + // make a per-message determination. + MessageRequiresConsistency(message *ProducerMessage) bool +} + // PartitionerConstructor is the type for a function capable of constructing new Partitioners. 
type PartitionerConstructor func(topic string) Partitioner type manualPartitioner struct{} +// HashPartitionOption lets you modify default values of the partitioner +type HashPartitionerOption func(*hashPartitioner) + +// WithAbsFirst means that the partitioner handles absolute values +// in the same way as the reference Java implementation +func WithAbsFirst() HashPartitionerOption { + return func(hp *hashPartitioner) { + hp.referenceAbs = true + } +} + +// WithCustomHashFunction lets you specify what hash function to use for the partitioning +func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption { + return func(hp *hashPartitioner) { + hp.hasher = hasher() + } +} + +// WithCustomFallbackPartitioner lets you specify what HashPartitioner should be used in case a Distribution Key is empty +func WithCustomFallbackPartitioner(randomHP *hashPartitioner) HashPartitionerOption { + return func(hp *hashPartitioner) { + hp.random = hp + } +} + // NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided // ProducerMessage's Partition field as the partition to produce to. func NewManualPartitioner(topic string) Partitioner { @@ -83,8 +123,36 @@ func (p *roundRobinPartitioner) RequiresConsistency() bool { } type hashPartitioner struct { - random Partitioner - hasher hash.Hash32 + random Partitioner + hasher hash.Hash32 + referenceAbs bool +} + +// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of custom hasher. +// The argument is a function providing the instance, implementing the hash.Hash32 interface. This is to ensure that +// each partition dispatcher gets its own hasher, to avoid concurrency issues by sharing an instance. +func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor { + return func(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = hasher() + p.referenceAbs = false + return p + } +} + +// NewCustomPartitioner creates a default Partitioner but lets you specify the behavior of each component via options +func NewCustomPartitioner(options ...HashPartitionerOption) PartitionerConstructor { + return func(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = fnv.New32a() + p.referenceAbs = false + for _, option := range options { + option(p) + } + return p + } } // NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a @@ -95,6 +163,19 @@ func NewHashPartitioner(topic string) Partitioner { p := new(hashPartitioner) p.random = NewRandomPartitioner(topic) p.hasher = fnv.New32a() + p.referenceAbs = false + return p +} + +// NewReferenceHashPartitioner is like NewHashPartitioner except that it handles absolute values +// in the same way as the reference Java implementation. NewHashPartitioner was supposed to do +// that but it had a mistake and now there are people depending on both behaviours. This will +// all go away on the next major version bump. 
+func NewReferenceHashPartitioner(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = fnv.New32a() + p.referenceAbs = true return p } @@ -111,9 +192,18 @@ func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int3 if err != nil { return -1, err } - partition := int32(p.hasher.Sum32()) % numPartitions - if partition < 0 { - partition = -partition + var partition int32 + // Turns out we were doing our absolute value in a subtly different way from the upstream + // implementation, but now we need to maintain backwards compat for people who started using + // the old version; if referenceAbs is set we are compatible with the reference java client + // but not past Sarama versions + if p.referenceAbs { + partition = (int32(p.hasher.Sum32()) & 0x7fffffff) % numPartitions + } else { + partition = int32(p.hasher.Sum32()) % numPartitions + if partition < 0 { + partition = -partition + } } return partition, nil } @@ -121,3 +211,7 @@ func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int3 func (p *hashPartitioner) RequiresConsistency() bool { return true } + +func (p *hashPartitioner) MessageRequiresConsistency(message *ProducerMessage) bool { + return message.Key != nil +} diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go index fd5ea0f9..827542c5 100644 --- a/vendor/github.com/Shopify/sarama/prep_encoder.go +++ b/vendor/github.com/Shopify/sarama/prep_encoder.go @@ -1,6 +1,8 @@ package sarama import ( + "encoding/binary" + "errors" "fmt" "math" @@ -8,6 +10,7 @@ import ( ) type prepEncoder struct { + stack []pushEncoder length int } @@ -29,6 +32,16 @@ func (pe *prepEncoder) putInt64(in int64) { pe.length += 8 } +func (pe *prepEncoder) putVarint(in int64) { + var buf [binary.MaxVarintLen64]byte + pe.length += binary.PutVarint(buf[:], in) +} + +func (pe *prepEncoder) putUVarint(in uint64) { + var buf [binary.MaxVarintLen64]byte + pe.length += binary.PutUvarint(buf[:], in) +} + func (pe *prepEncoder) putArrayLength(in int) error { if in > math.MaxInt32 { return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} @@ -37,6 +50,14 @@ func (pe *prepEncoder) putArrayLength(in int) error { return nil } +func (pe *prepEncoder) putCompactArrayLength(in int) { + pe.putUVarint(uint64(in + 1)) +} + +func (pe *prepEncoder) putBool(in bool) { + pe.length++ +} + // arrays func (pe *prepEncoder) putBytes(in []byte) error { @@ -44,11 +65,30 @@ func (pe *prepEncoder) putBytes(in []byte) error { if in == nil { return nil } - if len(in) > math.MaxInt32 { - return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} + return pe.putRawBytes(in) +} + +func (pe *prepEncoder) putVarintBytes(in []byte) error { + if in == nil { + pe.putVarint(-1) + return nil + } + pe.putVarint(int64(len(in))) + return pe.putRawBytes(in) +} + +func (pe *prepEncoder) putCompactString(in string) error { + pe.putCompactArrayLength(len(in)) + return pe.putRawBytes([]byte(in)) +} + +func (pe *prepEncoder) putNullableCompactString(in *string) error { + if in == nil { + pe.putUVarint(0) + return nil + } else { + return pe.putCompactString(*in) } - pe.length += len(in) - return nil } func (pe *prepEncoder) putRawBytes(in []byte) error { @@ -59,6 +99,14 @@ func (pe *prepEncoder) putRawBytes(in []byte) error { return nil } +func (pe *prepEncoder) putNullableString(in *string) error { + if in == nil { + pe.length += 2 + return nil + } + 
return pe.putString(*in) +} + func (pe *prepEncoder) putString(in string) error { pe.length += 2 if len(in) > math.MaxInt16 { @@ -83,6 +131,27 @@ func (pe *prepEncoder) putStringArray(in []string) error { return nil } +func (pe *prepEncoder) putCompactInt32Array(in []int32) error { + if in == nil { + return errors.New("expected int32 array to be non null") + } + + pe.putUVarint(uint64(len(in)) + 1) + pe.length += 4 * len(in) + return nil +} + +func (pe *prepEncoder) putNullableCompactInt32Array(in []int32) error { + if in == nil { + pe.putUVarint(0) + return nil + } + + pe.putUVarint(uint64(len(in)) + 1) + pe.length += 4 * len(in) + return nil +} + func (pe *prepEncoder) putInt32Array(in []int32) error { err := pe.putArrayLength(len(in)) if err != nil { @@ -101,6 +170,10 @@ func (pe *prepEncoder) putInt64Array(in []int64) error { return nil } +func (pe *prepEncoder) putEmptyTaggedFieldArray() { + pe.putUVarint(0) +} + func (pe *prepEncoder) offset() int { return pe.length } @@ -108,10 +181,18 @@ func (pe *prepEncoder) offset() int { // stackable func (pe *prepEncoder) push(in pushEncoder) { + in.saveOffset(pe.length) pe.length += in.reserveLength() + pe.stack = append(pe.stack, in) } func (pe *prepEncoder) pop() error { + in := pe.stack[len(pe.stack)-1] + pe.stack = pe.stack[:len(pe.stack)-1] + if dpe, ok := in.(dynamicPushEncoder); ok { + pe.length += dpe.adjustLength(pe.length) + } + return nil } diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go index 40dc8015..0034651e 100644 --- a/vendor/github.com/Shopify/sarama/produce_request.go +++ b/vendor/github.com/Shopify/sarama/produce_request.go @@ -21,19 +21,56 @@ const ( ) type ProduceRequest struct { - RequiredAcks RequiredAcks - Timeout int32 - Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10 - msgSets map[string]map[int32]*MessageSet + TransactionalID *string + RequiredAcks RequiredAcks + Timeout int32 + Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11 + records map[string]map[int32]Records +} + +func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram, + topicCompressionRatioMetric metrics.Histogram) int64 { + var topicRecordCount int64 + for _, messageBlock := range msgSet.Messages { + // Is this a fake "message" wrapping real messages? 
+ if messageBlock.Msg.Set != nil { + topicRecordCount += int64(len(messageBlock.Msg.Set.Messages)) + } else { + // A single uncompressed message + topicRecordCount++ + } + // Better be safe than sorry when computing the compression ratio + if messageBlock.Msg.compressedSize != 0 { + compressionRatio := float64(len(messageBlock.Msg.Value)) / + float64(messageBlock.Msg.compressedSize) + // Histogram do not support decimal values, let's multiple it by 100 for better precision + intCompressionRatio := int64(100 * compressionRatio) + compressionRatioMetric.Update(intCompressionRatio) + topicCompressionRatioMetric.Update(intCompressionRatio) + } + } + return topicRecordCount +} + +func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram, + topicCompressionRatioMetric metrics.Histogram) int64 { + if recordBatch.compressedRecords != nil { + compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100) + compressionRatioMetric.Update(compressionRatio) + topicCompressionRatioMetric.Update(compressionRatio) + } + + return int64(len(recordBatch.Records)) } func (r *ProduceRequest) encode(pe packetEncoder) error { + if r.Version >= 3 { + if err := pe.putNullableString(r.TransactionalID); err != nil { + return err + } + } pe.putInt16(int16(r.RequiredAcks)) pe.putInt32(r.Timeout) - err := pe.putArrayLength(len(r.msgSets)) - if err != nil { - return err - } metricRegistry := pe.metricRegistry() var batchSizeMetric metrics.Histogram var compressionRatioMetric metrics.Histogram @@ -41,9 +78,14 @@ func (r *ProduceRequest) encode(pe packetEncoder) error { batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry) compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry) } - totalRecordCount := int64(0) - for topic, partitions := range r.msgSets { + + err := pe.putArrayLength(len(r.records)) + if err != nil { + return err + } + + for topic, partitions := range r.records { err = pe.putString(topic) if err != nil { return err @@ -57,11 +99,11 @@ func (r *ProduceRequest) encode(pe packetEncoder) error { if metricRegistry != nil { topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry) } - for id, msgSet := range partitions { + for id, records := range partitions { startOffset := pe.offset() pe.putInt32(id) pe.push(&lengthField{}) - err = msgSet.encode(pe) + err = records.encode(pe) if err != nil { return err } @@ -70,23 +112,10 @@ func (r *ProduceRequest) encode(pe packetEncoder) error { return err } if metricRegistry != nil { - for _, messageBlock := range msgSet.Messages { - // Is this a fake "message" wrapping real messages? 
- if messageBlock.Msg.Set != nil { - topicRecordCount += int64(len(messageBlock.Msg.Set.Messages)) - } else { - // A single uncompressed message - topicRecordCount++ - } - // Better be safe than sorry when computing the compression ratio - if messageBlock.Msg.compressedSize != 0 { - compressionRatio := float64(len(messageBlock.Msg.Value)) / - float64(messageBlock.Msg.compressedSize) - // Histogram do not support decimal values, let's multiple it by 100 for better precision - intCompressionRatio := int64(100 * compressionRatio) - compressionRatioMetric.Update(intCompressionRatio) - topicCompressionRatioMetric.Update(intCompressionRatio) - } + if r.Version >= 3 { + topicRecordCount += updateBatchMetrics(records.RecordBatch, compressionRatioMetric, topicCompressionRatioMetric) + } else { + topicRecordCount += updateMsgSetMetrics(records.MsgSet, compressionRatioMetric, topicCompressionRatioMetric) } batchSize := int64(pe.offset() - startOffset) batchSizeMetric.Update(batchSize) @@ -108,6 +137,15 @@ func (r *ProduceRequest) encode(pe packetEncoder) error { } func (r *ProduceRequest) decode(pd packetDecoder, version int16) error { + r.Version = version + + if version >= 3 { + id, err := pd.getNullableString() + if err != nil { + return err + } + r.TransactionalID = id + } requiredAcks, err := pd.getInt16() if err != nil { return err @@ -123,7 +161,8 @@ func (r *ProduceRequest) decode(pd packetDecoder, version int16) error { if topicCount == 0 { return nil } - r.msgSets = make(map[string]map[int32]*MessageSet) + + r.records = make(map[string]map[int32]Records) for i := 0; i < topicCount; i++ { topic, err := pd.getString() if err != nil { @@ -133,28 +172,29 @@ func (r *ProduceRequest) decode(pd packetDecoder, version int16) error { if err != nil { return err } - r.msgSets[topic] = make(map[int32]*MessageSet) + r.records[topic] = make(map[int32]Records) + for j := 0; j < partitionCount; j++ { partition, err := pd.getInt32() if err != nil { return err } - messageSetSize, err := pd.getInt32() + size, err := pd.getInt32() if err != nil { return err } - msgSetDecoder, err := pd.getSubset(int(messageSetSize)) + recordsDecoder, err := pd.getSubset(int(size)) if err != nil { return err } - msgSet := &MessageSet{} - err = msgSet.decode(msgSetDecoder) - if err != nil { + var records Records + if err := records.decode(recordsDecoder); err != nil { return err } - r.msgSets[topic][partition] = msgSet + r.records[topic][partition] = records } } + return nil } @@ -166,44 +206,53 @@ func (r *ProduceRequest) version() int16 { return r.Version } +func (r *ProduceRequest) headerVersion() int16 { + return 1 +} + func (r *ProduceRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: return V0_9_0_0 case 2: return V0_10_0_0 + case 3: + return V0_11_0_0 + case 7: + return V2_1_0_0 default: - return minVersion + return MinVersion } } -func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) { - if r.msgSets == nil { - r.msgSets = make(map[string]map[int32]*MessageSet) +func (r *ProduceRequest) ensureRecords(topic string, partition int32) { + if r.records == nil { + r.records = make(map[string]map[int32]Records) } - if r.msgSets[topic] == nil { - r.msgSets[topic] = make(map[int32]*MessageSet) + if r.records[topic] == nil { + r.records[topic] = make(map[int32]Records) } +} - set := r.msgSets[topic][partition] +func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) { + r.ensureRecords(topic, partition) + set := r.records[topic][partition].MsgSet if set == nil { 
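To make the new Records union on ProduceRequest easier to review: a hypothetical, hand-rolled request (normally the async producer builds this internally) showing AddBatch as the v3+ RecordBatch counterpart of the legacy AddSet/AddMessage path; topic, partition and field values are placeholders:

package main

import (
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.ProduceRequest{
		RequiredAcks: sarama.WaitForAll,
		Timeout:      10000,
		Version:      3, // v3+ carries RecordBatches and a nullable transactional ID
	}

	batch := &sarama.RecordBatch{
		Version:        2,
		FirstTimestamp: time.Now().Truncate(time.Millisecond),
	}
	batch.Records = append(batch.Records, &sarama.Record{
		Key:   []byte("k"),
		Value: []byte("v"),
	})

	// Internally stored via newDefaultRecords(batch); the pre-0.11 path would
	// instead go through AddMessage/AddSet and newLegacyRecords.
	req.AddBatch("events", 0, batch)
}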
set = new(MessageSet) - r.msgSets[topic][partition] = set + r.records[topic][partition] = newLegacyRecords(set) } set.addMessage(msg) } func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) { - if r.msgSets == nil { - r.msgSets = make(map[string]map[int32]*MessageSet) - } - - if r.msgSets[topic] == nil { - r.msgSets[topic] = make(map[int32]*MessageSet) - } + r.ensureRecords(topic, partition) + r.records[topic][partition] = newLegacyRecords(set) +} - r.msgSets[topic][partition] = set +func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) { + r.ensureRecords(topic, partition) + r.records[topic][partition] = newDefaultRecords(batch) } diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go index 195abcb8..edf97879 100644 --- a/vendor/github.com/Shopify/sarama/produce_response.go +++ b/vendor/github.com/Shopify/sarama/produce_response.go @@ -1,12 +1,31 @@ package sarama -import "time" - +import ( + "fmt" + "time" +) + +// Protocol, http://kafka.apache.org/protocol.html +// v1 +// v2 = v3 = v4 +// v5 = v6 = v7 +// Produce Response (Version: 7) => [responses] throttle_time_ms +// responses => topic [partition_responses] +// topic => STRING +// partition_responses => partition error_code base_offset log_append_time log_start_offset +// partition => INT32 +// error_code => INT16 +// base_offset => INT64 +// log_append_time => INT64 +// log_start_offset => INT64 +// throttle_time_ms => INT32 + +// partition_responses in protocol type ProduceResponseBlock struct { - Err KError - Offset int64 - // only provided if Version >= 2 and the broker is configured with `LogAppendTime` - Timestamp time.Time + Err KError // v0, error_code + Offset int64 // v0, base_offset + Timestamp time.Time // v2, log_append_time, and the broker is configured with `LogAppendTime` + StartOffset int64 // v5, log_start_offset } func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) { @@ -29,13 +48,41 @@ func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err erro } } + if version >= 5 { + b.StartOffset, err = pd.getInt64() + if err != nil { + return err + } + } + + return nil +} + +func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(b.Err)) + pe.putInt64(b.Offset) + + if version >= 2 { + timestamp := int64(-1) + if !b.Timestamp.Before(time.Unix(0, 0)) { + timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond) + } else if !b.Timestamp.IsZero() { + return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)} + } + pe.putInt64(timestamp) + } + + if version >= 5 { + pe.putInt64(b.StartOffset) + } + return nil } type ProduceResponse struct { - Blocks map[string]map[int32]*ProduceResponseBlock + Blocks map[string]map[int32]*ProduceResponseBlock // v0, responses Version int16 - ThrottleTime time.Duration // only provided if Version >= 1 + ThrottleTime time.Duration // v1, throttle_time_ms } func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) { @@ -76,11 +123,12 @@ func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) { } if r.Version >= 1 { - if millis, err := pd.getInt32(); err != nil { + millis, err := pd.getInt32() + if err != nil { return err - } else { - r.ThrottleTime = time.Duration(millis) * time.Millisecond } + + r.ThrottleTime = time.Duration(millis) * time.Millisecond } return nil @@ -102,10 +150,13 
@@ func (r *ProduceResponse) encode(pe packetEncoder) error { } for id, prb := range partitions { pe.putInt32(id) - pe.putInt16(int16(prb.Err)) - pe.putInt64(prb.Offset) + err = prb.encode(pe, r.Version) + if err != nil { + return err + } } } + if r.Version >= 1 { pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) } @@ -120,15 +171,12 @@ func (r *ProduceResponse) version() int16 { return r.Version } +func (r *ProduceResponse) headerVersion() int16 { + return 0 +} + func (r *ProduceResponse) requiredVersion() KafkaVersion { - switch r.Version { - case 1: - return V0_9_0_0 - case 2: - return V0_10_0_0 - default: - return minVersion - } + return MinVersion } func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock { @@ -154,5 +202,11 @@ func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err K byTopic = make(map[int32]*ProduceResponseBlock) r.Blocks[topic] = byTopic } - byTopic[partition] = &ProduceResponseBlock{Err: err} + block := &ProduceResponseBlock{ + Err: err, + } + if r.Version >= 2 { + block.Timestamp = time.Now() + } + byTopic[partition] = block } diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go index 158d9c47..9c70f818 100644 --- a/vendor/github.com/Shopify/sarama/produce_set.go +++ b/vendor/github.com/Shopify/sarama/produce_set.go @@ -1,25 +1,34 @@ package sarama -import "time" +import ( + "encoding/binary" + "errors" + "time" +) type partitionSet struct { - msgs []*ProducerMessage - setToSend *MessageSet - bufferBytes int + msgs []*ProducerMessage + recordsToSend Records + bufferBytes int } type produceSet struct { - parent *asyncProducer - msgs map[string]map[int32]*partitionSet + parent *asyncProducer + msgs map[string]map[int32]*partitionSet + producerID int64 + producerEpoch int16 bufferBytes int bufferCount int } func newProduceSet(parent *asyncProducer) *produceSet { + pid, epoch := parent.txnmgr.getProducerID() return &produceSet{ - msgs: make(map[string]map[int32]*partitionSet), - parent: parent, + msgs: make(map[string]map[int32]*partitionSet), + parent: parent, + producerID: pid, + producerEpoch: epoch, } } @@ -39,31 +48,78 @@ func (ps *produceSet) add(msg *ProducerMessage) error { } } + timestamp := msg.Timestamp + if timestamp.IsZero() { + timestamp = time.Now() + } + timestamp = timestamp.Truncate(time.Millisecond) + partitions := ps.msgs[msg.Topic] if partitions == nil { partitions = make(map[int32]*partitionSet) ps.msgs[msg.Topic] = partitions } + var size int + set := partitions[msg.Partition] if set == nil { - set = &partitionSet{setToSend: new(MessageSet)} + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + batch := &RecordBatch{ + FirstTimestamp: timestamp, + Version: 2, + Codec: ps.parent.conf.Producer.Compression, + CompressionLevel: ps.parent.conf.Producer.CompressionLevel, + ProducerID: ps.producerID, + ProducerEpoch: ps.producerEpoch, + } + if ps.parent.conf.Producer.Idempotent { + batch.FirstSequence = msg.sequenceNumber + } + set = &partitionSet{recordsToSend: newDefaultRecords(batch)} + size = recordBatchOverhead + } else { + set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))} + } partitions[msg.Partition] = set } + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence { + return errors.New("assertion failed: message out of sequence added to a batch") + } + } + + // Past this point we 
can't return an error, because we've already added the message to the set. set.msgs = append(set.msgs, msg) - msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val} - if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { - if msg.Timestamp.IsZero() { - msgToSend.Timestamp = time.Now() - } else { - msgToSend.Timestamp = msg.Timestamp + + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + // We are being conservative here to avoid having to prep encode the record + size += maximumRecordOverhead + rec := &Record{ + Key: key, + Value: val, + TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp), } - msgToSend.Version = 1 + size += len(key) + len(val) + if len(msg.Headers) > 0 { + rec.Headers = make([]*RecordHeader, len(msg.Headers)) + for i := range msg.Headers { + rec.Headers[i] = &msg.Headers[i] + size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32 + } + } + set.recordsToSend.RecordBatch.addRecord(rec) + } else { + msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val} + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + msgToSend.Timestamp = timestamp + msgToSend.Version = 1 + } + set.recordsToSend.MsgSet.addMessage(msgToSend) + size = producerMessageOverhead + len(key) + len(val) } - set.setToSend.addMessage(msgToSend) - size := producerMessageOverhead + len(key) + len(val) set.bufferBytes += size ps.bufferBytes += size ps.bufferCount++ @@ -79,30 +135,67 @@ func (ps *produceSet) buildRequest() *ProduceRequest { if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { req.Version = 2 } + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + req.Version = 3 + } - for topic, partitionSet := range ps.msgs { - for partition, set := range partitionSet { + if ps.parent.conf.Producer.Compression == CompressionZSTD && ps.parent.conf.Version.IsAtLeast(V2_1_0_0) { + req.Version = 7 + } + + for topic, partitionSets := range ps.msgs { + for partition, set := range partitionSets { + if req.Version >= 3 { + // If the API version we're hitting is 3 or greater, we need to calculate + // offsets for each record in the batch relative to FirstOffset. + // Additionally, we must set LastOffsetDelta to the value of the last offset + // in the batch. Since the OffsetDelta of the first record is 0, we know that the + // final record of any batch will have an offset of (# of records in batch) - 1. + // (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets + // under the RecordBatch section for details.) + rb := set.recordsToSend.RecordBatch + if len(rb.Records) > 0 { + rb.LastOffsetDelta = int32(len(rb.Records) - 1) + for i, record := range rb.Records { + record.OffsetDelta = int64(i) + } + } + req.AddBatch(topic, partition, rb) + continue + } if ps.parent.conf.Producer.Compression == CompressionNone { - req.AddSet(topic, partition, set.setToSend) + req.AddSet(topic, partition, set.recordsToSend.MsgSet) } else { // When compression is enabled, the entire set for each partition is compressed // and sent as the payload of a single fake "message" with the appropriate codec // set and no key. When the server sees a message with a compression codec, it // decompresses the payload and treats the result as its message set. - payload, err := encode(set.setToSend, ps.parent.conf.MetricRegistry) + + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + // If our version is 0.10 or later, assign relative offsets + // to the inner messages. 
This lets the broker avoid + // recompressing the message set. + // (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets + // for details on relative offsets.) + for i, msg := range set.recordsToSend.MsgSet.Messages { + msg.Offset = int64(i) + } + } + payload, err := encode(set.recordsToSend.MsgSet, ps.parent.conf.MetricRegistry) if err != nil { Logger.Println(err) // if this happens, it's basically our fault. panic(err) } compMsg := &Message{ - Codec: ps.parent.conf.Producer.Compression, - Key: nil, - Value: payload, - Set: set.setToSend, // Provide the underlying message set for accurate metrics + Codec: ps.parent.conf.Producer.Compression, + CompressionLevel: ps.parent.conf.Producer.CompressionLevel, + Key: nil, + Value: payload, + Set: set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics } if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { compMsg.Version = 1 - compMsg.Timestamp = set.setToSend.Messages[0].Msg.Timestamp + compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp } req.AddMessage(topic, partition, compMsg) } @@ -112,10 +205,10 @@ func (ps *produceSet) buildRequest() *ProduceRequest { return req } -func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) { +func (ps *produceSet) eachPartition(cb func(topic string, partition int32, pSet *partitionSet)) { for topic, partitionSet := range ps.msgs { for partition, set := range partitionSet { - cb(topic, partition, set.msgs) + cb(topic, partition, set) } } } @@ -135,14 +228,18 @@ func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMe } func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool { + version := 1 + if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) { + version = 2 + } + switch { // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. - case ps.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)): + case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)): return true - // Would we overflow the size-limit of a compressed message-batch for this partition? - case ps.parent.conf.Producer.Compression != CompressionNone && - ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && - ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize() >= ps.parent.conf.Producer.MaxMessageBytes: + // Would we overflow the size-limit of a message-batch for this partition? + case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && + ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes: return true // Would we overflow simply in number of messages? 
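The relative-offset comments in buildRequest above reduce to simple arithmetic. A standalone sketch (illustrative values, not sarama code) of how the producer assigns per-record deltas and how absolute offsets are recovered from the batch's base offset, mirroring RecordBatch.LastOffset later in this diff:

package main

import "fmt"

func main() {
	records := []string{"a", "b", "c"} // stand-ins for the records in one batch

	// Producer side: each record's OffsetDelta is its index within the batch,
	// and LastOffsetDelta is the delta of the final record.
	offsetDeltas := make([]int64, len(records))
	for i := range records {
		offsetDeltas[i] = int64(i)
	}
	lastOffsetDelta := int64(len(records) - 1)

	// Broker/consumer side: once the batch is assigned a base offset,
	// each absolute offset is baseOffset + delta.
	baseOffset := int64(1042) // example value chosen by the broker
	for i, d := range offsetDeltas {
		fmt.Printf("record %q -> offset %d\n", records[i], baseOffset+d)
	}
	fmt.Println("last offset in batch:", baseOffset+lastOffsetDelta)
}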
case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages: diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go index a0141af0..8ac576db 100644 --- a/vendor/github.com/Shopify/sarama/real_decoder.go +++ b/vendor/github.com/Shopify/sarama/real_decoder.go @@ -8,7 +8,10 @@ import ( var errInvalidArrayLength = PacketDecodingError{"invalid array length"} var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} var errInvalidStringLength = PacketDecodingError{"invalid string length"} -var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"} +var errVarintOverflow = PacketDecodingError{"varint overflow"} +var errUVarintOverflow = PacketDecodingError{"uvarint overflow"} +var errInvalidBool = PacketDecodingError{"invalid bool"} +var errUnsupportedTaggedFields = PacketDecodingError{"non-empty tagged fields are not supported yet"} type realDecoder struct { raw []byte @@ -58,12 +61,42 @@ func (rd *realDecoder) getInt64() (int64, error) { return tmp, nil } +func (rd *realDecoder) getVarint() (int64, error) { + tmp, n := binary.Varint(rd.raw[rd.off:]) + if n == 0 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + if n < 0 { + rd.off -= n + return -1, errVarintOverflow + } + rd.off += n + return tmp, nil +} + +func (rd *realDecoder) getUVarint() (uint64, error) { + tmp, n := binary.Uvarint(rd.raw[rd.off:]) + if n == 0 { + rd.off = len(rd.raw) + return 0, ErrInsufficientData + } + + if n < 0 { + rd.off -= n + return 0, errUVarintOverflow + } + + rd.off += n + return tmp, nil +} + func (rd *realDecoder) getArrayLength() (int, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) return -1, ErrInsufficientData } - tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))) rd.off += 4 if tmp > rd.remaining() { rd.off = len(rd.raw) @@ -74,60 +107,162 @@ func (rd *realDecoder) getArrayLength() (int, error) { return tmp, nil } +func (rd *realDecoder) getCompactArrayLength() (int, error) { + n, err := rd.getUVarint() + if err != nil { + return 0, err + } + + if n == 0 { + return 0, nil + } + + return int(n) - 1, nil +} + +func (rd *realDecoder) getBool() (bool, error) { + b, err := rd.getInt8() + if err != nil || b == 0 { + return false, err + } + if b != 1 { + return false, errInvalidBool + } + return true, nil +} + +func (rd *realDecoder) getEmptyTaggedFieldArray() (int, error) { + tagCount, err := rd.getUVarint() + if err != nil { + return 0, err + } + + if tagCount != 0 { + return 0, errUnsupportedTaggedFields + } + + return 0, nil +} + // collections func (rd *realDecoder) getBytes() ([]byte, error) { tmp, err := rd.getInt32() + if err != nil { + return nil, err + } + if tmp == -1 { + return nil, nil + } + + return rd.getRawBytes(int(tmp)) +} +func (rd *realDecoder) getVarintBytes() ([]byte, error) { + tmp, err := rd.getVarint() if err != nil { return nil, err } + if tmp == -1 { + return nil, nil + } + + return rd.getRawBytes(int(tmp)) +} + +func (rd *realDecoder) getStringLength() (int, error) { + length, err := rd.getInt16() + if err != nil { + return 0, err + } - n := int(tmp) + n := int(length) switch { case n < -1: - return nil, errInvalidByteSliceLength - case n == -1: - return nil, nil - case n == 0: - return make([]byte, 0), nil + return 0, errInvalidStringLength case n > rd.remaining(): rd.off = len(rd.raw) - return nil, ErrInsufficientData + return 
0, ErrInsufficientData } - tmpStr := rd.raw[rd.off : rd.off+n] - rd.off += n - return tmpStr, nil + return n, nil } func (rd *realDecoder) getString() (string, error) { - tmp, err := rd.getInt16() - - if err != nil { + n, err := rd.getStringLength() + if err != nil || n == -1 { return "", err } - n := int(tmp) + tmpStr := string(rd.raw[rd.off : rd.off+n]) + rd.off += n + return tmpStr, nil +} - switch { - case n < -1: - return "", errInvalidStringLength - case n == -1: - return "", nil - case n == 0: - return "", nil - case n > rd.remaining(): - rd.off = len(rd.raw) - return "", ErrInsufficientData +func (rd *realDecoder) getNullableString() (*string, error) { + n, err := rd.getStringLength() + if err != nil || n == -1 { + return nil, err } tmpStr := string(rd.raw[rd.off : rd.off+n]) rd.off += n + return &tmpStr, err +} + +func (rd *realDecoder) getCompactString() (string, error) { + n, err := rd.getUVarint() + if err != nil { + return "", err + } + + var length = int(n - 1) + + tmpStr := string(rd.raw[rd.off : rd.off+length]) + rd.off += length return tmpStr, nil } +func (rd *realDecoder) getCompactNullableString() (*string, error) { + n, err := rd.getUVarint() + + if err != nil { + return nil, err + } + + var length = int(n - 1) + + if length < 0 { + return nil, err + } + + tmpStr := string(rd.raw[rd.off : rd.off+length]) + rd.off += length + return &tmpStr, err +} + +func (rd *realDecoder) getCompactInt32Array() ([]int32, error) { + n, err := rd.getUVarint() + if err != nil { + return nil, err + } + + if n == 0 { + return nil, nil + } + + arrayLength := int(n) - 1 + + ret := make([]int32, arrayLength) + + for i := range ret { + ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + } + return ret, nil +} + func (rd *realDecoder) getInt32Array() ([]int32, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) @@ -204,11 +339,12 @@ func (rd *realDecoder) getStringArray() ([]string, error) { ret := make([]string, n) for i := range ret { - if str, err := rd.getString(); err != nil { + str, err := rd.getString() + if err != nil { return nil, err - } else { - ret[i] = str } + + ret[i] = str } return ret, nil } @@ -220,8 +356,16 @@ func (rd *realDecoder) remaining() int { } func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { + buf, err := rd.getRawBytes(length) + if err != nil { + return nil, err + } + return &realDecoder{raw: buf}, nil +} + +func (rd *realDecoder) getRawBytes(length int) ([]byte, error) { if length < 0 { - return nil, errInvalidSubsetSize + return nil, errInvalidByteSliceLength } else if length > rd.remaining() { rd.off = len(rd.raw) return nil, ErrInsufficientData @@ -229,7 +373,23 @@ func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { start := rd.off rd.off += length - return &realDecoder{raw: rd.raw[start:rd.off]}, nil + return rd.raw[start:rd.off], nil +} + +func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) { + if rd.remaining() < offset+length { + return nil, ErrInsufficientData + } + off := rd.off + offset + return &realDecoder{raw: rd.raw[off : off+length]}, nil +} + +func (rd *realDecoder) peekInt8(offset int) (int8, error) { + const byteLen = 1 + if rd.remaining() < offset+byteLen { + return -1, ErrInsufficientData + } + return int8(rd.raw[rd.off+offset]), nil } // stacks @@ -237,10 +397,17 @@ func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { func (rd *realDecoder) push(in pushDecoder) error { in.saveOffset(rd.off) - reserve := in.reserveLength() - if rd.remaining() < 
reserve { - rd.off = len(rd.raw) - return ErrInsufficientData + var reserve int + if dpd, ok := in.(dynamicPushDecoder); ok { + if err := dpd.decode(rd); err != nil { + return err + } + } else { + reserve = in.reserveLength() + if rd.remaining() < reserve { + rd.off = len(rd.raw) + return ErrInsufficientData + } } rd.stack = append(rd.stack, in) diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go index ced4267c..ba073f7d 100644 --- a/vendor/github.com/Shopify/sarama/real_encoder.go +++ b/vendor/github.com/Shopify/sarama/real_encoder.go @@ -2,6 +2,7 @@ package sarama import ( "encoding/binary" + "errors" "github.com/rcrowley/go-metrics" ) @@ -35,11 +36,32 @@ func (re *realEncoder) putInt64(in int64) { re.off += 8 } +func (re *realEncoder) putVarint(in int64) { + re.off += binary.PutVarint(re.raw[re.off:], in) +} + +func (re *realEncoder) putUVarint(in uint64) { + re.off += binary.PutUvarint(re.raw[re.off:], in) +} + func (re *realEncoder) putArrayLength(in int) error { re.putInt32(int32(in)) return nil } +func (re *realEncoder) putCompactArrayLength(in int) { + // 0 represents a null array, so +1 has to be added + re.putUVarint(uint64(in + 1)) +} + +func (re *realEncoder) putBool(in bool) { + if in { + re.putInt8(1) + return + } + re.putInt8(0) +} + // collection func (re *realEncoder) putRawBytes(in []byte) error { @@ -54,9 +76,29 @@ func (re *realEncoder) putBytes(in []byte) error { return nil } re.putInt32(int32(len(in))) - copy(re.raw[re.off:], in) - re.off += len(in) - return nil + return re.putRawBytes(in) +} + +func (re *realEncoder) putVarintBytes(in []byte) error { + if in == nil { + re.putVarint(-1) + return nil + } + re.putVarint(int64(len(in))) + return re.putRawBytes(in) +} + +func (re *realEncoder) putCompactString(in string) error { + re.putCompactArrayLength(len(in)) + return re.putRawBytes([]byte(in)) +} + +func (re *realEncoder) putNullableCompactString(in *string) error { + if in == nil { + re.putInt8(0) + return nil + } + return re.putCompactString(*in) } func (re *realEncoder) putString(in string) error { @@ -66,6 +108,14 @@ func (re *realEncoder) putString(in string) error { return nil } +func (re *realEncoder) putNullableString(in *string) error { + if in == nil { + re.putInt16(-1) + return nil + } + return re.putString(*in) +} + func (re *realEncoder) putStringArray(in []string) error { err := re.putArrayLength(len(in)) if err != nil { @@ -81,6 +131,31 @@ func (re *realEncoder) putStringArray(in []string) error { return nil } +func (re *realEncoder) putCompactInt32Array(in []int32) error { + if in == nil { + return errors.New("expected int32 array to be non null") + } + // 0 represents a null array, so +1 has to be added + re.putUVarint(uint64(len(in)) + 1) + for _, val := range in { + re.putInt32(val) + } + return nil +} + +func (re *realEncoder) putNullableCompactInt32Array(in []int32) error { + if in == nil { + re.putUVarint(0) + return nil + } + // 0 represents a null array, so +1 has to be added + re.putUVarint(uint64(len(in)) + 1) + for _, val := range in { + re.putInt32(val) + } + return nil +} + func (re *realEncoder) putInt32Array(in []int32) error { err := re.putArrayLength(len(in)) if err != nil { @@ -103,6 +178,10 @@ func (re *realEncoder) putInt64Array(in []int64) error { return nil } +func (re *realEncoder) putEmptyTaggedFieldArray() { + re.putUVarint(0) +} + func (re *realEncoder) offset() int { return re.off } diff --git 
a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go new file mode 100644 index 00000000..cdccfe32 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/record.go @@ -0,0 +1,116 @@ +package sarama + +import ( + "encoding/binary" + "time" +) + +const ( + isTransactionalMask = 0x10 + controlMask = 0x20 + maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1 +) + +//RecordHeader stores key and value for a record header +type RecordHeader struct { + Key []byte + Value []byte +} + +func (h *RecordHeader) encode(pe packetEncoder) error { + if err := pe.putVarintBytes(h.Key); err != nil { + return err + } + return pe.putVarintBytes(h.Value) +} + +func (h *RecordHeader) decode(pd packetDecoder) (err error) { + if h.Key, err = pd.getVarintBytes(); err != nil { + return err + } + + if h.Value, err = pd.getVarintBytes(); err != nil { + return err + } + return nil +} + +//Record is kafka record type +type Record struct { + Headers []*RecordHeader + + Attributes int8 + TimestampDelta time.Duration + OffsetDelta int64 + Key []byte + Value []byte + length varintLengthField +} + +func (r *Record) encode(pe packetEncoder) error { + pe.push(&r.length) + pe.putInt8(r.Attributes) + pe.putVarint(int64(r.TimestampDelta / time.Millisecond)) + pe.putVarint(r.OffsetDelta) + if err := pe.putVarintBytes(r.Key); err != nil { + return err + } + if err := pe.putVarintBytes(r.Value); err != nil { + return err + } + pe.putVarint(int64(len(r.Headers))) + + for _, h := range r.Headers { + if err := h.encode(pe); err != nil { + return err + } + } + + return pe.pop() +} + +func (r *Record) decode(pd packetDecoder) (err error) { + if err = pd.push(&r.length); err != nil { + return err + } + + if r.Attributes, err = pd.getInt8(); err != nil { + return err + } + + timestamp, err := pd.getVarint() + if err != nil { + return err + } + r.TimestampDelta = time.Duration(timestamp) * time.Millisecond + + if r.OffsetDelta, err = pd.getVarint(); err != nil { + return err + } + + if r.Key, err = pd.getVarintBytes(); err != nil { + return err + } + + if r.Value, err = pd.getVarintBytes(); err != nil { + return err + } + + numHeaders, err := pd.getVarint() + if err != nil { + return err + } + + if numHeaders >= 0 { + r.Headers = make([]*RecordHeader, numHeaders) + } + for i := int64(0); i < numHeaders; i++ { + hdr := new(RecordHeader) + if err := hdr.decode(pd); err != nil { + return err + } + r.Headers[i] = hdr + } + + return pd.pop() +} diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go new file mode 100644 index 00000000..c653763e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/record_batch.go @@ -0,0 +1,225 @@ +package sarama + +import ( + "fmt" + "time" +) + +const recordBatchOverhead = 49 + +type recordsArray []*Record + +func (e recordsArray) encode(pe packetEncoder) error { + for _, r := range e { + if err := r.encode(pe); err != nil { + return err + } + } + return nil +} + +func (e recordsArray) decode(pd packetDecoder) error { + for i := range e { + rec := &Record{} + if err := rec.decode(pd); err != nil { + return err + } + e[i] = rec + } + return nil +} + +type RecordBatch struct { + FirstOffset int64 + PartitionLeaderEpoch int32 + Version int8 + Codec CompressionCodec + CompressionLevel int + Control bool + LogAppendTime bool + LastOffsetDelta int32 + FirstTimestamp time.Time + MaxTimestamp time.Time + ProducerID int64 + ProducerEpoch int16 + FirstSequence 
int32 + Records []*Record + PartialTrailingRecord bool + IsTransactional bool + + compressedRecords []byte + recordsLen int // uncompressed records size +} + +func (b *RecordBatch) LastOffset() int64 { + return b.FirstOffset + int64(b.LastOffsetDelta) +} + +func (b *RecordBatch) encode(pe packetEncoder) error { + if b.Version != 2 { + return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)} + } + pe.putInt64(b.FirstOffset) + pe.push(&lengthField{}) + pe.putInt32(b.PartitionLeaderEpoch) + pe.putInt8(b.Version) + pe.push(newCRC32Field(crcCastagnoli)) + pe.putInt16(b.computeAttributes()) + pe.putInt32(b.LastOffsetDelta) + + if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil { + return err + } + + if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil { + return err + } + + pe.putInt64(b.ProducerID) + pe.putInt16(b.ProducerEpoch) + pe.putInt32(b.FirstSequence) + + if err := pe.putArrayLength(len(b.Records)); err != nil { + return err + } + + if b.compressedRecords == nil { + if err := b.encodeRecords(pe); err != nil { + return err + } + } + if err := pe.putRawBytes(b.compressedRecords); err != nil { + return err + } + + if err := pe.pop(); err != nil { + return err + } + return pe.pop() +} + +func (b *RecordBatch) decode(pd packetDecoder) (err error) { + if b.FirstOffset, err = pd.getInt64(); err != nil { + return err + } + + batchLen, err := pd.getInt32() + if err != nil { + return err + } + + if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil { + return err + } + + if b.Version, err = pd.getInt8(); err != nil { + return err + } + + crc32Decoder := acquireCrc32Field(crcCastagnoli) + defer releaseCrc32Field(crc32Decoder) + + if err = pd.push(crc32Decoder); err != nil { + return err + } + + attributes, err := pd.getInt16() + if err != nil { + return err + } + b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask) + b.Control = attributes&controlMask == controlMask + b.LogAppendTime = attributes&timestampTypeMask == timestampTypeMask + b.IsTransactional = attributes&isTransactionalMask == isTransactionalMask + + if b.LastOffsetDelta, err = pd.getInt32(); err != nil { + return err + } + + if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil { + return err + } + + if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil { + return err + } + + if b.ProducerID, err = pd.getInt64(); err != nil { + return err + } + + if b.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + if b.FirstSequence, err = pd.getInt32(); err != nil { + return err + } + + numRecs, err := pd.getArrayLength() + if err != nil { + return err + } + if numRecs >= 0 { + b.Records = make([]*Record, numRecs) + } + + bufSize := int(batchLen) - recordBatchOverhead + recBuffer, err := pd.getRawBytes(bufSize) + if err != nil { + if err == ErrInsufficientData { + b.PartialTrailingRecord = true + b.Records = nil + return nil + } + return err + } + + if err = pd.pop(); err != nil { + return err + } + + recBuffer, err = decompress(b.Codec, recBuffer) + if err != nil { + return err + } + + b.recordsLen = len(recBuffer) + err = decode(recBuffer, recordsArray(b.Records)) + if err == ErrInsufficientData { + b.PartialTrailingRecord = true + b.Records = nil + return nil + } + return err +} + +func (b *RecordBatch) encodeRecords(pe packetEncoder) error { + var raw []byte + var err error + if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil { + return err + } + b.recordsLen = len(raw) + + b.compressedRecords, err = 
compress(b.Codec, b.CompressionLevel, raw) + return err +} + +func (b *RecordBatch) computeAttributes() int16 { + attr := int16(b.Codec) & int16(compressionCodecMask) + if b.Control { + attr |= controlMask + } + if b.LogAppendTime { + attr |= timestampTypeMask + } + if b.IsTransactional { + attr |= isTransactionalMask + } + return attr +} + +func (b *RecordBatch) addRecord(r *Record) { + b.Records = append(b.Records, r) +} diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go new file mode 100644 index 00000000..f4c5e95f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/records.go @@ -0,0 +1,203 @@ +package sarama + +import "fmt" + +const ( + unknownRecords = iota + legacyRecords + defaultRecords + + magicOffset = 16 +) + +// Records implements a union type containing either a RecordBatch or a legacy MessageSet. +type Records struct { + recordsType int + MsgSet *MessageSet + RecordBatch *RecordBatch +} + +func newLegacyRecords(msgSet *MessageSet) Records { + return Records{recordsType: legacyRecords, MsgSet: msgSet} +} + +func newDefaultRecords(batch *RecordBatch) Records { + return Records{recordsType: defaultRecords, RecordBatch: batch} +} + +// setTypeFromFields sets type of Records depending on which of MsgSet or RecordBatch is not nil. +// The first return value indicates whether both fields are nil (and the type is not set). +// If both fields are not nil, it returns an error. +func (r *Records) setTypeFromFields() (bool, error) { + if r.MsgSet == nil && r.RecordBatch == nil { + return true, nil + } + if r.MsgSet != nil && r.RecordBatch != nil { + return false, fmt.Errorf("both MsgSet and RecordBatch are set, but record type is unknown") + } + r.recordsType = defaultRecords + if r.MsgSet != nil { + r.recordsType = legacyRecords + } + return false, nil +} + +func (r *Records) encode(pe packetEncoder) error { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return err + } + } + + switch r.recordsType { + case legacyRecords: + if r.MsgSet == nil { + return nil + } + return r.MsgSet.encode(pe) + case defaultRecords: + if r.RecordBatch == nil { + return nil + } + return r.RecordBatch.encode(pe) + } + + return fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) setTypeFromMagic(pd packetDecoder) error { + magic, err := magicValue(pd) + if err != nil { + return err + } + + r.recordsType = defaultRecords + if magic < 2 { + r.recordsType = legacyRecords + } + + return nil +} + +func (r *Records) decode(pd packetDecoder) error { + if r.recordsType == unknownRecords { + if err := r.setTypeFromMagic(pd); err != nil { + return err + } + } + + switch r.recordsType { + case legacyRecords: + r.MsgSet = &MessageSet{} + return r.MsgSet.decode(pd) + case defaultRecords: + r.RecordBatch = &RecordBatch{} + return r.RecordBatch.decode(pd) + } + return fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) numRecords() (int, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return 0, err + } + } + + switch r.recordsType { + case legacyRecords: + if r.MsgSet == nil { + return 0, nil + } + return len(r.MsgSet.Messages), nil + case defaultRecords: + if r.RecordBatch == nil { + return 0, nil + } + return len(r.RecordBatch.Records), nil + } + return 0, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) isPartial() (bool, error) { + if 
r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return false, err + } + } + + switch r.recordsType { + case unknownRecords: + return false, nil + case legacyRecords: + if r.MsgSet == nil { + return false, nil + } + return r.MsgSet.PartialTrailingMessage, nil + case defaultRecords: + if r.RecordBatch == nil { + return false, nil + } + return r.RecordBatch.PartialTrailingRecord, nil + } + return false, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) isControl() (bool, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return false, err + } + } + + switch r.recordsType { + case legacyRecords: + return false, nil + case defaultRecords: + if r.RecordBatch == nil { + return false, nil + } + return r.RecordBatch.Control, nil + } + return false, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func (r *Records) isOverflow() (bool, error) { + if r.recordsType == unknownRecords { + if empty, err := r.setTypeFromFields(); err != nil || empty { + return false, err + } + } + + switch r.recordsType { + case unknownRecords: + return false, nil + case legacyRecords: + if r.MsgSet == nil { + return false, nil + } + return r.MsgSet.OverflowMessage, nil + case defaultRecords: + return false, nil + } + return false, fmt.Errorf("unknown records type: %v", r.recordsType) +} + +func magicValue(pd packetDecoder) (int8, error) { + return pd.peekInt8(magicOffset) +} + +func (r *Records) getControlRecord() (ControlRecord, error) { + if r.RecordBatch == nil || len(r.RecordBatch.Records) <= 0 { + return ControlRecord{}, fmt.Errorf("cannot get control record, record batch is empty") + } + + firstRecord := r.RecordBatch.Records[0] + controlRecord := ControlRecord{} + err := controlRecord.decode(&realDecoder{raw: firstRecord.Key}, &realDecoder{raw: firstRecord.Value}) + if err != nil { + return ControlRecord{}, err + } + + return controlRecord, nil +} diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go index 73310ca8..dcfd3946 100644 --- a/vendor/github.com/Shopify/sarama/request.go +++ b/vendor/github.com/Shopify/sarama/request.go @@ -11,6 +11,7 @@ type protocolBody interface { versionedDecoder key() int16 version() int16 + headerVersion() int16 requiredVersion() KafkaVersion } @@ -20,51 +21,82 @@ type request struct { body protocolBody } -func (r *request) encode(pe packetEncoder) (err error) { +func (r *request) encode(pe packetEncoder) error { pe.push(&lengthField{}) pe.putInt16(r.body.key()) pe.putInt16(r.body.version()) pe.putInt32(r.correlationID) - err = pe.putString(r.clientID) - if err != nil { - return err + + if r.body.headerVersion() >= 1 { + err := pe.putString(r.clientID) + if err != nil { + return err + } } - err = r.body.encode(pe) + + if r.body.headerVersion() >= 2 { + // we don't use tag headers at the moment so we just put an array length of 0 + pe.putUVarint(0) + } + + err := r.body.encode(pe) if err != nil { return err } + return pe.pop() } func (r *request) decode(pd packetDecoder) (err error) { - var key int16 - if key, err = pd.getInt16(); err != nil { + key, err := pd.getInt16() + if err != nil { return err } - var version int16 - if version, err = pd.getInt16(); err != nil { + + version, err := pd.getInt16() + if err != nil { return err } - if r.correlationID, err = pd.getInt32(); err != nil { + + r.correlationID, err = pd.getInt32() + if err != nil { return 
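On the magicOffset = 16 constant in records.go above: peeking a single byte works for both formats because the fixed fields in front of the magic byte add up to 16 bytes in each layout. A small sketch, assuming the standard legacy message layout (offset, message size, CRC) and the record-batch prelude that matches the decode order in this diff:

package main

import "fmt"

func main() {
	// Legacy message set entry:           Record batch (magic v2):
	//   offset        int64  (8 bytes)      baseOffset           int64  (8 bytes)
	//   message size  int32  (4 bytes)      batchLength          int32  (4 bytes)
	//   crc           int32  (4 bytes)      partitionLeaderEpoch int32  (4 bytes)
	//   magic         int8   at byte 16     magic                int8   at byte 16
	const magicOffset = 8 + 4 + 4
	fmt.Println("magic byte offset:", magicOffset) // 16 in both layouts, so one peek decides the type
}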
err } + r.clientID, err = pd.getString() + if err != nil { + return err + } r.body = allocateBody(key, version) if r.body == nil { return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)} } + + if r.body.headerVersion() >= 2 { + // tagged field + _, err = pd.getUVarint() + if err != nil { + return err + } + } + return r.body.decode(pd, version) } -func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) { - lengthBytes := make([]byte, 4) +func decodeRequest(r io.Reader) (*request, int, error) { + var ( + bytesRead int + lengthBytes = make([]byte, 4) + ) + if _, err := io.ReadFull(r, lengthBytes); err != nil { return nil, bytesRead, err } - bytesRead += len(lengthBytes) + bytesRead += len(lengthBytes) length := int32(binary.BigEndian.Uint32(lengthBytes)) + if length <= 4 || length > MaxRequestSize { return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)} } @@ -73,12 +105,14 @@ func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) { if _, err := io.ReadFull(r, encodedReq); err != nil { return nil, bytesRead, err } + bytesRead += len(encodedReq) - req = &request{} + req := &request{} if err := decode(encodedReq, req); err != nil { return nil, bytesRead, err } + return req, bytesRead, nil } @@ -87,7 +121,7 @@ func allocateBody(key, version int16) protocolBody { case 0: return &ProduceRequest{} case 1: - return &FetchRequest{} + return &FetchRequest{Version: version} case 2: return &OffsetRequest{Version: version} case 3: @@ -97,7 +131,7 @@ func allocateBody(key, version int16) protocolBody { case 9: return &OffsetFetchRequest{} case 10: - return &ConsumerMetadataRequest{} + return &FindCoordinatorRequest{} case 11: return &JoinGroupRequest{} case 12: @@ -114,6 +148,44 @@ func allocateBody(key, version int16) protocolBody { return &SaslHandshakeRequest{} case 18: return &ApiVersionsRequest{} + case 19: + return &CreateTopicsRequest{} + case 20: + return &DeleteTopicsRequest{} + case 21: + return &DeleteRecordsRequest{} + case 22: + return &InitProducerIDRequest{} + case 24: + return &AddPartitionsToTxnRequest{} + case 25: + return &AddOffsetsToTxnRequest{} + case 26: + return &EndTxnRequest{} + case 28: + return &TxnOffsetCommitRequest{} + case 29: + return &DescribeAclsRequest{} + case 30: + return &CreateAclsRequest{} + case 31: + return &DeleteAclsRequest{} + case 32: + return &DescribeConfigsRequest{} + case 33: + return &AlterConfigsRequest{} + case 35: + return &DescribeLogDirsRequest{} + case 36: + return &SaslAuthenticateRequest{} + case 37: + return &CreatePartitionsRequest{} + case 42: + return &DeleteGroupsRequest{} + case 45: + return &AlterPartitionReassignmentsRequest{} + case 46: + return &ListPartitionReassignmentsRequest{} } return nil } diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go index f3f4d27d..5dffb75b 100644 --- a/vendor/github.com/Shopify/sarama/response_header.go +++ b/vendor/github.com/Shopify/sarama/response_header.go @@ -2,12 +2,15 @@ package sarama import "fmt" +const responseLengthSize = 4 +const correlationIDSize = 4 + type responseHeader struct { length int32 correlationID int32 } -func (r *responseHeader) decode(pd packetDecoder) (err error) { +func (r *responseHeader) decode(pd packetDecoder, version int16) (err error) { r.length, err = pd.getInt32() if err != nil { return err @@ -17,5 +20,12 @@ func (r *responseHeader) decode(pd packetDecoder) (err 
error) { } r.correlationID, err = pd.getInt32() + + if version >= 1 { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + return err } diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go index 7d5dc60d..48f362d2 100644 --- a/vendor/github.com/Shopify/sarama/sarama.go +++ b/vendor/github.com/Shopify/sarama/sarama.go @@ -10,10 +10,7 @@ useful but comes with two caveats: it will generally be less efficient, and the depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost. -To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic -consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the -https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 -and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. +To consume messages, use Consumer or Consumer-Group API. For lower-level needs, the Broker and Request/Response objects permit precise control over each connection and message sent on the wire; the Client provides higher-level metadata management that is shared between @@ -42,6 +39,10 @@ Broker related metrics: | response-rate-for-broker- | meter | Responses/second received from a given broker | | response-size | histogram | Distribution of the response size in bytes for all brokers | | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker | + | requests-in-flight | counter | The current number of in-flight requests awaiting a response | + | | | for all brokers | + | requests-in-flight-for-broker- | counter | The current number of in-flight requests awaiting a response | + | | | for a given broker | +----------------------------------------------+------------+---------------------------------------------------------------+ Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics. @@ -61,6 +62,14 @@ Producer related metrics: | compression-ratio-for-topic- | histogram | Distribution of the compression ratio times 100 of record batches for a given topic | +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ +Consumer related metrics: + + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + | Name | Type | Description | + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + | consumer-batch-size | histogram | Distribution of the number of messages in a batch | + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + */ package sarama @@ -69,10 +78,29 @@ import ( "log" ) -// Logger is the instance of a StdLogger interface that Sarama writes connection -// management events to. By default it is set to discard all log messages via ioutil.Discard, -// but you can set it to redirect wherever you want. 
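For the metrics added to the sarama.go documentation above (requests-in-flight and consumer-batch-size): a usage sketch of reading one of them from the config's registry. The broker address is a placeholder, the lookup only returns a value once the client has actually sent requests, and it assumes the vendored go-metrics API (Registry.Get, Counter.Count):

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	cfg := sarama.NewConfig()

	client, err := sarama.NewClient([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Counters are registered lazily, once requests are actually in flight.
	if m := cfg.MetricRegistry.Get("requests-in-flight"); m != nil {
		if c, ok := m.(metrics.Counter); ok {
			fmt.Println("requests in flight (all brokers):", c.Count())
		}
	}
}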
-var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags) +var ( + // Logger is the instance of a StdLogger interface that Sarama writes connection + // management events to. By default it is set to discard all log messages via ioutil.Discard, + // but you can set it to redirect wherever you want. + Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags) + + // PanicHandler is called for recovering from panics spawned internally to the library (and thus + // not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered. + PanicHandler func(interface{}) + + // MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying + // to send a request larger than this will result in an PacketEncodingError. The default of 100 MiB is aligned + // with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt + // to process. + MaxRequestSize int32 = 100 * 1024 * 1024 + + // MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If + // a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to + // protect the client from running out of memory. Please note that brokers do not have any natural limit on + // the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers + // (see https://issues.apache.org/jira/browse/KAFKA-2063). + MaxResponseSize int32 = 100 * 1024 * 1024 +) // StdLogger is used to log error messages. type StdLogger interface { @@ -80,20 +108,3 @@ type StdLogger interface { Printf(format string, v ...interface{}) Println(v ...interface{}) } - -// PanicHandler is called for recovering from panics spawned internally to the library (and thus -// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered. -var PanicHandler func(interface{}) - -// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying -// to send a request larger than this will result in an PacketEncodingError. The default of 100 MiB is aligned -// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt -// to process. -var MaxRequestSize int32 = 100 * 1024 * 1024 - -// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If -// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to -// protect the client from running out of memory. Please note that brokers do not have any natural limit on -// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers -// (see https://issues.apache.org/jira/browse/KAFKA-2063). 
-var MaxResponseSize int32 = 100 * 1024 * 1024 diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go new file mode 100644 index 00000000..90504df6 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go @@ -0,0 +1,33 @@ +package sarama + +type SaslAuthenticateRequest struct { + SaslAuthBytes []byte +} + +// APIKeySASLAuth is the API key for the SaslAuthenticate Kafka API +const APIKeySASLAuth = 36 + +func (r *SaslAuthenticateRequest) encode(pe packetEncoder) error { + return pe.putBytes(r.SaslAuthBytes) +} + +func (r *SaslAuthenticateRequest) decode(pd packetDecoder, version int16) (err error) { + r.SaslAuthBytes, err = pd.getBytes() + return err +} + +func (r *SaslAuthenticateRequest) key() int16 { + return APIKeySASLAuth +} + +func (r *SaslAuthenticateRequest) version() int16 { + return 0 +} + +func (r *SaslAuthenticateRequest) headerVersion() int16 { + return 1 +} + +func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion { + return V1_0_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go new file mode 100644 index 00000000..3ef57b5a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go @@ -0,0 +1,48 @@ +package sarama + +type SaslAuthenticateResponse struct { + Err KError + ErrorMessage *string + SaslAuthBytes []byte +} + +func (r *SaslAuthenticateResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + if err := pe.putNullableString(r.ErrorMessage); err != nil { + return err + } + return pe.putBytes(r.SaslAuthBytes) +} + +func (r *SaslAuthenticateResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + if r.ErrorMessage, err = pd.getNullableString(); err != nil { + return err + } + + r.SaslAuthBytes, err = pd.getBytes() + + return err +} + +func (r *SaslAuthenticateResponse) key() int16 { + return APIKeySASLAuth +} + +func (r *SaslAuthenticateResponse) version() int16 { + return 0 +} + +func (r *SaslAuthenticateResponse) headerVersion() int16 { + return 0 +} + +func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion { + return V1_0_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go index fbbc8947..74dc3072 100644 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go +++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go @@ -2,6 +2,7 @@ package sarama type SaslHandshakeRequest struct { Mechanism string + Version int16 } func (r *SaslHandshakeRequest) encode(pe packetEncoder) error { @@ -25,7 +26,11 @@ func (r *SaslHandshakeRequest) key() int16 { } func (r *SaslHandshakeRequest) version() int16 { - return 0 + return r.Version +} + +func (r *SaslHandshakeRequest) headerVersion() int16 { + return 1 } func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion { diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go index 8379bbb2..69dfc317 100644 --- a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go +++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go @@ -11,13 +11,13 @@ func (r *SaslHandshakeResponse) 
encode(pe packetEncoder) error { } func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error { - if kerr, err := pd.getInt16(); err != nil { + kerr, err := pd.getInt16() + if err != nil { return err - } else { - r.Err = KError(kerr) } - var err error + r.Err = KError(kerr) + if r.EnabledMechanisms, err = pd.getStringArray(); err != nil { return err } @@ -33,6 +33,10 @@ func (r *SaslHandshakeResponse) version() int16 { return 0 } +func (r *SaslHandshakeResponse) headerVersion() int16 { + return 0 +} + func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion { return V0_10_0_0 } diff --git a/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go b/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go new file mode 100644 index 00000000..bb0c82c3 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go @@ -0,0 +1,124 @@ +package sarama + +type topicPartitionAssignment struct { + Topic string + Partition int32 +} + +type StickyAssignorUserData interface { + partitions() []topicPartitionAssignment + hasGeneration() bool + generation() int +} + +//StickyAssignorUserDataV0 holds topic partition information for an assignment +type StickyAssignorUserDataV0 struct { + Topics map[string][]int32 + + topicPartitions []topicPartitionAssignment +} + +func (m *StickyAssignorUserDataV0) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(m.Topics)); err != nil { + return err + } + + for topic, partitions := range m.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putInt32Array(partitions); err != nil { + return err + } + } + return nil +} + +func (m *StickyAssignorUserDataV0) decode(pd packetDecoder) (err error) { + var topicLen int + if topicLen, err = pd.getArrayLength(); err != nil { + return + } + + m.Topics = make(map[string][]int32, topicLen) + for i := 0; i < topicLen; i++ { + var topic string + if topic, err = pd.getString(); err != nil { + return + } + if m.Topics[topic], err = pd.getInt32Array(); err != nil { + return + } + } + m.topicPartitions = populateTopicPartitions(m.Topics) + return nil +} + +func (m *StickyAssignorUserDataV0) partitions() []topicPartitionAssignment { return m.topicPartitions } +func (m *StickyAssignorUserDataV0) hasGeneration() bool { return false } +func (m *StickyAssignorUserDataV0) generation() int { return defaultGeneration } + +//StickyAssignorUserDataV1 holds topic partition information for an assignment +type StickyAssignorUserDataV1 struct { + Topics map[string][]int32 + Generation int32 + + topicPartitions []topicPartitionAssignment +} + +func (m *StickyAssignorUserDataV1) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(m.Topics)); err != nil { + return err + } + + for topic, partitions := range m.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putInt32Array(partitions); err != nil { + return err + } + } + + pe.putInt32(m.Generation) + return nil +} + +func (m *StickyAssignorUserDataV1) decode(pd packetDecoder) (err error) { + var topicLen int + if topicLen, err = pd.getArrayLength(); err != nil { + return + } + + m.Topics = make(map[string][]int32, topicLen) + for i := 0; i < topicLen; i++ { + var topic string + if topic, err = pd.getString(); err != nil { + return + } + if m.Topics[topic], err = pd.getInt32Array(); err != nil { + return + } + } + + m.Generation, err = pd.getInt32() + if err != nil { + return err + } + m.topicPartitions = 
populateTopicPartitions(m.Topics) + return nil +} + +func (m *StickyAssignorUserDataV1) partitions() []topicPartitionAssignment { return m.topicPartitions } +func (m *StickyAssignorUserDataV1) hasGeneration() bool { return true } +func (m *StickyAssignorUserDataV1) generation() int { return int(m.Generation) } + +func populateTopicPartitions(topics map[string][]int32) []topicPartitionAssignment { + topicPartitions := make([]topicPartitionAssignment, 0) + for topic, partitions := range topics { + for _, partition := range partitions { + topicPartitions = append(topicPartitions, topicPartitionAssignment{Topic: topic, Partition: partition}) + } + } + return topicPartitions +} diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go index fe207080..ac6ecb13 100644 --- a/vendor/github.com/Shopify/sarama/sync_group_request.go +++ b/vendor/github.com/Shopify/sarama/sync_group_request.go @@ -77,6 +77,10 @@ func (r *SyncGroupRequest) version() int16 { return 0 } +func (r *SyncGroupRequest) headerVersion() int16 { + return 1 +} + func (r *SyncGroupRequest) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go index 12aef673..af019c42 100644 --- a/vendor/github.com/Shopify/sarama/sync_group_response.go +++ b/vendor/github.com/Shopify/sarama/sync_group_response.go @@ -17,12 +17,13 @@ func (r *SyncGroupResponse) encode(pe packetEncoder) error { } func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) { - if kerr, err := pd.getInt16(); err != nil { + kerr, err := pd.getInt16() + if err != nil { return err - } else { - r.Err = KError(kerr) } + r.Err = KError(kerr) + r.MemberAssignment, err = pd.getBytes() return } @@ -35,6 +36,10 @@ func (r *SyncGroupResponse) version() int16 { return 0 } +func (r *SyncGroupResponse) headerVersion() int16 { + return 0 +} + func (r *SyncGroupResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go index c77ae314..021c5a01 100644 --- a/vendor/github.com/Shopify/sarama/sync_producer.go +++ b/vendor/github.com/Shopify/sarama/sync_producer.go @@ -25,10 +25,10 @@ type SyncProducer interface { // SendMessages will return an error. SendMessages(msgs []*ProducerMessage) error - // Close shuts down the producer and flushes any messages it may have buffered. - // You must call this function before a producer object passes out of scope, as - // it may otherwise leak memory. You must call this before calling Close on the - // underlying client. + // Close shuts down the producer and waits for any buffered messages to be + // flushed. You must call this function before a producer object passes out of + // scope, as it may otherwise leak memory. You must call this before calling + // Close on the underlying client. 
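The practical effect of the expectation change just below is that SendMessage and SendMessages no longer borrow ProducerMessage.Metadata for their internal reply channel, so caller-supplied metadata survives a send untouched. A rough usage sketch; the broker address, topic and metadata value are placeholders, and the log and github.com/Shopify/sarama imports are assumed:

func sendOne() {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // required by SyncProducer

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	msg := &sarama.ProducerMessage{
		Topic:    "requests",
		Value:    sarama.StringEncoder("payload"),
		Metadata: "correlation-42", // left intact by SendMessage
	}
	partition, offset, err := producer.SendMessage(msg)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored at partition=%d offset=%d metadata=%v", partition, offset, msg.Metadata)
}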
Close() error } @@ -90,13 +90,8 @@ func verifyProducerConfig(config *Config) error { } func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) { - oldMetadata := msg.Metadata - defer func() { - msg.Metadata = oldMetadata - }() - expectation := make(chan *ProducerError, 1) - msg.Metadata = expectation + msg.expectation = expectation sp.producer.Input() <- msg if err := <-expectation; err != nil { @@ -107,21 +102,11 @@ func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offs } func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error { - savedMetadata := make([]interface{}, len(msgs)) - for i := range msgs { - savedMetadata[i] = msgs[i].Metadata - } - defer func() { - for i := range msgs { - msgs[i].Metadata = savedMetadata[i] - } - }() - expectations := make(chan chan *ProducerError, len(msgs)) go func() { for _, msg := range msgs { expectation := make(chan *ProducerError, 1) - msg.Metadata = expectation + msg.expectation = expectation sp.producer.Input() <- msg expectations <- expectation } @@ -144,7 +129,7 @@ func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error { func (sp *syncProducer) handleSuccesses() { defer sp.wg.Done() for msg := range sp.producer.Successes() { - expectation := msg.Metadata.(chan *ProducerError) + expectation := msg.expectation expectation <- nil } } @@ -152,7 +137,7 @@ func (sp *syncProducer) handleSuccesses() { func (sp *syncProducer) handleErrors() { defer sp.wg.Done() for err := range sp.producer.Errors() { - expectation := err.Msg.Metadata.(chan *ProducerError) + expectation := err.Msg.expectation expectation <- err } } diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/Shopify/sarama/timestamp.go new file mode 100644 index 00000000..372278d0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/timestamp.go @@ -0,0 +1,40 @@ +package sarama + +import ( + "fmt" + "time" +) + +type Timestamp struct { + *time.Time +} + +func (t Timestamp) encode(pe packetEncoder) error { + timestamp := int64(-1) + + if !t.Before(time.Unix(0, 0)) { + timestamp = t.UnixNano() / int64(time.Millisecond) + } else if !t.IsZero() { + return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", t)} + } + + pe.putInt64(timestamp) + return nil +} + +func (t Timestamp) decode(pd packetDecoder) error { + millis, err := pd.getInt64() + if err != nil { + return err + } + + // negative timestamps are invalid, in these cases we should return + // a zero time + timestamp := time.Time{} + if millis >= 0 { + timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) + } + + *t.Time = timestamp + return nil +} diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go new file mode 100644 index 00000000..c4043a33 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go @@ -0,0 +1,130 @@ +package sarama + +type TxnOffsetCommitRequest struct { + TransactionalID string + GroupID string + ProducerID int64 + ProducerEpoch int16 + Topics map[string][]*PartitionOffsetMetadata +} + +func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error { + if err := pe.putString(t.TransactionalID); err != nil { + return err + } + if err := pe.putString(t.GroupID); err != nil { + return err + } + pe.putInt64(t.ProducerID) + pe.putInt16(t.ProducerEpoch) + + if err := pe.putArrayLength(len(t.Topics)); err != nil { + return err + } 
+ for topic, partitions := range t.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for _, partition := range partitions { + if err := partition.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { + if t.TransactionalID, err = pd.getString(); err != nil { + return err + } + if t.GroupID, err = pd.getString(); err != nil { + return err + } + if t.ProducerID, err = pd.getInt64(); err != nil { + return err + } + if t.ProducerEpoch, err = pd.getInt16(); err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics = make(map[string][]*PartitionOffsetMetadata) + for i := 0; i < n; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + m, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics[topic] = make([]*PartitionOffsetMetadata, m) + + for j := 0; j < m; j++ { + partitionOffsetMetadata := new(PartitionOffsetMetadata) + if err := partitionOffsetMetadata.decode(pd, version); err != nil { + return err + } + t.Topics[topic][j] = partitionOffsetMetadata + } + } + + return nil +} + +func (a *TxnOffsetCommitRequest) key() int16 { + return 28 +} + +func (a *TxnOffsetCommitRequest) version() int16 { + return 0 +} + +func (a *TxnOffsetCommitRequest) headerVersion() int16 { + return 1 +} + +func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion { + return V0_11_0_0 +} + +type PartitionOffsetMetadata struct { + Partition int32 + Offset int64 + Metadata *string +} + +func (p *PartitionOffsetMetadata) encode(pe packetEncoder) error { + pe.putInt32(p.Partition) + pe.putInt64(p.Offset) + if err := pe.putNullableString(p.Metadata); err != nil { + return err + } + + return nil +} + +func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) { + if p.Partition, err = pd.getInt32(); err != nil { + return err + } + if p.Offset, err = pd.getInt64(); err != nil { + return err + } + if p.Metadata, err = pd.getNullableString(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go new file mode 100644 index 00000000..94d8029d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go @@ -0,0 +1,87 @@ +package sarama + +import ( + "time" +) + +type TxnOffsetCommitResponse struct { + ThrottleTime time.Duration + Topics map[string][]*PartitionError +} + +func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error { + pe.putInt32(int32(t.ThrottleTime / time.Millisecond)) + if err := pe.putArrayLength(len(t.Topics)); err != nil { + return err + } + + for topic, e := range t.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(e)); err != nil { + return err + } + for _, partitionError := range e { + if err := partitionError.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { + throttleTime, err := pd.getInt32() + if err != nil { + return err + } + t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond + + n, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics = make(map[string][]*PartitionError) + + for i := 0; i < n; 
i++ { + topic, err := pd.getString() + if err != nil { + return err + } + + m, err := pd.getArrayLength() + if err != nil { + return err + } + + t.Topics[topic] = make([]*PartitionError, m) + + for j := 0; j < m; j++ { + t.Topics[topic][j] = new(PartitionError) + if err := t.Topics[topic][j].decode(pd, version); err != nil { + return err + } + } + } + + return nil +} + +func (a *TxnOffsetCommitResponse) key() int16 { + return 28 +} + +func (a *TxnOffsetCommitResponse) version() int16 { + return 0 +} + +func (a *TxnOffsetCommitResponse) headerVersion() int16 { + return 0 +} + +func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion { + return V0_11_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go index 3cbab2d9..d138a5eb 100644 --- a/vendor/github.com/Shopify/sarama/utils.go +++ b/vendor/github.com/Shopify/sarama/utils.go @@ -2,8 +2,9 @@ package sarama import ( "bufio" + "fmt" "net" - "sort" + "regexp" ) type none struct{} @@ -23,13 +24,9 @@ func (slice int32Slice) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } -func dupeAndSort(input []int32) []int32 { +func dupInt32Slice(input []int32) []int32 { ret := make([]int32, 0, len(input)) - for _, val := range input { - ret = append(ret, val) - } - - sort.Sort(int32Slice(ret)) + ret = append(ret, input...) return ret } @@ -140,13 +137,91 @@ func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool { // Effective constants defining the supported kafka versions. var ( - V0_8_2_0 = newKafkaVersion(0, 8, 2, 0) - V0_8_2_1 = newKafkaVersion(0, 8, 2, 1) - V0_8_2_2 = newKafkaVersion(0, 8, 2, 2) - V0_9_0_0 = newKafkaVersion(0, 9, 0, 0) - V0_9_0_1 = newKafkaVersion(0, 9, 0, 1) - V0_10_0_0 = newKafkaVersion(0, 10, 0, 0) - V0_10_0_1 = newKafkaVersion(0, 10, 0, 1) - V0_10_1_0 = newKafkaVersion(0, 10, 1, 0) - minVersion = V0_8_2_0 + V0_8_2_0 = newKafkaVersion(0, 8, 2, 0) + V0_8_2_1 = newKafkaVersion(0, 8, 2, 1) + V0_8_2_2 = newKafkaVersion(0, 8, 2, 2) + V0_9_0_0 = newKafkaVersion(0, 9, 0, 0) + V0_9_0_1 = newKafkaVersion(0, 9, 0, 1) + V0_10_0_0 = newKafkaVersion(0, 10, 0, 0) + V0_10_0_1 = newKafkaVersion(0, 10, 0, 1) + V0_10_1_0 = newKafkaVersion(0, 10, 1, 0) + V0_10_1_1 = newKafkaVersion(0, 10, 1, 1) + V0_10_2_0 = newKafkaVersion(0, 10, 2, 0) + V0_10_2_1 = newKafkaVersion(0, 10, 2, 1) + V0_11_0_0 = newKafkaVersion(0, 11, 0, 0) + V0_11_0_1 = newKafkaVersion(0, 11, 0, 1) + V0_11_0_2 = newKafkaVersion(0, 11, 0, 2) + V1_0_0_0 = newKafkaVersion(1, 0, 0, 0) + V1_1_0_0 = newKafkaVersion(1, 1, 0, 0) + V1_1_1_0 = newKafkaVersion(1, 1, 1, 0) + V2_0_0_0 = newKafkaVersion(2, 0, 0, 0) + V2_0_1_0 = newKafkaVersion(2, 0, 1, 0) + V2_1_0_0 = newKafkaVersion(2, 1, 0, 0) + V2_2_0_0 = newKafkaVersion(2, 2, 0, 0) + V2_3_0_0 = newKafkaVersion(2, 3, 0, 0) + V2_4_0_0 = newKafkaVersion(2, 4, 0, 0) + V2_5_0_0 = newKafkaVersion(2, 5, 0, 0) + + SupportedVersions = []KafkaVersion{ + V0_8_2_0, + V0_8_2_1, + V0_8_2_2, + V0_9_0_0, + V0_9_0_1, + V0_10_0_0, + V0_10_0_1, + V0_10_1_0, + V0_10_1_1, + V0_10_2_0, + V0_10_2_1, + V0_11_0_0, + V0_11_0_1, + V0_11_0_2, + V1_0_0_0, + V1_1_0_0, + V1_1_1_0, + V2_0_0_0, + V2_0_1_0, + V2_1_0_0, + V2_2_0_0, + V2_3_0_0, + V2_4_0_0, + V2_5_0_0, + } + MinVersion = V0_8_2_0 + MaxVersion = V2_5_0_0 ) + +//ParseKafkaVersion parses and returns kafka version or error from a string +func ParseKafkaVersion(s string) (KafkaVersion, error) { + if len(s) < 5 { + return MinVersion, fmt.Errorf("invalid version `%s`", s) + } + var major, minor, veryMinor, patch uint + 
var err error + if s[0] == '0' { + err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch}) + } else { + err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor}) + } + if err != nil { + return MinVersion, err + } + return newKafkaVersion(major, minor, veryMinor, patch), nil +} + +func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error { + if !regexp.MustCompile(pattern).MatchString(s) { + return fmt.Errorf("invalid version `%s`", s) + } + _, err := fmt.Sscanf(s, format, v[0], v[1], v[2]) + return err +} + +func (v KafkaVersion) String() string { + if v.version[0] == 0 { + return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3]) + } + + return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2]) +} diff --git a/vendor/github.com/Shopify/sarama/zstd.go b/vendor/github.com/Shopify/sarama/zstd.go new file mode 100644 index 00000000..7c9951ac --- /dev/null +++ b/vendor/github.com/Shopify/sarama/zstd.go @@ -0,0 +1,28 @@ +package sarama + +import ( + "sync" + + "github.com/klauspost/compress/zstd" +) + +var ( + zstdDec *zstd.Decoder + zstdEnc *zstd.Encoder + + zstdEncOnce, zstdDecOnce sync.Once +) + +func zstdDecompress(dst, src []byte) ([]byte, error) { + zstdDecOnce.Do(func() { + zstdDec, _ = zstd.NewReader(nil) + }) + return zstdDec.DecodeAll(src, dst) +} + +func zstdCompress(dst, src []byte) ([]byte, error) { + zstdEncOnce.Do(func() { + zstdEnc, _ = zstd.NewWriter(nil, zstd.WithZeroFrames(true)) + }) + return zstdEnc.EncodeAll(src, dst), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go new file mode 100644 index 00000000..1c496742 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go @@ -0,0 +1,93 @@ +// Package arn provides a parser for interacting with Amazon Resource Names. +package arn + +import ( + "errors" + "strings" +) + +const ( + arnDelimiter = ":" + arnSections = 6 + arnPrefix = "arn:" + + // zero-indexed + sectionPartition = 1 + sectionService = 2 + sectionRegion = 3 + sectionAccountID = 4 + sectionResource = 5 + + // errors + invalidPrefix = "arn: invalid prefix" + invalidSections = "arn: not enough sections" +) + +// ARN captures the individual fields of an Amazon Resource Name. +// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information. +type ARN struct { + // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in + // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China + // (Beijing) region is "aws-cn". + Partition string + + // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of + // namespaces, see + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces. + Service string + + // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this + // component might be omitted. + Region string + + // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the + // ARNs for some resources don't require an account number, so this component might be omitted. + AccountID string + + // The content of this part of the ARN varies by service. 
It often includes an indicator of the type of resource — + // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the + // resource name itself. Some services allows paths for resource names, as described in + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths. + Resource string +} + +// Parse parses an ARN into its constituent parts. +// +// Some example ARNs: +// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment +// arn:aws:iam::123456789012:user/David +// arn:aws:rds:eu-west-1:123456789012:db:mysql-db +// arn:aws:s3:::my_corporate_bucket/exampleobject.png +func Parse(arn string) (ARN, error) { + if !strings.HasPrefix(arn, arnPrefix) { + return ARN{}, errors.New(invalidPrefix) + } + sections := strings.SplitN(arn, arnDelimiter, arnSections) + if len(sections) != arnSections { + return ARN{}, errors.New(invalidSections) + } + return ARN{ + Partition: sections[sectionPartition], + Service: sections[sectionService], + Region: sections[sectionRegion], + AccountID: sections[sectionAccountID], + Resource: sections[sectionResource], + }, nil +} + +// IsARN returns whether the given string is an ARN by looking for +// whether the string starts with "arn:" and contains the correct number +// of sections delimited by colons(:). +func IsARN(arn string) bool { + return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1 +} + +// String returns the canonical representation of the ARN +func (arn ARN) String() string { + return arnPrefix + + arn.Partition + arnDelimiter + + arn.Service + arnDelimiter + + arn.Region + arnDelimiter + + arn.AccountID + arnDelimiter + + arn.Resource +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go index 56fdfc2b..99849c0e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -138,8 +138,27 @@ type RequestFailure interface { RequestID() string } -// NewRequestFailure returns a new request error wrapper for the given Error -// provided. +// NewRequestFailure returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { return newRequestError(err, statusCode, reqID) } + +// UnmarshalError provides the interface for the SDK failing to unmarshal data. +type UnmarshalError interface { + awsError + Bytes() []byte +} + +// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding +// the bytes that fail to unmarshal to the error. +func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { + return &unmarshalError{ + awsError: New("UnmarshalError", msg, err), + bytes: bytes, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go index 0202a008..9cf7eaf4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -1,6 +1,9 @@ package awserr -import "fmt" +import ( + "encoding/hex" + "fmt" +) // SprintError returns a string of the formatted error code. 
// @@ -119,6 +122,7 @@ type requestError struct { awsError statusCode int requestID string + bytes []byte } // newRequestError returns a wrapped error with additional information for @@ -170,6 +174,29 @@ func (r requestError) OrigErrs() []error { return []error{r.OrigErr()} } +type unmarshalError struct { + awsError + bytes []byte +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (e unmarshalError) Error() string { + extra := hex.Dump(e.bytes) + return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (e unmarshalError) String() string { + return e.Error() +} + +// Bytes returns the bytes that failed to unmarshal. +func (e unmarshalError) Bytes() []byte { + return e.bytes +} + // An error list that satisfies the golang interface type errorList []error @@ -181,7 +208,7 @@ func (e errorList) Error() string { // How do we want to handle the array size being zero if size := len(e); size > 0 { for i := 0; i < size; i++ { - msg += fmt.Sprintf("%s", e[i].Error()) + msg += e[i].Error() // We check the next index to see if it is within the slice. // If it is, then we append a newline. We do this, because unit tests // could be broken with the additional '\n' diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go index 11c52c38..a4eb6a7f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -70,7 +70,7 @@ func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTer value = value.FieldByNameFunc(func(name string) bool { if c == name { return true - } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + } else if !caseSensitive && strings.EqualFold(name, c) { return true } return false @@ -185,13 +185,12 @@ func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { // SetValueAtPath sets a value at the case insensitive lexical path inside // of a structure. 
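For context, SetValueAtPath below walks a case-insensitive lexical path through a struct, and the strings.EqualFold change above is what performs that matching. A small sketch with an invented widget type, assuming the fmt and github.com/aws/aws-sdk-go/aws/awsutil imports:

type widget struct {
	Config struct {
		Name string
	}
}

func exampleLexicalPath() {
	w := widget{}

	// Field names are matched case-insensitively, so "config.name" reaches
	// w.Config.Name; nil intermediate pointers would be allocated on the way.
	awsutil.SetValueAtPath(&w, "config.name", "demo")

	fmt.Println(w.Config.Name) // demo
}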
func SetValueAtPath(i interface{}, path string, v interface{}) { - if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue - } - setValue(rval, v) + rvals := rValuesAtPath(i, path, true, false, v == nil) + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue } + setValue(rval, v) } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go index 70960538..03334d69 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -12,6 +12,7 @@ import ( type Config struct { Config *aws.Config Handlers request.Handlers + PartitionID string Endpoint string SigningRegion string SigningName string @@ -64,7 +65,7 @@ func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, op default: maxRetries := aws.IntValue(cfg.MaxRetries) if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { - maxRetries = 3 + maxRetries = DefaultRetryerMaxNumRetries } svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index a397b0d0..9f6af19d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -1,6 +1,7 @@ package client import ( + "math" "strconv" "time" @@ -9,82 +10,142 @@ import ( ) // DefaultRetryer implements basic retry logic using exponential backoff for -// most services. If you want to implement custom retry logic, implement the -// request.Retryer interface or create a structure type that composes this -// struct and override the specific methods. For example, to override only -// the MaxRetries method: +// most services. If you want to implement custom retry logic, you can implement the +// request.Retryer interface. // -// type retryer struct { -// client.DefaultRetryer -// } -// -// // This implementation always has 100 max retries -// func (d retryer) MaxRetries() int { return 100 } type DefaultRetryer struct { + // Num max Retries is the number of max retries that will be performed. + // By default, this is zero. NumMaxRetries int + + // MinRetryDelay is the minimum retry delay after which retry will be performed. + // If not set, the value is 0ns. + MinRetryDelay time.Duration + + // MinThrottleRetryDelay is the minimum retry delay when throttled. + // If not set, the value is 0ns. + MinThrottleDelay time.Duration + + // MaxRetryDelay is the maximum retry delay before which retry must be performed. + // If not set, the value is 0ns. + MaxRetryDelay time.Duration + + // MaxThrottleDelay is the maximum retry delay when throttled. + // If not set, the value is 0ns. 
+	MaxThrottleDelay time.Duration
 }
 
+const (
+	// DefaultRetryerMaxNumRetries sets maximum number of retries
+	DefaultRetryerMaxNumRetries = 3
+
+	// DefaultRetryerMinRetryDelay sets minimum retry delay
+	DefaultRetryerMinRetryDelay = 30 * time.Millisecond
+
+	// DefaultRetryerMinThrottleDelay sets minimum delay when throttled
+	DefaultRetryerMinThrottleDelay = 500 * time.Millisecond
+
+	// DefaultRetryerMaxRetryDelay sets maximum retry delay
+	DefaultRetryerMaxRetryDelay = 300 * time.Second
+
+	// DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
+	DefaultRetryerMaxThrottleDelay = 300 * time.Second
+)
+
 // MaxRetries returns the number of maximum returns the service will use to make
 // an individual API request.
 func (d DefaultRetryer) MaxRetries() int {
 	return d.NumMaxRetries
 }
 
+// setRetryerDefaults sets the default values of the retryer if not set
+func (d *DefaultRetryer) setRetryerDefaults() {
+	if d.MinRetryDelay == 0 {
+		d.MinRetryDelay = DefaultRetryerMinRetryDelay
+	}
+	if d.MaxRetryDelay == 0 {
+		d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
+	}
+	if d.MinThrottleDelay == 0 {
+		d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
+	}
+	if d.MaxThrottleDelay == 0 {
+		d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
+	}
+}
+
 // RetryRules returns the delay duration before retrying this request again
 func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
-	// Set the upper limit of delay in retrying at ~five minutes
-	minTime := 30
-	throttle := d.shouldThrottle(r)
-	if throttle {
-		if delay, ok := getRetryDelay(r); ok {
-			return delay
-		}
-		minTime = 500
+	// if number of max retries is zero, no retries will be performed.
+	if d.NumMaxRetries == 0 {
+		return 0
+	}
+
+	// Sets default value for retryer members
+	d.setRetryerDefaults()
+
+	// minDelay is the minimum retryer delay
+	minDelay := d.MinRetryDelay
+
+	var initialDelay time.Duration
+
+	isThrottle := r.IsErrorThrottle()
+	if isThrottle {
+		if delay, ok := getRetryAfterDelay(r); ok {
+			initialDelay = delay
+		}
+		minDelay = d.MinThrottleDelay
 	}
 
 	retryCount := r.RetryCount
-	if throttle && retryCount > 8 {
-		retryCount = 8
-	} else if retryCount > 13 {
-		retryCount = 13
+
+	// maxDelay the maximum retryer delay
+	maxDelay := d.MaxRetryDelay
+
+	if isThrottle {
+		maxDelay = d.MaxThrottleDelay
+	}
+
+	var delay time.Duration
+
+	// Logic to cap the retry count based on the minDelay provided
+	actualRetryCount := int(math.Log2(float64(minDelay))) + 1
+	if actualRetryCount < 63-retryCount {
+		delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
+		if delay > maxDelay {
+			delay = getJitterDelay(maxDelay / 2)
+		}
+	} else {
+		delay = getJitterDelay(maxDelay / 2)
+	}
+	return delay + initialDelay
+}
 
-	delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
-	return time.Duration(delay) * time.Millisecond
+// getJitterDelay returns a jittered delay for retry
+func getJitterDelay(duration time.Duration) time.Duration {
+	return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
 }
 
 // ShouldRetry returns true if the request should be retried.
 func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+
+	// ShouldRetry returns false if number of max retries is 0.
+ if d.NumMaxRetries == 0 { + return false + } + // If one of the other handlers already set the retry state // we don't want to override it based on the service's state if r.Retryable != nil { return *r.Retryable } - - if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 { - return true - } - return r.IsErrorRetryable() || d.shouldThrottle(r) -} - -// ShouldThrottle returns true if the request should be throttled. -func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { - switch r.HTTPResponse.StatusCode { - case 429: - case 502: - case 503: - case 504: - default: - return r.IsErrorThrottle() - } - - return true + return r.IsErrorRetryable() || r.IsErrorThrottle() } // This will look in the Retry-After header, RFC 7231, for how long // it will wait before attempting another request -func getRetryDelay(r *request.Request) (time.Duration, bool) { +func getRetryAfterDelay(r *request.Request) (time.Duration, bool) { if !canUseRetryAfterHeader(r) { return 0, false } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go index 7b5e1276..8958c32d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -67,10 +67,14 @@ func logRequest(r *request.Request) { if !bodySeekable { r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) } - // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's - // Body as a NoOpCloser and will not be reset after read by the HTTP - // client reader. - r.ResetBody() + // Reset the request body because dumpRequest will re-wrap the + // r.HTTPRequest's Body as a NoOpCloser and will not be reset after + // read by the HTTP client reader. + if err := r.Error; err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } } r.Config.Logger.Log(fmt.Sprintf(logReqMsg, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go index 920e9fdd..0c48f72e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -5,6 +5,7 @@ type ClientInfo struct { ServiceName string ServiceID string APIVersion string + PartitionID string Endpoint string SigningName string SigningRegion string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go new file mode 100644 index 00000000..881d575f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go @@ -0,0 +1,28 @@ +package client + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// NoOpRetryer provides a retryer that performs no retries. +// It should be used when we do not want retries to be performed. +type NoOpRetryer struct{} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API; For NoOpRetryer the MaxRetries will always be zero. +func (d NoOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for NoOpRetryer, as it should never retry. 
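Taken together, the new DefaultRetryer fields and NoOpRetryer let a caller choose retry behaviour per client. A hypothetical wiring sketch; the delay values are placeholders, not recommendations, and the time, aws, aws/client, aws/request and aws/session imports are assumed:

func buildConfigs() {
	// Custom backoff bounds on the standard retryer.
	cfg := request.WithRetryer(aws.NewConfig(), client.DefaultRetryer{
		NumMaxRetries:    client.DefaultRetryerMaxNumRetries,
		MinRetryDelay:    50 * time.Millisecond,
		MinThrottleDelay: client.DefaultRetryerMinThrottleDelay,
		MaxRetryDelay:    30 * time.Second,
		MaxThrottleDelay: client.DefaultRetryerMaxThrottleDelay,
	})
	sess := session.Must(session.NewSession(cfg))

	// Or disable retries entirely for a client that must fail fast.
	noRetryCfg := request.WithRetryer(aws.NewConfig(), client.NoOpRetryer{})
	_, _ = sess, noRetryCfg
}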
+func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since NoOpRetryer does not retry, RetryRules always returns 0. +func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration { + return 0 +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 10634d17..2c002e15 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -20,7 +20,7 @@ type RequestRetryer interface{} // A Config provides service configuration for service clients. By default, // all clients will use the defaults.DefaultConfig structure. // -// // Create Session with MaxRetry configuration to be shared by multiple +// // Create Session with MaxRetries configuration to be shared by multiple // // service clients. // sess := session.Must(session.NewSession(&aws.Config{ // MaxRetries: aws.Int(3), @@ -43,7 +43,7 @@ type Config struct { // An optional endpoint URL (hostname only or fully qualified URI) // that overrides the default generated endpoint for a client. Set this - // to `""` to use the default generated endpoint. + // to `nil` to use the default generated endpoint. // // Note: You must still provide a `Region` value when specifying an // endpoint for a client. @@ -161,6 +161,17 @@ type Config struct { // on GetObject API calls. S3DisableContentMD5Validation *bool + // Set this to `true` to have the S3 service client to use the region specified + // in the ARN, when an ARN is provided as an argument to a bucket parameter. + S3UseARNRegion *bool + + // Set this to `true` to enable the SDK to unmarshal API response header maps to + // normalized lower case map keys. + // + // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case + // Metadata member's map keys. The value of the header in the map is unaffected. + LowerCaseHeaderMaps *bool + // Set this to `true` to disable the EC2Metadata client from overriding the // default http.Client's Timeout. This is helpful if you do not want the // EC2Metadata client to create a new http.Client. This options is only @@ -227,6 +238,7 @@ type Config struct { // EnableEndpointDiscovery will allow for endpoint discovery on operations that // have the definition in its model. By default, endpoint discovery is off. + // To use EndpointDiscovery, Endpoint should be unset or set to an empty string. // // Example: // sess := session.Must(session.NewSession(&aws.Config{ @@ -246,12 +258,18 @@ type Config struct { // Disabling this feature is useful when you want to use local endpoints // for testing that do not support the modeled host prefix pattern. DisableEndpointHostPrefix *bool + + // STSRegionalEndpoint will enable regional or legacy endpoint resolving + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint } // NewConfig returns a new Config pointer that can be chained with builder // methods to set multiple configuration values inline without using pointers. // -// // Create Session with MaxRetry configuration to be shared by multiple +// // Create Session with MaxRetries configuration to be shared by multiple // // service clients. // sess := session.Must(session.NewSession(aws.NewConfig(). 
// WithMaxRetries(3), @@ -379,6 +397,13 @@ func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { } +// WithS3UseARNRegion sets a config S3UseARNRegion value and +// returning a Config pointer for chaining +func (c *Config) WithS3UseARNRegion(enable bool) *Config { + c.S3UseARNRegion = &enable + return c +} + // WithUseDualStack sets a config UseDualStack value returning a Config // pointer for chaining. func (c *Config) WithUseDualStack(enable bool) *Config { @@ -420,6 +445,20 @@ func (c *Config) MergeIn(cfgs ...*Config) { } } +// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config { + c.STSRegionalEndpoint = sre + return c +} + +// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config { + c.S3UsEast1RegionalEndpoint = sre + return c +} + func mergeInConfig(dst *Config, other *Config) { if other == nil { return @@ -493,6 +532,10 @@ func mergeInConfig(dst *Config, other *Config) { dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation } + if other.S3UseARNRegion != nil { + dst.S3UseARNRegion = other.S3UseARNRegion + } + if other.UseDualStack != nil { dst.UseDualStack = other.UseDualStack } @@ -520,6 +563,14 @@ func mergeInConfig(dst *Config, other *Config) { if other.DisableEndpointHostPrefix != nil { dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix } + + if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint { + dst.STSRegionalEndpoint = other.STSRegionalEndpoint + } + + if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint { + dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint + } } // Copy will return a shallow copy of the Config object. If any additional diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go index 66c5945d..2f944633 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go @@ -2,42 +2,8 @@ package aws -import "time" - -// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to -// provide a 1.6 and 1.5 safe version of context that is compatible with Go -// 1.7's Context. -// -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case backgroundCtx: - return "aws.BackgroundContext" - } - return "unknown empty Context" -} - -var ( - backgroundCtx = new(emptyCtx) +import ( + "github.com/aws/aws-sdk-go/internal/context" ) // BackgroundContext returns a context that will never be canceled, has no @@ -52,5 +18,5 @@ var ( // // See https://golang.org/pkg/context for more information on Contexts. 
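The new Config fields above compose with the existing builder style. A sketch of a shared session that opts into ARN-derived S3 regions and the regional STS endpoint; the choices are illustrative and the aws, aws/endpoints and aws/session imports are assumed:

func newSharedSession() *session.Session {
	cfg := aws.NewConfig().
		WithMaxRetries(3).
		WithS3UseARNRegion(true).
		WithSTSRegionalEndpoint(endpoints.RegionalSTSEndpoint)

	return session.Must(session.NewSession(cfg))
}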
func BackgroundContext() Context { - return backgroundCtx + return context.BackgroundCtx } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go index ff5d58e0..4e076c18 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -179,6 +179,242 @@ func IntValueMap(src map[string]*int) map[string]int { return dst } +// Uint returns a pointer to the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintValue returns the value of the uint pointer passed in or +// 0 if the pointer is nil. +func UintValue(v *uint) uint { + if v != nil { + return *v + } + return 0 +} + +// UintSlice converts a slice of uint values uinto a slice of +// uint pointers +func UintSlice(src []uint) []*uint { + dst := make([]*uint, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// UintValueSlice converts a slice of uint pointers uinto a slice of +// uint values +func UintValueSlice(src []*uint) []uint { + dst := make([]uint, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// UintMap converts a string map of uint values uinto a string +// map of uint pointers +func UintMap(src map[string]uint) map[string]*uint { + dst := make(map[string]*uint) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// UintValueMap converts a string map of uint pointers uinto a string +// map of uint values +func UintValueMap(src map[string]*uint) map[string]uint { + dst := make(map[string]uint) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int8 returns a pointer to the int8 value passed in. +func Int8(v int8) *int8 { + return &v +} + +// Int8Value returns the value of the int8 pointer passed in or +// 0 if the pointer is nil. +func Int8Value(v *int8) int8 { + if v != nil { + return *v + } + return 0 +} + +// Int8Slice converts a slice of int8 values into a slice of +// int8 pointers +func Int8Slice(src []int8) []*int8 { + dst := make([]*int8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int8ValueSlice converts a slice of int8 pointers into a slice of +// int8 values +func Int8ValueSlice(src []*int8) []int8 { + dst := make([]int8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int8Map converts a string map of int8 values into a string +// map of int8 pointers +func Int8Map(src map[string]int8) map[string]*int8 { + dst := make(map[string]*int8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int8ValueMap converts a string map of int8 pointers into a string +// map of int8 values +func Int8ValueMap(src map[string]*int8) map[string]int8 { + dst := make(map[string]int8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int16 returns a pointer to the int16 value passed in. +func Int16(v int16) *int16 { + return &v +} + +// Int16Value returns the value of the int16 pointer passed in or +// 0 if the pointer is nil. 
+func Int16Value(v *int16) int16 { + if v != nil { + return *v + } + return 0 +} + +// Int16Slice converts a slice of int16 values into a slice of +// int16 pointers +func Int16Slice(src []int16) []*int16 { + dst := make([]*int16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int16ValueSlice converts a slice of int16 pointers into a slice of +// int16 values +func Int16ValueSlice(src []*int16) []int16 { + dst := make([]int16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int16Map converts a string map of int16 values into a string +// map of int16 pointers +func Int16Map(src map[string]int16) map[string]*int16 { + dst := make(map[string]*int16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int16ValueMap converts a string map of int16 pointers into a string +// map of int16 values +func Int16ValueMap(src map[string]*int16) map[string]int16 { + dst := make(map[string]int16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the int32 pointer passed in or +// 0 if the pointer is nil. +func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int32 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + // Int64 returns a pointer to the int64 value passed in. func Int64(v int64) *int64 { return &v @@ -238,6 +474,301 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 { return dst } +// Uint8 returns a pointer to the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Value returns the value of the uint8 pointer passed in or +// 0 if the pointer is nil. 
+func Uint8Value(v *uint8) uint8 { + if v != nil { + return *v + } + return 0 +} + +// Uint8Slice converts a slice of uint8 values into a slice of +// uint8 pointers +func Uint8Slice(src []uint8) []*uint8 { + dst := make([]*uint8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint8ValueSlice converts a slice of uint8 pointers into a slice of +// uint8 values +func Uint8ValueSlice(src []*uint8) []uint8 { + dst := make([]uint8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint8Map converts a string map of uint8 values into a string +// map of uint8 pointers +func Uint8Map(src map[string]uint8) map[string]*uint8 { + dst := make(map[string]*uint8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint8ValueMap converts a string map of uint8 pointers into a string +// map of uint8 values +func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { + dst := make(map[string]uint8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint16 returns a pointer to the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Value returns the value of the uint16 pointer passed in or +// 0 if the pointer is nil. +func Uint16Value(v *uint16) uint16 { + if v != nil { + return *v + } + return 0 +} + +// Uint16Slice converts a slice of uint16 values into a slice of +// uint16 pointers +func Uint16Slice(src []uint16) []*uint16 { + dst := make([]*uint16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of +// uint16 values +func Uint16ValueSlice(src []*uint16) []uint16 { + dst := make([]uint16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint16Map converts a string map of uint16 values into a string +// map of uint16 pointers +func Uint16Map(src map[string]uint16) map[string]*uint16 { + dst := make(map[string]*uint16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint16ValueMap converts a string map of uint16 pointers into a string +// map of uint16 values +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { + dst := make(map[string]uint16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32 returns a pointer to the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Value returns the value of the uint32 pointer passed in or +// 0 if the pointer is nil. 
+func Uint32Value(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32Slice converts a slice of uint32 values into a slice of +// uint32 pointers +func Uint32Slice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of +// uint32 values +func Uint32ValueSlice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32Map converts a string map of uint32 values into a string +// map of uint32 pointers +func Uint32Map(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32ValueMap converts a string map of uint32 pointers into a string +// map of uint32 values +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64 returns a pointer to the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Value returns the value of the uint64 pointer passed in or +// 0 if the pointer is nil. +func Uint64Value(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64Slice converts a slice of uint64 values into a slice of +// uint64 pointers +func Uint64Slice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of +// uint64 values +func Uint64ValueSlice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64Map converts a string map of uint64 values into a string +// map of uint64 pointers +func Uint64Map(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64ValueMap converts a string map of uint64 pointers into a string +// map of uint64 values +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float32 returns a pointer to the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Value returns the value of the float32 pointer passed in or +// 0 if the pointer is nil. 
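The new sized-integer and float helpers follow the same pattern as the existing String and Int ones: the constructor returns a pointer to a copy, and the Value form is nil-safe. A quick sketch, assuming the fmt and aws imports:

func pointerHelpers() {
	port := aws.Uint16(8080)           // *uint16 pointing at a copy of the literal
	fmt.Println(aws.Uint16Value(port)) // 8080
	fmt.Println(aws.Uint16Value(nil))  // 0, nil pointers are safe

	ids := aws.Int32Slice([]int32{1, 2, 3}) // []*int32
	fmt.Println(aws.Int32ValueSlice(ids))   // [1 2 3]
}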
+func Float32Value(v *float32) float32 { + if v != nil { + return *v + } + return 0 +} + +// Float32Slice converts a slice of float32 values into a slice of +// float32 pointers +func Float32Slice(src []float32) []*float32 { + dst := make([]*float32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float32ValueSlice converts a slice of float32 pointers into a slice of +// float32 values +func Float32ValueSlice(src []*float32) []float32 { + dst := make([]float32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float32Map converts a string map of float32 values into a string +// map of float32 pointers +func Float32Map(src map[string]float32) map[string]*float32 { + dst := make(map[string]*float32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float32ValueMap converts a string map of float32 pointers into a string +// map of float32 values +func Float32ValueMap(src map[string]*float32) map[string]float32 { + dst := make(map[string]float32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + // Float64 returns a pointer to the float64 value passed in. func Float64(v float64) *float64 { return &v diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index f8853d78..aa902d70 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -159,9 +159,9 @@ func handleSendError(r *request.Request, err error) { Body: ioutil.NopCloser(bytes.NewReader([]byte{})), } } - // Catch all other request errors. - r.Error = awserr.New("RequestError", "send request failed", err) - r.Retryable = aws.Bool(true) // network errors are retryable + // Catch all request errors, and let the default retrier determine + // if the error is retryable. + r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err) // Override the error with a context canceled error, if that was canceled. ctx := r.Context() @@ -184,37 +184,39 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH // AfterRetryHandler performs final checks to determine if the request should // be retried and how long to delay. 
-var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } +var AfterRetryHandler = request.NamedHandler{ + Name: "core.AfterRetryHandler", + Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) - if sleepFn := r.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(r.RetryDelay) - } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", err) - r.Retryable = aws.Bool(false) - return - } + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.IsErrorExpired() { - r.Config.Credentials.Expire() - } + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } - r.RetryCount++ - r.Error = nil - } -}} + r.RetryCount++ + r.Error = nil + } + }} // ValidateEndpointHandler is a request handler to validate a request had the // appropriate Region and Endpoint set. Will set r.Error if the endpoint or diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go new file mode 100644 index 00000000..5852b264 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go @@ -0,0 +1,22 @@ +// +build !go1.7 + +package credentials + +import ( + "github.com/aws/aws-sdk-go/internal/context" +) + +// backgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. 
+func backgroundContext() Context { + return context.BackgroundCtx +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go new file mode 100644 index 00000000..388b2154 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go @@ -0,0 +1,20 @@ +// +build go1.7 + +package credentials + +import "context" + +// backgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func backgroundContext() Context { + return context.Background() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go new file mode 100644 index 00000000..8152a864 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go @@ -0,0 +1,39 @@ +// +build !go1.9 + +package credentials + +import "time" + +// Context is an copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as a SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// This type, aws.Context, and context.Context are equivalent. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go new file mode 100644 index 00000000..4356edb3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go @@ -0,0 +1,13 @@ +// +build go1.9 + +package credentials + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. 
+// It can be used within the SDK's API operation "WithContext" methods. +// +// This type, aws.Context, and context.Context are equivalent. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index 894bbc7f..9f8fd92a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -50,9 +50,11 @@ package credentials import ( "fmt" - "github.com/aws/aws-sdk-go/aws/awserr" - "sync" + "sync/atomic" "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/sync/singleflight" ) // AnonymousCredentials is an empty Credential object that can be used as @@ -83,6 +85,12 @@ type Value struct { ProviderName string } +// HasKeys returns if the credentials Value has both AccessKeyID and +// SecretAccessKey value set. +func (v Value) HasKeys() bool { + return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 +} + // A Provider is the interface for any component which will provide credentials // Value. A provider is required to manage its own Expired state, and what to // be expired means. @@ -99,6 +107,13 @@ type Provider interface { IsExpired() bool } +// ProviderWithContext is a Provider that can retrieve credentials with a Context +type ProviderWithContext interface { + Provider + + RetrieveWithContext(Context) (Value, error) +} + // An Expirer is an interface that Providers can implement to expose the expiration // time, if known. If the Provider cannot accurately provide this info, // it should not implement this interface. @@ -190,24 +205,24 @@ func (e *Expiry) ExpiresAt() time.Time { // first instance of the credentials Value. All calls to Get() after that // will return the cached credentials Value until IsExpired() returns true. type Credentials struct { - creds Value - forceRefresh bool - - m sync.RWMutex + creds atomic.Value + sf singleflight.Group provider Provider } // NewCredentials returns a pointer to a new Credentials with the provider set. func NewCredentials(provider Provider) *Credentials { - return &Credentials{ - provider: provider, - forceRefresh: true, + c := &Credentials{ + provider: provider, } + c.creds.Store(Value{}) + return c } -// Get returns the credentials value, or error if the credentials Value failed -// to be retrieved. +// GetWithContext returns the credentials value, or error if the credentials +// Value failed to be retrieved. Will return early if the passed in context is +// canceled. // // Will return the cached credentials Value if it has not expired. If the // credentials Value has expired the Provider's Retrieve() will be called @@ -215,31 +230,56 @@ func NewCredentials(provider Provider) *Credentials { // // If Credentials.Expire() was called the credentials Value will be force // expired, and the next call to Get() will cause them to be refreshed. -func (c *Credentials) Get() (Value, error) { - // Check the cached credentials first with just the read lock. - c.m.RLock() - if !c.isExpired() { - creds := c.creds - c.m.RUnlock() - return creds, nil +// +// Passed in Context is equivalent to aws.Context, and context.Context. 
+func (c *Credentials) GetWithContext(ctx Context) (Value, error) { + if curCreds := c.creds.Load(); !c.isExpired(curCreds) { + return curCreds.(Value), nil + } + + // Cannot pass context down to the actual retrieve, because the first + // context would cancel the whole group when there is not direct + // association of items in the group. + resCh := c.sf.DoChan("", func() (interface{}, error) { + return c.singleRetrieve(&suppressedContext{ctx}) + }) + select { + case res := <-resCh: + return res.Val.(Value), res.Err + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) } - c.m.RUnlock() - - // Credentials are expired need to retrieve the credentials taking the full - // lock. - c.m.Lock() - defer c.m.Unlock() - - if c.isExpired() { - creds, err := c.provider.Retrieve() - if err != nil { - return Value{}, err - } - c.creds = creds - c.forceRefresh = false +} + +func (c *Credentials) singleRetrieve(ctx Context) (creds interface{}, err error) { + if curCreds := c.creds.Load(); !c.isExpired(curCreds) { + return curCreds.(Value), nil } - return c.creds, nil + if p, ok := c.provider.(ProviderWithContext); ok { + creds, err = p.RetrieveWithContext(ctx) + } else { + creds, err = c.provider.Retrieve() + } + if err == nil { + c.creds.Store(creds) + } + + return creds, err +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + return c.GetWithContext(backgroundContext()) } // Expire expires the credentials and forces them to be retrieved on the @@ -248,10 +288,7 @@ func (c *Credentials) Get() (Value, error) { // This will override the Provider's expired state, and force Credentials // to call the Provider's Retrieve(). func (c *Credentials) Expire() { - c.m.Lock() - defer c.m.Unlock() - - c.forceRefresh = true + c.creds.Store(Value{}) } // IsExpired returns if the credentials are no longer valid, and need @@ -260,33 +297,43 @@ func (c *Credentials) Expire() { // If the Credentials were forced to be expired with Expire() this will // reflect that override. func (c *Credentials) IsExpired() bool { - c.m.RLock() - defer c.m.RUnlock() - - return c.isExpired() + return c.isExpired(c.creds.Load()) } // isExpired helper method wrapping the definition of expired credentials. -func (c *Credentials) isExpired() bool { - return c.forceRefresh || c.provider.IsExpired() +func (c *Credentials) isExpired(creds interface{}) bool { + return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired() } // ExpiresAt provides access to the functionality of the Expirer interface of // the underlying Provider, if it supports that interface. Otherwise, it returns // an error. 
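The rewritten credentials.Credentials above swaps the mutex/forceRefresh pair for an atomic.Value cache plus a singleflight group, and adds GetWithContext so a caller can abort while a refresh is in flight. A minimal usage sketch, not part of the patch; static credentials are used only to keep it self-contained, any provider behaves the same way:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// GetWithContext returns the cached Value, or retrieves it from the
	// provider; if ctx is canceled first, a "RequestCanceled" error is returned.
	v, err := creds.GetWithContext(ctx)
	if err != nil {
		fmt.Println("retrieve failed:", err)
		return
	}
	fmt.Println("provider:", v.ProviderName, "has keys:", v.HasKeys())

	// Expire drops the cached Value so the next Get/GetWithContext call
	// asks the provider again.
	creds.Expire()
}
```

Concurrent callers are deduplicated through the singleflight group, so many goroutines asking for expired credentials at once trigger only a single Retrieve.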
func (c *Credentials) ExpiresAt() (time.Time, error) { - c.m.RLock() - defer c.m.RUnlock() - expirer, ok := c.provider.(Expirer) if !ok { return time.Time{}, awserr.New("ProviderNotExpirer", - fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName), + fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.Load().(Value).ProviderName), nil) } - if c.forceRefresh { + if c.creds.Load().(Value) == (Value{}) { // set expiration time to the distant past return time.Time{}, nil } return expirer.ExpiresAt(), nil } + +type suppressedContext struct { + Context +} + +func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { + return time.Time{}, false +} + +func (s *suppressedContext) Done() <-chan struct{} { + return nil +} + +func (s *suppressedContext) Err() error { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go index 0ed791be..92af5b72 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -7,10 +7,12 @@ import ( "strings" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/sdkuri" ) @@ -86,7 +88,14 @@ func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(* // Error will be returned if the request fails, or unable to extract // the desired credentials. func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { - credsList, err := requestCredList(m.Client) + return m.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + credsList, err := requestCredList(ctx, m.Client) if err != nil { return credentials.Value{ProviderName: ProviderName}, err } @@ -96,7 +105,7 @@ func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { } credsName := credsList[0] - roleCreds, err := requestCred(m.Client, credsName) + roleCreds, err := requestCred(ctx, m.Client, credsName) if err != nil { return credentials.Value{ProviderName: ProviderName}, err } @@ -129,8 +138,8 @@ const iamSecurityCredsPath = "iam/security-credentials/" // requestCredList requests a list of credentials from the EC2 service. 
// If there are no credentials, or there is an error making or receiving the request -func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { - resp, err := client.GetMetadata(iamSecurityCredsPath) +func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath) if err != nil { return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) } @@ -142,7 +151,8 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { } if err := s.Err(); err != nil { - return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) + return nil, awserr.New(request.ErrCodeSerialization, + "failed to read EC2 instance role from metadata service", err) } return credsList, nil @@ -152,8 +162,8 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { // // If the credentials cannot be found, or there is an error reading the response // and error will be returned. -func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName)) +func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName)) if err != nil { return ec2RoleCredRespBody{}, awserr.New("EC2RoleRequestError", @@ -164,7 +174,7 @@ func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCred respCreds := ec2RoleCredRespBody{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { return ec2RoleCredRespBody{}, - awserr.New("SerializationError", + awserr.New(request.ErrCodeSerialization, fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), err) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index ace51313..785f30d8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -39,6 +39,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" ) // ProviderName is the name of the credentials provider. @@ -97,8 +98,8 @@ func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint strin return p } -// NewCredentialsClient returns a Credentials wrapper for retrieving credentials -// from an arbitrary endpoint concurrently. The client will request the +// NewCredentialsClient returns a pointer to a new Credentials object +// wrapping the endpoint credentials Provider. func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) } @@ -115,7 +116,13 @@ func (p *Provider) IsExpired() bool { // Retrieve will attempt to request the credentials from the endpoint the Provider // was configured for. And error will be returned if the retrieval fails. 
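Providers in this patch gain RetrieveWithContext so the caller's context reaches the underlying metadata or endpoint request; the ec2rolecreds changes above are one instance of the pattern, which the new credentials.ProviderWithContext interface captures. The sketch below shows a custom provider opting into that interface; the provider itself is invented purely for illustration:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

// demoProvider is a hypothetical provider used only to illustrate the
// ProviderWithContext interface added by this patch.
type demoProvider struct{}

func (p demoProvider) Retrieve() (credentials.Value, error) {
	return p.RetrieveWithContext(context.Background())
}

// RetrieveWithContext is what Credentials.GetWithContext calls when the
// provider implements credentials.ProviderWithContext, letting a slow
// lookup observe context cancellation.
func (p demoProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
	select {
	case <-ctx.Done():
		return credentials.Value{}, ctx.Err()
	default:
	}
	return credentials.Value{
		AccessKeyID:     "AKID",
		SecretAccessKey: "SECRET",
		ProviderName:    "demoProvider",
	}, nil
}

func (p demoProvider) IsExpired() bool { return false }

func main() {
	creds := credentials.NewCredentials(demoProvider{})
	v, err := creds.GetWithContext(context.Background())
	fmt.Println(v.ProviderName, err)
}
```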
func (p *Provider) Retrieve() (credentials.Value, error) { - resp, err := p.getCredentials() + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. +func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + resp, err := p.getCredentials(ctx) if err != nil { return credentials.Value{ProviderName: ProviderName}, awserr.New("CredentialsEndpointError", "failed to load credentials", err) @@ -147,7 +154,7 @@ type errorOutput struct { Message string `json:"message"` } -func (p *Provider) getCredentials() (*getCredentialsOutput, error) { +func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) { op := &request.Operation{ Name: "GetCredentials", HTTPMethod: "GET", @@ -155,6 +162,7 @@ func (p *Provider) getCredentials() (*getCredentialsOutput, error) { out := &getCredentialsOutput{} req := p.Client.NewRequest(op, nil, out) + req.SetContext(ctx) req.HTTPRequest.Header.Set("Accept", "application/json") if authToken := p.AuthorizationToken; len(authToken) != 0 { req.HTTPRequest.Header.Set("Authorization", authToken) @@ -174,7 +182,7 @@ func unmarshalHandler(r *request.Request) { out := r.Data.(*getCredentialsOutput) if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { - r.Error = awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode endpoint credentials", err, ) @@ -185,11 +193,15 @@ func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() var errOut errorOutput - if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil { - r.Error = awserr.New("SerializationError", - "failed to decode endpoint credentials", - err, + err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to decode error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, ) + return } // Response body format is not consistent between metadata endpoints. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go index 1980c8c1..e6248360 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -90,6 +90,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/internal/sdkio" ) const ( @@ -142,7 +143,7 @@ const ( // DefaultBufSize limits buffer size from growing to an enormous // amount due to a faulty process. - DefaultBufSize = 1024 + DefaultBufSize = int(8 * sdkio.KibiByte) // DefaultTimeout default limit on time a process can run. 
DefaultTimeout = time.Duration(1) * time.Minute diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go index e1551495..22b5c5d9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -17,8 +17,9 @@ var ( ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) ) -// A SharedCredentialsProvider retrieves credentials from the current user's home -// directory, and keeps track if those credentials are expired. +// A SharedCredentialsProvider retrieves access key pair (access key ID, +// secret access key, and session token if present) credentials from the current +// user's home directory, and keeps track if those credentials are expired. // // Profile ini file example: $HOME/.aws/credentials type SharedCredentialsProvider struct { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go index 531139e3..cbba1e3d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -19,7 +19,9 @@ type StaticProvider struct { } // NewStaticCredentials returns a pointer to a new Credentials object -// wrapping a static credentials value provider. +// wrapping a static credentials value provider. Token is only required +// for temporary security credentials retrieved via STS, otherwise an empty +// string can be passed for this parameter. func NewStaticCredentials(id, secret, token string) *Credentials { return NewCredentials(&StaticProvider{Value: Value{ AccessKeyID: id, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go index b6dbfd24..6846ef6f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -87,6 +87,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/sdkrand" "github.com/aws/aws-sdk-go/service/sts" ) @@ -118,6 +119,10 @@ type AssumeRoler interface { AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) } +type assumeRolerWithContext interface { + AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) +} + // DefaultDuration is the default amount of time in minutes that the credentials // will be valid for. var DefaultDuration = time.Duration(15) * time.Minute @@ -144,6 +149,13 @@ type AssumeRoleProvider struct { // Session name, if you wish to reuse the credentials elsewhere. RoleSessionName string + // Optional, you can pass tag key-value pairs to your session. These tags are called session tags. + Tags []*sts.Tag + + // A list of keys for session tags that you want to set as transitive. 
+ // If you set a tag key as transitive, the corresponding key and value passes to subsequent sessions in a role chain. + TransitiveTagKeys []*string + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. Duration time.Duration @@ -157,6 +169,29 @@ type AssumeRoleProvider struct { // size. Policy *string + // The ARNs of IAM managed policies you want to use as managed session policies. + // The policies must exist in the same account as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*sts.PolicyDescriptorType + // The identification number of the MFA device that is associated with the user // who is making the AssumeRole call. Specify this value if the trust policy // of the role being assumed includes a condition that requires MFA authentication. @@ -200,7 +235,7 @@ type AssumeRoleProvider struct { // by a random percentage between 0 and MaxJitterFraction. MaxJitterFrac must // have a value between 0 and 1. Any other value may lead to expected behavior. // With a MaxJitterFrac value of 0, default) will no jitter will be used. - // + // // For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the // AssumeRole call will be made with an arbitrary Duration between 27m and // 30m. @@ -258,7 +293,11 @@ func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(* // Retrieve generates a new set of temporary credentials using STS. func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} +// RetrieveWithContext generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { // Apply defaults where parameters are not set. if p.RoleSessionName == "" { // Try to work out a role name that will hopefully end up unique. 
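The new Tags, TransitiveTagKeys and PolicyArns fields above are forwarded to sts.AssumeRoleInput when the role is assumed. A configuration sketch; the role ARN and tag values are placeholders, not taken from this patch:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())

	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/demo-role",
		func(p *stscreds.AssumeRoleProvider) {
			// Session tags are attached to the assumed-role session; transitive
			// keys are passed on to any further role in a role chain.
			p.Tags = []*sts.Tag{
				{Key: aws.String("Project"), Value: aws.String("goreplay")},
			}
			p.TransitiveTagKeys = []*string{aws.String("Project")}
		})

	// Credentials resolve lazily; the AssumeRole call happens on first use,
	// e.g. via creds.Get() or an API client configured with these credentials.
	fmt.Println(creds.IsExpired())
}
```

When the configured client also implements AssumeRoleWithContext (the generated STS client does), RetrieveWithContext uses the context-aware call; otherwise it falls back to plain AssumeRole, as the type assertion in the hunk above shows.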
@@ -270,10 +309,13 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { } jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) input := &sts.AssumeRoleInput{ - DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), - RoleArn: aws.String(p.RoleARN), - RoleSessionName: aws.String(p.RoleSessionName), - ExternalId: p.ExternalID, + DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + Tags: p.Tags, + PolicyArns: p.PolicyArns, + TransitiveTagKeys: p.TransitiveTagKeys, } if p.Policy != nil { input.Policy = p.Policy @@ -296,7 +338,15 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { } } - roleOutput, err := p.Client.AssumeRole(input) + var roleOutput *sts.AssumeRoleOutput + var err error + + if c, ok := p.Client.(assumeRolerWithContext); ok { + roleOutput, err = c.AssumeRoleWithContext(ctx, input) + } else { + roleOutput, err = p.Client.AssumeRole(input) + } + if err != nil { return credentials.Value{ProviderName: ProviderName}, err } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go new file mode 100644 index 00000000..6feb262b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go @@ -0,0 +1,135 @@ +package stscreds + +import ( + "fmt" + "io/ioutil" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/aws/aws-sdk-go/service/sts/stsiface" +) + +const ( + // ErrCodeWebIdentity will be used as an error code when constructing + // a new error to be returned during session creation or retrieval. + ErrCodeWebIdentity = "WebIdentityErr" + + // WebIdentityProviderName is the web identity provider name + WebIdentityProviderName = "WebIdentityCredentials" +) + +// now is used to return a time.Time object representing +// the current time. This can be used to easily test and +// compare test values. +var now = time.Now + +// TokenFetcher shuold return WebIdentity token bytes or an error +type TokenFetcher interface { + FetchToken(credentials.Context) ([]byte, error) +} + +// FetchTokenPath is a path to a WebIdentity token file +type FetchTokenPath string + +// FetchToken returns a token by reading from the filesystem +func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) { + data, err := ioutil.ReadFile(string(f)) + if err != nil { + errMsg := fmt.Sprintf("unable to read file at %s", f) + return nil, awserr.New(ErrCodeWebIdentity, errMsg, err) + } + return data, nil +} + +// WebIdentityRoleProvider is used to retrieve credentials using +// an OIDC token. +type WebIdentityRoleProvider struct { + credentials.Expiry + PolicyArns []*sts.PolicyDescriptorType + + client stsiface.STSAPI + ExpiryWindow time.Duration + + tokenFetcher TokenFetcher + roleARN string + roleSessionName string +} + +// NewWebIdentityCredentials will return a new set of credentials with a given +// configuration, role arn, and token file path. 
+func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials { + svc := sts.New(c) + p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path) + return credentials.NewCredentials(p) +} + +// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the +// provided stsiface.STSAPI +func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider { + return NewWebIdentityRoleProviderWithToken(svc, roleARN, roleSessionName, FetchTokenPath(path)) +} + +// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the +// provided stsiface.STSAPI and a TokenFetcher +func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider { + return &WebIdentityRoleProvider{ + client: svc, + tokenFetcher: tokenFetcher, + roleARN: roleARN, + roleSessionName: roleSessionName, + } +} + +// Retrieve will attempt to assume a role from a token which is located at +// 'WebIdentityTokenFilePath' specified destination and if that is empty an +// error will be returned. +func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to assume a role from a token which is located at +// 'WebIdentityTokenFilePath' specified destination and if that is empty an +// error will be returned. +func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + b, err := p.tokenFetcher.FetchToken(ctx) + if err != nil { + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err) + } + + sessionName := p.roleSessionName + if len(sessionName) == 0 { + // session name is used to uniquely identify a session. This simply + // uses unix time in nanoseconds to uniquely identify sessions. + sessionName = strconv.FormatInt(now().UnixNano(), 10) + } + req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ + PolicyArns: p.PolicyArns, + RoleArn: &p.roleARN, + RoleSessionName: &sessionName, + WebIdentityToken: aws.String(string(b)), + }) + + req.SetContext(ctx) + + // InvalidIdentityToken error is a temporary error that can occur + // when assuming an Role with a JWT web identity token. + req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException) + if err := req.Send(); err != nil { + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err) + } + + p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow) + + value := credentials.Value{ + AccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId), + SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey), + SessionToken: aws.StringValue(resp.Credentials.SessionToken), + ProviderName: WebIdentityProviderName, + } + return value, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go index 152d785b..25a66d1d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go @@ -1,30 +1,61 @@ -// Package csm provides Client Side Monitoring (CSM) which enables sending metrics -// via UDP connection. 
Using the Start function will enable the reporting of -// metrics on a given port. If Start is called, with different parameters, again, -// a panic will occur. +// Package csm provides the Client Side Monitoring (CSM) client which enables +// sending metrics via UDP connection to the CSM agent. This package provides +// control options, and configuration for the CSM client. The client can be +// controlled manually, or automatically via the SDK's Session configuration. // -// Pause can be called to pause any metrics publishing on a given port. Sessions -// that have had their handlers modified via InjectHandlers may still be used. -// However, the handlers will act as a no-op meaning no metrics will be published. +// Enabling CSM client via SDK's Session configuration +// +// The CSM client can be enabled automatically via SDK's Session configuration. +// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT +// environment variable is set to a non-empty value. +// +// The configuration options for the CSM client via the SDK's session +// configuration are: +// +// * AWS_CSM_PORT= +// The port number the CSM agent will receive metrics on. +// +// * AWS_CSM_HOST= +// The hostname, or IP address the CSM agent will receive metrics on. +// Without port number. +// +// Manually enabling the CSM client +// +// The CSM client can be started, paused, and resumed manually. The Start +// function will enable the CSM client to publish metrics to the CSM agent. It +// is safe to call Start concurrently, but if Start is called additional times +// with different ClientID or address it will panic. // -// Example: // r, err := csm.Start("clientID", ":31000") // if err != nil { // panic(fmt.Errorf("failed starting CSM: %v", err)) // } // +// When controlling the CSM client manually, you must also inject its request +// handlers into the SDK's Session configuration for the SDK's API clients to +// publish metrics. +// // sess, err := session.NewSession(&aws.Config{}) // if err != nil { // panic(fmt.Errorf("failed loading session: %v", err)) // } // +// // Add CSM client's metric publishing request handlers to the SDK's +// // Session Configuration. // r.InjectHandlers(&sess.Handlers) // -// client := s3.New(sess) -// resp, err := client.GetObject(&s3.GetObjectInput{ -// Bucket: aws.String("bucket"), -// Key: aws.String("key"), -// }) +// Controlling CSM client +// +// Once the CSM client has been enabled the Get function will return a Reporter +// value that you can use to pause and resume the metrics published to the CSM +// agent. If Get function is called before the reporter is enabled with the +// Start function or via SDK's Session configuration nil will be returned. +// +// The Pause method can be called to stop the CSM client publishing metrics to +// the CSM agent. The Continue method will resume metric publishing. +// +// // Get the CSM client Reporter. +// r := csm.Get() // // // Will pause monitoring // r.Pause() @@ -35,12 +66,4 @@ // // // Resume monitoring // r.Continue() -// -// Start returns a Reporter that is used to enable or disable monitoring. If -// access to the Reporter is required later, calling Get will return the Reporter -// singleton. 
-// -// Example: -// r := csm.Get() -// r.Continue() package csm diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go index 2f0c6eac..4b19e280 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go @@ -2,6 +2,7 @@ package csm import ( "fmt" + "strings" "sync" ) @@ -9,19 +10,40 @@ var ( lock sync.Mutex ) -// Client side metric handler names const ( - APICallMetricHandlerName = "awscsm.SendAPICallMetric" - APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" + // DefaultPort is used when no port is specified. + DefaultPort = "31000" + + // DefaultHost is the host that will be used when none is specified. + DefaultHost = "127.0.0.1" ) -// Start will start the a long running go routine to capture +// AddressWithDefaults returns a CSM address built from the host and port +// values. If the host or port is not set, default values will be used +// instead. If host is "localhost" it will be replaced with "127.0.0.1". +func AddressWithDefaults(host, port string) string { + if len(host) == 0 || strings.EqualFold(host, "localhost") { + host = DefaultHost + } + + if len(port) == 0 { + port = DefaultPort + } + + // Only IP6 host can contain a colon + if strings.Contains(host, ":") { + return "[" + host + "]:" + port + } + + return host + ":" + port +} + +// Start will start a long running go routine to capture // client side metrics. Calling start multiple time will only // start the metric listener once and will panic if a different // client ID or port is passed in. // -// Example: -// r, err := csm.Start("clientID", "127.0.0.1:8094") +// r, err := csm.Start("clientID", "127.0.0.1:31000") // if err != nil { // panic(fmt.Errorf("expected no error, but received %v", err)) // } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go index 514fc373..82a3e345 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -16,25 +16,26 @@ var ( type metricChan struct { ch chan metric - paused int64 + paused *int64 } func newMetricChan(size int) metricChan { return metricChan{ - ch: make(chan metric, size), + ch: make(chan metric, size), + paused: new(int64), } } func (ch *metricChan) Pause() { - atomic.StoreInt64(&ch.paused, pausedEnum) + atomic.StoreInt64(ch.paused, pausedEnum) } func (ch *metricChan) Continue() { - atomic.StoreInt64(&ch.paused, runningEnum) + atomic.StoreInt64(ch.paused, runningEnum) } func (ch *metricChan) IsPaused() bool { - v := atomic.LoadInt64(&ch.paused) + v := atomic.LoadInt64(ch.paused) return v == pausedEnum } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go index 0b5571ac..835bcd49 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -10,11 +10,6 @@ import ( "github.com/aws/aws-sdk-go/aws/request" ) -const ( - // DefaultPort is used when no port is specified - DefaultPort = "31000" -) - // Reporter will gather metrics of API requests made and // send those metrics to the CSM endpoint. 
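AddressWithDefaults in csm/enable.go above is what turns the AWS_CSM_HOST / AWS_CSM_PORT values into the address the reporter dials. A small sketch of the behavior implied by that function, not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/csm"
)

func main() {
	// Empty host and port fall back to the defaults.
	fmt.Println(csm.AddressWithDefaults("", "")) // 127.0.0.1:31000

	// "localhost" is rewritten to the default IP.
	fmt.Println(csm.AddressWithDefaults("localhost", "")) // 127.0.0.1:31000

	// IPv6 hosts are bracketed before the port is appended.
	fmt.Println(csm.AddressWithDefaults("::1", "8094")) // [::1]:8094
}
```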
type Reporter struct { @@ -71,7 +66,6 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { XAmzRequestID: aws.String(r.RequestID), - AttemptCount: aws.Int(r.RetryCount + 1), AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), AccessKey: aws.String(creds.AccessKeyID), } @@ -95,8 +89,8 @@ func getMetricException(err awserr.Error) metricException { code := err.Code() switch code { - case "RequestError", - "SerializationError", + case request.ErrCodeRequestError, + request.ErrCodeSerialization, request.CanceledErrorCode: return sdkException{ requestException{exception: code, message: msg}, @@ -123,7 +117,7 @@ func (rep *Reporter) sendAPICallMetric(r *request.Request) { Type: aws.String("ApiCall"), AttemptCount: aws.Int(r.RetryCount + 1), Region: r.Config.Region, - Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)), + Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)), XAmzRequestID: aws.String(r.RequestID), MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), } @@ -190,8 +184,9 @@ func (rep *Reporter) start() { } } -// Pause will pause the metric channel preventing any new metrics from -// being added. +// Pause will pause the metric channel preventing any new metrics from being +// added. It is safe to call concurrently with other calls to Pause, but if +// called concurently with Continue can lead to unexpected state. func (rep *Reporter) Pause() { lock.Lock() defer lock.Unlock() @@ -203,8 +198,9 @@ func (rep *Reporter) Pause() { rep.close() } -// Continue will reopen the metric channel and allow for monitoring -// to be resumed. +// Continue will reopen the metric channel and allow for monitoring to be +// resumed. It is safe to call concurrently with other calls to Continue, but +// if called concurently with Pause can lead to unexpected state. func (rep *Reporter) Continue() { lock.Lock() defer lock.Unlock() @@ -219,10 +215,18 @@ func (rep *Reporter) Continue() { rep.metricsCh.Continue() } +// Client side metric handler names +const ( + APICallMetricHandlerName = "awscsm.SendAPICallMetric" + APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" +) + // InjectHandlers will will enable client side metrics and inject the proper // handlers to handle how metrics are sent. // -// Example: +// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers +// multiple times may lead to unexpected behavior, (e.g. duplicate metrics). +// // // Start must be called in order to inject the correct handlers // r, err := csm.Start("clientID", "127.0.0.1:8094") // if err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go index d57a1af5..a716c021 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -4,28 +4,73 @@ import ( "encoding/json" "fmt" "net/http" + "strconv" "strings" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/sdkuri" ) +// getToken uses the duration to return a token for EC2 metadata service, +// or an error if the request failed. 
+func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) { + op := &request.Operation{ + Name: "GetToken", + HTTPMethod: "PUT", + HTTPPath: "/api/token", + } + + var output tokenOutput + req := c.NewRequest(op, nil, &output) + req.SetContext(ctx) + + // remove the fetch token handler from the request handlers to avoid infinite recursion + req.Handlers.Sign.RemoveByName(fetchTokenHandlerName) + + // Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request. + req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler) + + ttl := strconv.FormatInt(int64(duration/time.Second), 10) + req.HTTPRequest.Header.Set(ttlHeader, ttl) + + err := req.Send() + + // Errors with bad request status should be returned. + if err != nil { + err = awserr.NewRequestFailure( + awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err), + req.HTTPResponse.StatusCode, req.RequestID) + } + + return output, err +} + // GetMetadata uses the path provided to request information from the EC2 -// instance metdata service. The content will be returned as a string, or +// instance metadata service. The content will be returned as a string, or // error if the request failed. func (c *EC2Metadata) GetMetadata(p string) (string, error) { + return c.GetMetadataWithContext(aws.BackgroundContext(), p) +} + +// GetMetadataWithContext uses the path provided to request information from the EC2 +// instance metadata service. The content will be returned as a string, or +// error if the request failed. +func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) { op := &request.Operation{ Name: "GetMetadata", HTTPMethod: "GET", HTTPPath: sdkuri.PathJoin("/meta-data", p), } - output := &metadataOutput{} + req := c.NewRequest(op, nil, output) - err := req.Send() + req.SetContext(ctx) + + err := req.Send() return output.Content, err } @@ -33,6 +78,13 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) { // there is no user-data setup for the EC2 instance a "NotFoundError" error // code will be returned. func (c *EC2Metadata) GetUserData() (string, error) { + return c.GetUserDataWithContext(aws.BackgroundContext()) +} + +// GetUserDataWithContext returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) { op := &request.Operation{ Name: "GetUserData", HTTPMethod: "GET", @@ -41,13 +93,9 @@ func (c *EC2Metadata) GetUserData() (string, error) { output := &metadataOutput{} req := c.NewRequest(op, nil, output) - req.Handlers.UnmarshalError.PushBack(func(r *request.Request) { - if r.HTTPResponse.StatusCode == http.StatusNotFound { - r.Error = awserr.New("NotFoundError", "user-data not found", r.Error) - } - }) - err := req.Send() + req.SetContext(ctx) + err := req.Send() return output.Content, err } @@ -55,6 +103,13 @@ func (c *EC2Metadata) GetUserData() (string, error) { // instance metadata service for dynamic data. The content will be returned // as a string, or error if the request failed. func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + return c.GetDynamicDataWithContext(aws.BackgroundContext(), p) +} + +// GetDynamicDataWithContext uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. 
The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) { op := &request.Operation{ Name: "GetDynamicData", HTTPMethod: "GET", @@ -63,8 +118,9 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) { output := &metadataOutput{} req := c.NewRequest(op, nil, output) - err := req.Send() + req.SetContext(ctx) + err := req.Send() return output.Content, err } @@ -72,7 +128,14 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) { // instance. Error is returned if the request fails or is unable to parse // the response. func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { - resp, err := c.GetDynamicData("instance-identity/document") + return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext()) +} + +// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document") if err != nil { return EC2InstanceIdentityDocument{}, awserr.New("EC2MetadataRequestError", @@ -82,7 +145,7 @@ func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument doc := EC2InstanceIdentityDocument{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { return EC2InstanceIdentityDocument{}, - awserr.New("SerializationError", + awserr.New(request.ErrCodeSerialization, "failed to decode EC2 instance identity document", err) } @@ -91,7 +154,12 @@ func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument // IAMInfo retrieves IAM info from the metadata API func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { - resp, err := c.GetMetadata("iam/info") + return c.IAMInfoWithContext(aws.BackgroundContext()) +} + +// IAMInfoWithContext retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) { + resp, err := c.GetMetadataWithContext(ctx, "iam/info") if err != nil { return EC2IAMInfo{}, awserr.New("EC2MetadataRequestError", @@ -101,7 +169,7 @@ func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { info := EC2IAMInfo{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { return EC2IAMInfo{}, - awserr.New("SerializationError", + awserr.New(request.ErrCodeSerialization, "failed to decode EC2 IAM info", err) } @@ -116,24 +184,36 @@ func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { // Region returns the region the instance is running in. func (c *EC2Metadata) Region() (string, error) { - resp, err := c.GetMetadata("placement/availability-zone") + return c.RegionWithContext(aws.BackgroundContext()) +} + +// RegionWithContext returns the region the instance is running in. 
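The EC2Metadata client methods above gain *WithContext variants so metadata lookups honor a caller-supplied context. A usage sketch with a short timeout, not part of the patch; it only returns data when the metadata service is actually reachable, e.g. on an EC2 instance:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())
	meta := ec2metadata.New(sess)

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	if !meta.AvailableWithContext(ctx) {
		fmt.Println("not running on EC2 (or metadata service unreachable)")
		return
	}

	region, err := meta.RegionWithContext(ctx)
	if err != nil {
		fmt.Println("region lookup failed:", err)
		return
	}
	fmt.Println("region:", region)

	// Arbitrary metadata paths work the same way.
	id, _ := meta.GetMetadataWithContext(ctx, "instance-id")
	fmt.Println("instance-id:", id)
}
```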
+func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) { + ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx) if err != nil { return "", err } - - if len(resp) == 0 { - return "", awserr.New("EC2MetadataError", "invalid Region response", nil) + // extract region from the ec2InstanceIdentityDocument + region := ec2InstanceIdentityDocument.Region + if len(region) == 0 { + return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil) } - - // returns region without the suffix. Eg: us-west-2a becomes us-west-2 - return resp[:len(resp)-1], nil + // returns region + return region, nil } // Available returns if the application has access to the EC2 Metadata service. // Can be used to determine if application is running within an EC2 Instance and // the metadata service is available. func (c *EC2Metadata) Available() bool { - if _, err := c.GetMetadata("instance-id"); err != nil { + return c.AvailableWithContext(aws.BackgroundContext()) +} + +// AvailableWithContext returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool { + if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil { return false } @@ -152,18 +232,19 @@ type EC2IAMInfo struct { // An EC2InstanceIdentityDocument provides the shape for unmarshaling // an instance identity document type EC2InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index f4438eae..b8b2940d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -13,6 +13,7 @@ import ( "io" "net/http" "os" + "strconv" "strings" "time" @@ -24,9 +25,25 @@ import ( "github.com/aws/aws-sdk-go/aws/request" ) -// ServiceName is the name of the service. -const ServiceName = "ec2metadata" -const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" +const ( + // ServiceName is the name of the service. 
+ ServiceName = "ec2metadata" + disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" + + // Headers for Token and TTL + ttlHeader = "x-aws-ec2-metadata-token-ttl-seconds" + tokenHeader = "x-aws-ec2-metadata-token" + + // Named Handler constants + fetchTokenHandlerName = "FetchTokenHandler" + unmarshalMetadataHandlerName = "unmarshalMetadataHandler" + unmarshalTokenHandlerName = "unmarshalTokenHandler" + enableTokenProviderHandlerName = "enableTokenProviderHandler" + + // TTL constants + defaultTTL = 21600 * time.Second + ttlExpirationWindow = 30 * time.Second +) // A EC2Metadata is an EC2 Metadata service Client. type EC2Metadata struct { @@ -63,8 +80,10 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio // use a shorter timeout than default because the metadata // service is local if it is running, and to fail faster // if not running on an ec2 instance. - Timeout: 5 * time.Second, + Timeout: 1 * time.Second, } + // max number of retries on the client operation + cfg.MaxRetries = aws.Int(2) } svc := &EC2Metadata{ @@ -80,13 +99,27 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ), } - svc.Handlers.Unmarshal.PushBack(unmarshalHandler) + // token provider instance + tp := newTokenProvider(svc, defaultTTL) + + // NamedHandler for fetching token + svc.Handlers.Sign.PushBackNamed(request.NamedHandler{ + Name: fetchTokenHandlerName, + Fn: tp.fetchTokenHandler, + }) + // NamedHandler for enabling token provider + svc.Handlers.Complete.PushBackNamed(request.NamedHandler{ + Name: enableTokenProviderHandlerName, + Fn: tp.enableTokenProviderHandler, + }) + + svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler) svc.Handlers.UnmarshalError.PushBack(unmarshalError) svc.Handlers.Validate.Clear() svc.Handlers.Validate.PushBack(validateEndpointHandler) // Disable the EC2 Metadata service if the environment variable is set. - // This shortcirctes the service's functionality to always fail to send + // This short-circuits the service's functionality to always fail to send // requests. 
if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { svc.Handlers.Send.SwapNamed(request.NamedHandler{ @@ -107,7 +140,6 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio for _, option := range opts { option(svc.Client) } - return svc } @@ -119,30 +151,74 @@ type metadataOutput struct { Content string } -func unmarshalHandler(r *request.Request) { - defer r.HTTPResponse.Body.Close() - b := &bytes.Buffer{} - if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) - return - } +type tokenOutput struct { + Token string + TTL time.Duration +} - if data, ok := r.Data.(*metadataOutput); ok { - data.Content = b.String() - } +// unmarshal token handler is used to parse the response of a getToken operation +var unmarshalTokenHandler = request.NamedHandler{ + Name: unmarshalTokenHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + v := r.HTTPResponse.Header.Get(ttlHeader) + data, ok := r.Data.(*tokenOutput) + if !ok { + return + } + + data.Token = b.String() + // TTL is in seconds + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode, + "unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + t := time.Duration(i) * time.Second + data.TTL = t + }, +} + +var unmarshalHandler = request.NamedHandler{ + Name: unmarshalMetadataHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + if data, ok := r.Data.(*metadataOutput); ok { + data.Content = b.String() + } + }, } func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - b := &bytes.Buffer{} - if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) + var b bytes.Buffer + + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err), + r.HTTPResponse.StatusCode, r.RequestID) return } // Response body format is not consistent between metadata endpoints. 
// Grab the error message as a string and include that as the source error - r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())) + r.Error = awserr.NewRequestFailure(awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())), + r.HTTPResponse.StatusCode, r.RequestID) } func validateEndpointHandler(r *request.Request) { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go new file mode 100644 index 00000000..d0a3a020 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go @@ -0,0 +1,92 @@ +package ec2metadata + +import ( + "net/http" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A tokenProvider struct provides access to EC2Metadata client +// and atomic instance of a token, along with configuredTTL for it. +// tokenProvider also provides an atomic flag to disable the +// fetch token operation. +// The disabled member will use 0 as false, and 1 as true. +type tokenProvider struct { + client *EC2Metadata + token atomic.Value + configuredTTL time.Duration + disabled uint32 +} + +// A ec2Token struct helps use of token in EC2 Metadata service ops +type ec2Token struct { + token string + credentials.Expiry +} + +// newTokenProvider provides a pointer to a tokenProvider instance +func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider { + return &tokenProvider{client: c, configuredTTL: duration} +} + +// fetchTokenHandler fetches token for EC2Metadata service client by default. +func (t *tokenProvider) fetchTokenHandler(r *request.Request) { + + // short-circuits to insecure data flow if tokenProvider is disabled. + if v := atomic.LoadUint32(&t.disabled); v == 1 { + return + } + + if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + return + } + + output, err := t.client.getToken(r.Context(), t.configuredTTL) + + if err != nil { + + // change the disabled flag on token provider to true, + // when error is request timeout error. + if requestFailureError, ok := err.(awserr.RequestFailure); ok { + switch requestFailureError.StatusCode() { + case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: + atomic.StoreUint32(&t.disabled, 1) + case http.StatusBadRequest: + r.Error = requestFailureError + } + + // Check if request timed out while waiting for response + if e, ok := requestFailureError.OrigErr().(awserr.Error); ok { + if e.Code() == request.ErrCodeRequestError { + atomic.StoreUint32(&t.disabled, 1) + } + } + } + return + } + + newToken := ec2Token{ + token: output.Token, + } + newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow) + t.token.Store(newToken) + + // Inject token header to the request. 
+ if ec2Token, ok := t.token.Load().(ec2Token); ok { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + } +} + +// enableTokenProviderHandler enables the token provider +func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) { + // If the error code status is 401, we enable the token provider + if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil && + e.StatusCode() == http.StatusUnauthorized { + atomic.StoreUint32(&t.disabled, 0) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go index 87b9ff3f..654fb1ad 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -83,6 +83,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol p := &ps[i] custAddEC2Metadata(p) custAddS3DualStack(p) + custRegionalS3(p) custRmIotDataService(p) custFixAppAutoscalingChina(p) custFixAppAutoscalingUsGov(p) @@ -92,7 +93,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol } func custAddS3DualStack(p *partition) { - if p.ID != "aws" { + if !(p.ID == "aws" || p.ID == "aws-cn" || p.ID == "aws-us-gov") { return } @@ -100,6 +101,33 @@ func custAddS3DualStack(p *partition) { custAddDualstack(p, "s3-control") } +func custRegionalS3(p *partition) { + if p.ID != "aws" { + return + } + + service, ok := p.Services["s3"] + if !ok { + return + } + + // If global endpoint already exists no customization needed. + if _, ok := service.Endpoints["aws-global"]; ok { + return + } + + service.PartitionEndpoint = "aws-global" + service.Endpoints["us-east-1"] = endpoint{} + service.Endpoints["aws-global"] = endpoint{ + Hostname: "s3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + } + + p.Services["s3"] = service +} + func custAddDualstack(p *partition, svcName string) { s, ok := p.Services[svcName] if !ok { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 17ee7c72..710d551c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -11,10 +11,13 @@ const ( AwsPartitionID = "aws" // AWS Standard partition. AwsCnPartitionID = "aws-cn" // AWS China partition. AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. + AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. + AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. ) // AWS Standard partition's regions. const ( + AfSouth1RegionID = "af-south-1" // Africa (Cape Town). ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). @@ -22,11 +25,13 @@ const ( ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). CaCentral1RegionID = "ca-central-1" // Canada (Central). - EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). - EuNorth1RegionID = "eu-north-1" // EU (Stockholm). - EuWest1RegionID = "eu-west-1" // EU (Ireland). - EuWest2RegionID = "eu-west-2" // EU (London). - EuWest3RegionID = "eu-west-3" // EU (Paris). + EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). + EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). 
+ EuSouth1RegionID = "eu-south-1" // Europe (Milan). + EuWest1RegionID = "eu-west-1" // Europe (Ireland). + EuWest2RegionID = "eu-west-2" // Europe (London). + EuWest3RegionID = "eu-west-3" // Europe (Paris). + MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). UsEast1RegionID = "us-east-1" // US East (N. Virginia). UsEast2RegionID = "us-east-2" // US East (Ohio). @@ -43,11 +48,21 @@ const ( // AWS GovCloud (US) partition's regions. const ( UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). - UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). +) + +// AWS ISO (US) partition's regions. +const ( + UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. +) + +// AWS ISOB (US) partition's regions. +const ( + UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). ) // DefaultResolver returns an Endpoint resolver that will be able -// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US). +// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). // // Use DefaultPartitions() to get the list of the default partitions. func DefaultResolver() Resolver { @@ -55,7 +70,7 @@ func DefaultResolver() Resolver { } // DefaultPartitions returns a list of the partitions the SDK is bundled -// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US). +// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). // // partitions := endpoints.DefaultPartitions // for _, p := range partitions { @@ -69,6 +84,8 @@ var defaultPartitions = partitions{ awsPartition, awscnPartition, awsusgovPartition, + awsisoPartition, + awsisobPartition, } // AwsPartition returns the Resolver for AWS Standard. 
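The vendored ec2metadata and endpoints changes above are transparent to callers: the metadata client now fetches and caches an IMDSv2 session token in its Sign handler (falling back to the insecure flow when the token endpoint is unreachable or disallowed), and the region is read from the instance identity document instead of being derived by trimming the availability-zone suffix. Below is a minimal, illustrative sketch of how a caller could exercise the updated APIs; it is not part of the vendored code and assumes the usual aws-sdk-go session setup.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/endpoints"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	// The updated client ships with a short (1-second) timeout and 2 retries
	// by default; the IMDSv2 token is fetched, cached, and attached by the
	// FetchTokenHandler, so the caller never handles it directly.
	meta := ec2metadata.New(sess)

	ctx := aws.BackgroundContext()
	if !meta.AvailableWithContext(ctx) {
		log.Fatal("EC2 metadata service is not reachable (or AWS_EC2_METADATA_DISABLED is set)")
	}

	// Region now comes from the instance identity document rather than from
	// trimming the availability-zone suffix.
	region, err := meta.RegionWithContext(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("instance region:", region)

	// The custRegionalS3 customization gives the "aws" partition an aws-global
	// S3 endpoint; the exact host returned for us-east-1 depends on the S3
	// regional-endpoint setting (legacy global endpoint by default).
	ep, err := endpoints.DefaultResolver().EndpointFor("s3", "us-east-1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("resolved S3 endpoint:", ep.URL)
}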
@@ -82,7 +99,7 @@ var awsPartition = partition{ DNSSuffix: "amazonaws.com", RegionRegex: regionRegex{ Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$") + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") return reg }(), }, @@ -92,6 +109,9 @@ var awsPartition = partition{ SignatureVersions: []string{"v4"}, }, Regions: regions{ + "af-south-1": region{ + Description: "Africa (Cape Town)", + }, "ap-east-1": region{ Description: "Asia Pacific (Hong Kong)", }, @@ -114,19 +134,25 @@ var awsPartition = partition{ Description: "Canada (Central)", }, "eu-central-1": region{ - Description: "EU (Frankfurt)", + Description: "Europe (Frankfurt)", }, "eu-north-1": region{ - Description: "EU (Stockholm)", + Description: "Europe (Stockholm)", + }, + "eu-south-1": region{ + Description: "Europe (Milan)", }, "eu-west-1": region{ - Description: "EU (Ireland)", + Description: "Europe (Ireland)", }, "eu-west-2": region{ - Description: "EU (London)", + Description: "Europe (London)", }, "eu-west-3": region{ - Description: "EU (Paris)", + Description: "Europe (Paris)", + }, + "me-south-1": region{ + Description: "Middle East (Bahrain)", }, "sa-east-1": region{ Description: "South America (Sao Paulo)", @@ -151,9 +177,10 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, - "acm": service{ + "access-analyzer": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -163,9 +190,11 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -173,7 +202,117 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "acm": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "acm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "acm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "acm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "acm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "acm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + 
"ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "acm-pca-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "acm-pca-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "acm-pca-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "acm-pca-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.detective": service{ Defaults: endpoint{ Protocols: []string{"https"}, }, @@ -189,6 +328,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -198,6 +338,12 @@ var awsPartition = partition{ "api.ecr": service{ Endpoints: endpoints{ + "af-south-1": endpoint{ + Hostname: "api.ecr.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, "ap-east-1": endpoint{ Hostname: "api.ecr.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -252,6 +398,12 @@ var awsPartition = partition{ Region: "eu-north-1", }, }, + "eu-south-1": endpoint{ + Hostname: "api.ecr.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, "eu-west-1": endpoint{ Hostname: "api.ecr.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -270,6 +422,36 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + "fips-us-east-1": endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{ + Hostname: "api.ecr.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, "sa-east-1": endpoint{ Hostname: "api.ecr.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -302,12 +484,36 @@ var awsPartition = partition{ }, }, }, + "api.elastic-inference": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", + }, + "ap-northeast-2": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", + }, + "eu-west-1": endpoint{ + Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", + }, + "us-east-1": endpoint{ + Hostname: "api.elastic-inference.us-east-1.amazonaws.com", 
+ }, + "us-east-2": endpoint{ + Hostname: "api.elastic-inference.us-east-2.amazonaws.com", + }, + "us-west-2": endpoint{ + Hostname: "api.elastic-inference.us-west-2.amazonaws.com", + }, + }, + }, "api.mediatailor": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, @@ -327,6 +533,7 @@ var awsPartition = partition{ "api.sagemaker": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -334,8 +541,12 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", @@ -369,6 +580,7 @@ var awsPartition = partition{ "apigateway": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -378,9 +590,11 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -390,12 +604,33 @@ var awsPartition = partition{ }, "application-autoscaling": service{ Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appmesh": service{ + Endpoints: endpoints{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, @@ -409,6 +644,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -430,8 +666,14 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "fips": endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "appsync": service{ @@ -453,6 +695,7 @@ var awsPartition = partition{ "athena": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -460,10 +703,15 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, 
"eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -472,6 +720,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -481,9 +730,11 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -493,13 +744,10 @@ var awsPartition = partition{ }, "autoscaling-plans": service{ Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "autoscaling-plans", - }, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -507,17 +755,22 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "batch": service{ + "backup": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -529,6 +782,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -536,6 +790,53 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "batch": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.batch.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.batch.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.batch.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.batch.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "budgets": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -582,11 +883,23 @@ var awsPartition = partition{ "cloud9": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, + 
"ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -607,6 +920,7 @@ var awsPartition = partition{ "cloudformation": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -616,23 +930,49 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudfront": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "cloudfront.amazonaws.com", + "us-east-1-fips": endpoint{ + Hostname: "cloudformation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "cloudformation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "cloudformation-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "cloudformation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "cloudfront.amazonaws.com", Protocols: []string{"http", "https"}, CredentialScope: credentialScope{ Region: "us-east-1", @@ -674,6 +1014,8 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -698,6 +1040,7 @@ var awsPartition = partition{ "cloudtrail": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -707,19 +1050,61 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codeartifact": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, "codebuild": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -727,9 +1112,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -764,6 +1151,7 @@ var awsPartition = partition{ "codecommit": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -771,6 +1159,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -780,16 +1169,18 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "codedeploy": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -799,9 +1190,11 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -843,27 +1236,80 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "codepipeline-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "codepipeline-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "codepipeline-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "codepipeline-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + 
"us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "codestar": service{ + "codestar-connections": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -882,9 +1328,27 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "cognito-idp": service{ @@ -899,9 +1363,27 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "cognito-sync": service{ @@ -925,21 +1407,63 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "comprehend-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "comprehend-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "comprehend-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": 
endpoint{}, + "us-west-2": endpoint{}, }, }, "comprehendmedical": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -960,6 +1484,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -967,6 +1492,18 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "connect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "cur": service{ Endpoints: endpoints{ @@ -982,21 +1519,12 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "datapipeline": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "datasync": service{ + "dataexchange": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -1005,37 +1533,28 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "dax": service{ + "datapipeline": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "devicefarm": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, - }, - }, - "directconnect": service{ + "datasync": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1044,6 +1563,56 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "datasync-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1054,15 +1623,16 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "discovery": service{ + "devicefarm": service{ Endpoints: endpoints{ "us-west-2": endpoint{}, }, }, - "dms": service{ + "directconnect": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1072,16 +1642,83 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "directconnect-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "directconnect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "discovery": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, + "dms": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "dms-fips": endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "docdb": service{ Endpoints: endpoints{ @@ -1097,6 +1734,30 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + 
Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, "eu-central-1": endpoint{ Hostname: "rds.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ @@ -1109,6 +1770,18 @@ var awsPartition = partition{ Region: "eu-west-1", }, }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, "us-east-1": endpoint{ Hostname: "rds.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -1132,6 +1805,7 @@ var awsPartition = partition{ "ds": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1139,13 +1813,46 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "dynamodb": service{ @@ -1153,6 +1860,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1160,11 +1868,18 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -1172,11 +1887,36 @@ var awsPartition = partition{ Region: "us-east-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, + "me-south-1": 
endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "ec2": service{ @@ -1184,6 +1924,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1193,14 +1934,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ec2-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ec2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ec2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ec2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ec2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "ec2metadata": service{ @@ -1217,6 +1990,7 @@ var awsPartition = partition{ "ecs": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1226,47 +2000,120 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", + "fips-us-east-1": endpoint{ + Hostname: "ecs-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-1", }, }, - 
"sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-2": endpoint{ + Hostname: "ecs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ecs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ecs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.eks.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.eks.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.eks.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "elasticbeanstalk": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1275,31 +2122,185 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "elasticfilesystem": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-af-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-north-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "fips-eu-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-me-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "elasticloadbalancing": service{ @@ -1307,6 +2308,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1316,14 +2318,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "elasticmapreduce": service{ @@ -1332,6 +2360,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1343,9 +2372,41 @@ var awsPartition = partition{ SSLCommonName: "{service}.{region}.{dnsSuffix}", }, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", 
+ CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "{service}.{region}.{dnsSuffix}", @@ -1392,6 +2453,7 @@ var awsPartition = partition{ "es": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1401,6 +2463,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1410,16 +2473,18 @@ var awsPartition = partition{ Region: "us-west-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "events": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1429,19 +2494,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "events-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "events-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "events-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "events-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "firehose": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1449,14 +2541,40 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "firehose-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "firehose-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "firehose-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "firehose-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": 
endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "fms": service{ @@ -1466,25 +2584,159 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ap-northeast-1": endpoint{ + Hostname: "fms-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "fms-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "fms-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "fms-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "fms-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "fms-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "fms-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "fms-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "fms-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "fms-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "fms-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "fms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "forecast": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "forecastquery": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, "us-east-1": endpoint{}, 
"us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, "fsx": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1512,6 +2764,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1521,19 +2774,52 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "glacier-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "glacier-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "glue": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1541,13 +2827,40 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "glue-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "greengrass": service{ @@ -1557,10 +2870,26 @@ var awsPartition = partition{ }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + 
"ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "groundstation": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "me-south-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1570,6 +2899,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1581,30 +2911,61 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ + "us-east-1-fips": endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ "aws-global": endpoint{ Hostname: "iam.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, + "iam-fips": endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, }, }, "importexport": service{ @@ -1630,11 +2991,37 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "inspector-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "iot": service{ @@ -1644,16 +3031,23 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, 
"ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1668,19 +3062,87 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "kafka": service{ + "iotevents": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, - "kinesis": service{ + "ioteventsdata": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "data.iotevents.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "data.iotevents.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "data.iotevents.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "data.iotevents.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "data.iotevents.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "data.iotevents.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "iotsecuredtunneling": service{ Endpoints: endpoints{ "ap-east-1": endpoint{}, @@ -1695,6 +3157,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1702,41 +3165,24 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "kinesisanalytics": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "iotthingsgraph": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, }, - }, - "kinesisvideo": service{ - Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-2": endpoint{}, - "eu-central-1": 
endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "kms": service{ + "kafka": service{ Endpoints: endpoints{ - "ProdFips": endpoint{ - Hostname: "kms-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1749,6 +3195,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1756,9 +3203,10 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "lambda": service{ + "kinesis": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1768,35 +3216,68 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "kinesis-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "license-manager": service{ + "kinesisanalytics": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, - "lightsail": service{ + "kinesisvideo": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1807,14 +3288,16 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, - "logs": service{ + "kms": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1824,9 +3307,11 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1834,20 +3319,7 @@ var awsPartition 
= partition{ "us-west-2": endpoint{}, }, }, - "machinelearning": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "marketplacecommerceanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "mediaconnect": service{ + "lakeformation": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -1855,7 +3327,9 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1866,9 +3340,11 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "mediaconvert": service{ + "lambda": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1876,32 +3352,93 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "lambda-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "medialive": service{ + "license-manager": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "license-manager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "license-manager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "mediapackage": service{ + "lightsail": service{ 
Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -1909,35 +3446,20 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-west-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, - "mediastore": service{ + "logs": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1947,9 +3469,11 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1957,34 +3481,50 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "mgh": service{ + "machinelearning": service{ Endpoints: endpoints{ - "us-west-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, }, }, - "mobileanalytics": service{ + "macie": service{ Endpoints: endpoints{ + "fips-us-east-1": endpoint{ + Hostname: "macie-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "macie-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "models.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, + "managedblockchain": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, }, + }, + "marketplacecommerceanalytics": service{ + Endpoints: endpoints{ - "eu-west-1": endpoint{}, "us-east-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, + "mediaconnect": service{ + Endpoints: endpoints{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, @@ -1992,7 +3532,6 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, @@ -2005,104 +3544,58 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "mq": service{ + "mediaconvert": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mturk-requester": 
service{ - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "sandbox": endpoint{ - Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", - }, - "us-east-1": endpoint{}, - }, - }, - "neptune": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "rds.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - "ap-northeast-2": endpoint{ - Hostname: "rds.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{ - Hostname: "rds.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - "ap-southeast-1": endpoint{ - Hostname: "rds.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - "ap-southeast-2": endpoint{ - Hostname: "rds.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - "eu-central-1": endpoint{ - Hostname: "rds.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - "eu-west-1": endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-1", + Region: "ca-central-1", }, }, - "eu-west-2": endpoint{ - Hostname: "rds.eu-west-2.amazonaws.com", + "fips-us-east-1": endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-2", + Region: "us-east-1", }, }, - "us-east-1": endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", + "fips-us-east-2": endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-east-2", }, }, - "us-east-2": endpoint{ - Hostname: "rds.us-east-2.amazonaws.com", + "fips-us-west-1": endpoint{ + Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-west-1", }, }, - "us-west-2": endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", + "fips-us-west-2": endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "opsworks": service{ + "medialive": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -2110,63 +3603,59 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "opsworks-cm": service{ + "mediapackage": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "organizations": service{ - 
PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, + "mediastore": service{ Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "organizations.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "pinpoint": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", - }, - }, - Endpoints: endpoints{ - "ap-south-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "polly": service{ - + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2175,9 +3664,11 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2185,87 +3676,44 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "ram": service{ + "mgh": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "rds": service{ + "mobileanalytics": service{ Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1": endpoint{}, }, }, - "redshift": service{ - + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "rekognition": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, }, - }, - 
"resource-groups": service{ - Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2275,80 +3723,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "robomaker": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "route53.amazonaws.com", + "fips-us-east-1": endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - }, - }, - "route53domains": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "route53resolver": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "runtime.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", + "fips-us-east-2": endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, }, - }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-west-1": endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "runtime.sagemaker": service{ + "mq": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2356,290 +3770,207 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "s3": service{ - PartitionEndpoint: "us-east-1", - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{ - Hostname: "s3.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, 
- "ap-southeast-1": endpoint{ - Hostname: "s3.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ap-southeast-2": endpoint{ - Hostname: "s3.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{ - Hostname: "s3.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "s3-external-1": endpoint{ - Hostname: "s3-external-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "sa-east-1": endpoint{ - Hostname: "s3.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-east-1": endpoint{ - Hostname: "s3.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + "fips-us-east-2": endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{ - Hostname: "s3.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + "fips-us-west-1": endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, }, - "us-west-2": endpoint{ - Hostname: "s3.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + "fips-us-west-2": endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, + "mturk-requester": service{ + IsRegionalized: boxedFalse, - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, }, + }, + "neptune": service{ + Endpoints: endpoints{ "ap-northeast-1": endpoint{ - Hostname: "s3-control.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + Hostname: "rds.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-1", }, }, "ap-northeast-2": endpoint{ - Hostname: "s3-control.ap-northeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + Hostname: "rds.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-2", }, }, "ap-south-1": endpoint{ - Hostname: "s3-control.ap-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + Hostname: "rds.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-south-1", }, }, "ap-southeast-1": endpoint{ - Hostname: "s3-control.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + Hostname: "rds.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-1", }, }, "ap-southeast-2": endpoint{ - Hostname: "s3-control.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + Hostname: "rds.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-2", }, }, "ca-central-1": endpoint{ - Hostname: 
"s3-control.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + Hostname: "rds.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, }, "eu-central-1": endpoint{ - Hostname: "s3-control.eu-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + Hostname: "rds.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-central-1", }, }, "eu-north-1": endpoint{ - Hostname: "s3-control.eu-north-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + Hostname: "rds.eu-north-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-north-1", }, }, "eu-west-1": endpoint{ - Hostname: "s3-control.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + Hostname: "rds.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-1", }, }, "eu-west-2": endpoint{ - Hostname: "s3-control.eu-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + Hostname: "rds.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-2", }, }, "eu-west-3": endpoint{ - Hostname: "s3-control.eu-west-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + Hostname: "rds.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-3", }, }, - "sa-east-1": endpoint{ - Hostname: "s3-control.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + "me-south-1": endpoint{ + Hostname: "rds.me-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "sa-east-1", + Region: "me-south-1", }, }, "us-east-1": endpoint{ - Hostname: "s3-control.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + Hostname: "rds.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "us-east-1-fips": endpoint{ - Hostname: "s3-control-fips.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-east-2", }, }, - "us-east-2": endpoint{ - Hostname: "s3-control.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-west-2", }, }, - "us-east-2-fips": endpoint{ - Hostname: "s3-control-fips.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + }, + }, + "oidc": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ap-southeast-1", }, }, - "us-west-1": endpoint{ - Hostname: "s3-control.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + "ap-southeast-2": endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ap-southeast-2", }, }, - "us-west-1-fips": endpoint{ - Hostname: "s3-control-fips.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + "ca-central-1": endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ca-central-1", }, }, - "us-west-2": endpoint{ - Hostname: "s3-control.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + "eu-central-1": endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "eu-central-1", }, }, - "us-west-2-fips": endpoint{ - 
Hostname: "s3-control-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + "eu-west-1": endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "eu-west-1", }, }, - }, - }, - "sdb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"v2"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - Hostname: "sdb.amazonaws.com", - }, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "secretsmanager": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", + "eu-west-2": endpoint{ + Hostname: "oidc.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "eu-west-2", }, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", + "us-east-1": endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-east-1", }, }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", + "us-east-2": endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-2", }, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", + "us-west-2": endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, }, }, - "securityhub": service{ + "opsworks": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -2659,67 +3990,45 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "serverlessrepo": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + "opsworks-cm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-northeast-2": endpoint{ - Protocols: []string{"https"}, - }, - "ap-south-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-southeast-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-southeast-2": endpoint{ - Protocols: []string{"https"}, - }, - "ca-central-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-central-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-north-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-2": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-3": endpoint{ - Protocols: 
[]string{"https"}, - }, - "sa-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-east-2": endpoint{ - Protocols: []string{"https"}, - }, - "us-west-1": endpoint{ - Protocols: []string{"https"}, + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, }, - "us-west-2": endpoint{ - Protocols: []string{"https"}, + "fips-aws-global": endpoint{ + Hostname: "organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, }, }, }, - "servicecatalog": service{ + "outposts": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -2728,70 +4037,84 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + "fips-ca-central-1": endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + "fips-us-east-2": endpoint{ + Hostname: "outposts-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + "fips-us-west-1": endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + "fips-us-west-2": endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, + "me-south-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "servicediscovery": service{ - + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "shield": service{ - IsRegionalized: boxedFalse, - Defaults: endpoint{ - SSLCommonName: "shield.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": 
endpoint{ + Hostname: "pinpoint.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "pinpoint.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, - "sms": service{ + "polly": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2803,36 +4126,125 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "snowball": service{ + "fips-us-east-1": endpoint{ + Hostname: "polly-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "polly-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "polly-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "polly-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "portal.sso": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "portal.sso.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "projects.iot1click": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + "qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": 
endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, + }, + "ram": service{ + Endpoints: endpoints{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, @@ -2846,6 +4258,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2853,12 +4266,10 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, + "rds": service{ + Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2868,45 +4279,54 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "sqs-fips.us-east-1.amazonaws.com", + "me-south-1": endpoint{}, + "rds-fips.ca-central-1": endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "rds-fips.us-east-1": endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "sqs-fips.us-east-2.amazonaws.com", + "rds-fips.us-east-2": endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, }, - "fips-us-west-1": endpoint{ - Hostname: "sqs-fips.us-west-1.amazonaws.com", + "rds-fips.us-west-1": endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "sqs-fips.us-west-2.amazonaws.com", + "rds-fips.us-west-2": endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, "sa-east-1": endpoint{}, "us-east-1": endpoint{ - SSLCommonName: "queue.{dnsSuffix}", + SSLCommonName: "{service}.{dnsSuffix}", }, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "ssm": service{ + "redshift": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2916,19 +4336,69 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "redshift-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "redshift-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "redshift-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "redshift-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: 
"redshift-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "states": service{ + "resource-groups": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2938,19 +4408,79 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "resource-groups-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "resource-groups-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "resource-groups-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "robomaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "storagegateway": service{ + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "route53domains": service{ Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "route53resolver": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2962,6 +4492,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2969,14 +4500,26 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "streams.dynamodb": service{ + "runtime.lex": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, CredentialScope: credentialScope{ - Service: "dynamodb", + Service: "lex", }, }, Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": 
endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2988,90 +4531,274 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, - "sts": service{ + "s3": service{ PartitionEndpoint: "aws-global", + IsRegionalized: boxedTrue, Defaults: endpoint{ - Hostname: "sts.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{ + Hostname: "s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "aws-global": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "s3-external-1": endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-1": endpoint{ + Hostname: "s3.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, }, + "us-west-2": endpoint{ + Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", 
}, Endpoints: endpoints{ - "ap-east-1": endpoint{ - Hostname: "sts.ap-east-1.amazonaws.com", + "ap-northeast-1": endpoint{ + Hostname: "s3-control.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ - Region: "ap-east-1", + Region: "ap-northeast-1", }, }, - "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{ - Hostname: "sts.ap-northeast-2.amazonaws.com", + Hostname: "s3-control.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ Region: "ap-northeast-2", }, }, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "aws-global": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, + "ap-south-1": endpoint{ + Hostname: "s3-control.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "s3-control.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3-control.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "s3-control.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "s3-control.eu-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "s3-control.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "s3-control.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "s3-control.eu-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "s3-control.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3-control.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "s3-control.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, "us-east-1-fips": endpoint{ - Hostname: "sts-fips.us-east-1.amazonaws.com", + Hostname: "s3-control-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "us-east-2": endpoint{}, + "us-east-2": endpoint{ + Hostname: "s3-control.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, "us-east-2-fips": endpoint{ - Hostname: "sts-fips.us-east-2.amazonaws.com", + Hostname: 
"s3-control-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ Region: "us-east-2", }, }, - "us-west-1": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3-control.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-1-fips": endpoint{ - Hostname: "sts-fips.us-west-1.amazonaws.com", + Hostname: "s3-control-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ Region: "us-west-1", }, }, - "us-west-2": endpoint{}, + "us-west-2": endpoint{ + Hostname: "s3-control.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, "us-west-2-fips": endpoint{ - Hostname: "sts-fips.us-west-2.amazonaws.com", + Hostname: "s3-control-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ Region: "us-west-2", }, }, }, }, - "support": service{ + "savingsplans": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, Endpoints: endpoints{ - "us-east-1": endpoint{}, + "aws-global": endpoint{ + Hostname: "savingsplans.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, }, }, - "swf": service{ + "schemas": service{ Endpoints: endpoints{ "ap-east-1": endpoint{}, @@ -3093,31 +4820,29 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "tagging": service{ - + "sdb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v2"}, + }, Endpoints: endpoints{ - "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1": endpoint{ + Hostname: "sdb.amazonaws.com", + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "transfer": service{ + "secretsmanager": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3125,116 +4850,206 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ - Hostname: "translate-fips.us-east-1.amazonaws.com", + Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, "us-east-2": endpoint{}, "us-east-2-fips": endpoint{ - Hostname: "translate-fips.us-east-2.amazonaws.com", + Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", CredentialScope: 
credentialScope{ Region: "us-east-2", }, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "translate-fips.us-west-2.amazonaws.com", + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-west-1", }, }, - }, - }, - "waf": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "waf.amazonaws.com", + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-west-2", }, }, }, }, - "waf-regional": service{ + "securityhub": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "workdocs": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "securityhub-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "securityhub-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "securityhub-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "securityhub-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "workmail": service{ + "serverlessrepo": service{ Defaults: endpoint{ Protocols: []string{"https"}, }, Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ap-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ca-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-2": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-3": endpoint{ + Protocols: []string{"https"}, + }, + "me-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "sa-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-2": endpoint{ + Protocols: 
[]string{"https"}, + }, + "us-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-2": endpoint{ + Protocols: []string{"https"}, + }, }, }, - "workspaces": service{ + "servicecatalog": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, - "xray": service{ + "servicediscovery": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3246,6 +5061,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3253,1111 +5069,3998 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - }, -} - -// AwsCnPartition returns the Resolver for AWS China. 
-func AwsCnPartition() Partition { - return awscnPartition.Partition() -} + "session.qldb": service{ -var awscnPartition = partition{ - ID: "aws-cn", - Name: "AWS China", - DNSSuffix: "amazonaws.com.cn", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "cn-north-1": region{ - Description: "China (Beijing)", - }, - "cn-northwest-1": region{ - Description: "China (Ningxia)", + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, }, - }, - Services: services{ - "api.ecr": service{ - + "shield": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + "aws-global": endpoint{ + Hostname: "shield.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-north-1", + Region: "us-east-1", }, }, - "cn-northwest-1": endpoint{ - Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + "fips-aws-global": endpoint{ + Hostname: "shield-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-northwest-1", + Region: "us-east-1", }, }, }, }, - "apigateway": service{ + "sms": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com.cn", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: 
endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ap-northeast-1": endpoint{ + Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "snowball-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "snowball-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "snowball-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "snowball-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "snowball-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "snowball-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "snowball-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "snowball-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "snowball-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "snowball-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "snowball-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: 
"sns-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ssm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "ssm-facade-fips-us-east-1": endpoint{ + Hostname: "ssm-facade-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ssm-facade-fips-us-east-2": endpoint{ + Hostname: "ssm-facade-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "ssm-facade-fips-us-west-1": endpoint{ + Hostname: "ssm-facade-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + 
"ssm-facade-fips-us-west-2": endpoint{ + Hostname: "ssm-facade-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "states-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "states-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, 
+ }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "aws-global": endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-global", + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "support.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "swf-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "swf-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": 
endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transcribestreaming": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transfer": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-fips": endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + 
"ap-east-1": endpoint{ + Hostname: "waf-regional.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "waf-regional.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "waf-regional.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "waf-regional.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "waf-regional.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "waf-regional.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "waf-regional.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "waf-regional.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "waf-regional.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "waf-regional.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "waf-regional.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "waf-regional.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-north-1": endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: 
"waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-me-south-1": endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{ + Hostname: "waf-regional.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "waf-regional.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "waf-regional.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "waf-regional.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "waf-regional.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "waf-regional.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "workdocs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "workdocs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "workdocs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workmail": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": 
endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. +func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, + }, + Services: services{ + "acm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "appsync": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "budgets.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": 
endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + "fips-cn-north-1": endpoint{ + Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "fips-cn-northwest-1": endpoint{ + Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: 
endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "route53.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + 
"cn-northwest-1": endpoint{}, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "s3-control.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "cn-northwest-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "fips-cn-north-1": endpoint{ + Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-cn-global", + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "support.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, 
+ "xray": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). +func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-gov-east-1": region{ + Description: "AWS GovCloud (US-East)", + }, + "us-gov-west-1": region{ + Description: "AWS GovCloud (US-West)", + }, + }, + Services: services{ + "access-analyzer": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "acm-pca.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "acm-pca.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "api.ecr.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "api.ecr.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips-secondary": endpoint{ + Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "athena": 
service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "athena-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "batch.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "batch.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "comprehendmedical": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "datasync": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "directconnect.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "directconnect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ds-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ds-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "ec2.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "ec2.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ec2metadata": 
service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "email-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + 
"us-gov-east-1": endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "glacier.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "glue-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "glue-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Hostname: "greengrass.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "guardduty.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "iam-govcloud-fips": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, }, - "cloudformation": service{ + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "iotsecuredtunneling": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, 
}, - "cloudfront": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, + "kafka": service{ Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", - Protocols: []string{"http", "https"}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "kinesis-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-northwest-1", + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "kinesis-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", }, }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "cloudtrail": service{ + "kms": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "ProdFips": endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "codebuild": service{ + "lambda": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "fips-us-gov-east-1": endpoint{ + Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "codedeploy": service{ + "license-manager": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "fips-us-gov-east-1": endpoint{ + Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "cognito-identity": service{ + "logs": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "config": service{ + "mediaconvert": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-gov-west-1": endpoint{ + Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "directconnect": service{ + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "monitoring": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "fips-us-gov-east-1": endpoint{ + Hostname: "monitoring.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "monitoring.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "dms": service{ + "neptune": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + 
"us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "ds": service{ + "organizations": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "dynamodb": service{ + "outposts": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "outposts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "outposts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "pinpoint": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "rds.us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "rds.us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "redshift.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "redshift.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "resource-groups.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "resource-groups.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, + }, + "route53resolver": service{ + Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + 
"us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, + "runtime.sagemaker": service{ + Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, + "fips-us-gov-west-1": endpoint{ + Hostname: "s3-fips-us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "s3.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, }, }, }, - "ecs": service{ + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "s3-control.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3-control.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "elasticache": service{ + "secretsmanager": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "elasticbeanstalk": service{ + "securityhub": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "fips-us-gov-east-1": endpoint{ + Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "elasticloadbalancing": service{ + "serverlessrepo": service{ Defaults: endpoint{ Protocols: []string{"https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: 
"serverlessrepo.us-gov-east-1.amazonaws.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "elasticmapreduce": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, + "servicecatalog": service{ + Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "es": service{ + "sms": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "fips-us-gov-east-1": endpoint{ + Hostname: "sms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "events": service{ + "snowball": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "fips-us-gov-east-1": endpoint{ + Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "firehose": service{ + "sns": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "sns.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "sns.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "gamelift": service{ + "sqs": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "sqs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "sqs.us-gov-west-1.amazonaws.com", + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + "ssm": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ssm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ssm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "ssm-facade-fips-us-gov-east-1": endpoint{ + Hostname: "ssm-facade.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + 
"ssm-facade-fips-us-gov-west-1": endpoint{ + Hostname: "ssm-facade.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, + }, + "states": service{ + Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "fips-us-gov-east-1": endpoint{ + Hostname: "states-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "states.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "iam": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, + "storagegateway": service{ Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "iam.cn-north-1.amazonaws.com.cn", + "fips": endpoint{ + Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-north-1", + Region: "us-gov-west-1", }, }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "iot": service{ + "streams.dynamodb": service{ Defaults: endpoint{ CredentialScope: credentialScope{ - Service: "execute-api", + Service: "dynamodb", }, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "lambda": service{ + "sts": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "logs": service{ + "support": service{ + PartitionEndpoint: "aws-us-gov-global", Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "aws-us-gov-global": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "mediaconvert": service{ + "swf": service{ Endpoints: endpoints{ - "cn-northwest-1": endpoint{ - Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + "us-gov-east-1": endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-northwest-1", + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", }, }, }, }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - 
}, - }, - "polly": service{ + "tagging": service{ Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "rds": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, }, - }, - "redshift": service{ - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "fips-us-gov-east-1": endpoint{ + Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "s3": service{ + "translate": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, + Protocols: []string{"https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, + "waf-regional": service{ + Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "s3-control.cn-north-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, + "fips-us-gov-west-1": endpoint{ + Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-north-1", + Region: "us-gov-west-1", }, }, - "cn-northwest-1": endpoint{ - Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, + "us-gov-west-1": endpoint{ + Hostname: "waf-regional.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-northwest-1", + Region: "us-gov-west-1", }, }, }, }, - "sms": service{ + "workspaces": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "snowball": service{ + "xray": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, + }, +} + +// AwsIsoPartition returns the Resolver for AWS ISO (US). 
+func AwsIsoPartition() Partition { + return awsisoPartition.Partition() +} + +var awsisoPartition = partition{ + ID: "aws-iso", + Name: "AWS ISO (US)", + DNSSuffix: "c2s.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-iso-east-1": region{ + Description: "US ISO East", }, - "ssm": service{ + }, + Services: services{ + "api.ecr": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{ + Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, }, }, - "states": service{ + "api.sagemaker": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "storagegateway": service{ + "apigateway": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "streams.dynamodb": service{ + "application-autoscaling": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "sts": service{ + "autoscaling": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, }, }, - "swf": service{ + "cloudformation": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "tagging": service{ + "cloudtrail": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, - }, - }, -} - -// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). 
-func AwsUsGovPartition() Partition { - return awsusgovPartition.Partition() -} - -var awsusgovPartition = partition{ - ID: "aws-us-gov", - Name: "AWS GovCloud (US)", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "us-gov-east-1": region{ - Description: "AWS GovCloud (US-East)", - }, - "us-gov-west-1": region{ - Description: "AWS GovCloud (US)", - }, - }, - Services: services{ - "acm": service{ + }, + "codedeploy": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "acm-pca": service{ + "comprehend": service{ Defaults: endpoint{ Protocols: []string{"https"}, }, Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "api.ecr": service{ + "config": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "api.ecr.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "api.ecr.us-gov-west-1.amazonaws.com", + "us-iso-east-1": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ - Region: "us-gov-west-1", + Region: "us-iso-east-1", }, }, + "us-iso-east-1": endpoint{}, }, }, - "api.sagemaker": service{ + "ds": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "apigateway": service{ + "dynamodb": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, }, }, - "application-autoscaling": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - CredentialScope: credentialScope{ - Service: "application-autoscaling", + "ec2": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, }, }, + }, + "ecs": service{ + Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "athena": service{ + "elasticache": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "autoscaling": service{ + "elasticloadbalancing": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ + "us-iso-east-1": endpoint{ Protocols: []string{"http", "https"}, }, }, }, - "clouddirectory": service{ + "elasticmapreduce": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{ + Protocols: []string{"https"}, + }, }, }, - "cloudformation": service{ + "es": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "cloudhsm": service{ + "events": 
service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "cloudhsmv2": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", + "glacier": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, }, }, + }, + "health": service{ + Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "cloudtrail": service{ + "iam": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "aws-iso-global": endpoint{ + Hostname: "iam.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, }, }, - "codecommit": service{ + "kinesis": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "codedeploy": service{ + "kms": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + "ProdFips": endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ - Region: "us-gov-west-1", + Region: "us-iso-east-1", }, }, + "us-iso-east-1": endpoint{}, }, }, - "comprehend": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, + "lambda": service{ + Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "config": service{ + "logs": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "directconnect": service{ + "monitoring": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "dms": service{ + "rds": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "ds": service{ + "redshift": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "dynamodb": service{ + "route53": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + "aws-iso-global": endpoint{ + Hostname: "route53.c2s.ic.gov", CredentialScope: credentialScope{ - Region: "us-gov-west-1", + Region: "us-iso-east-1", }, }, }, }, - "ec2": service{ + "runtime.sagemaker": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3v4"}, + }, Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, }, }, }, - "ecs": service{ + "snowball": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": 
endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "elasticache": service{ + "sns": service{ Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, - "elasticbeanstalk": service{ + "sqs": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, }, }, - "elasticfilesystem": service{ + "states": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{}, }, }, - "elasticloadbalancing": service{ - + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ + "us-iso-east-1": endpoint{ Protocols: []string{"http", "https"}, }, }, }, - "elasticmapreduce": service{ + "sts": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"https"}, - }, + "us-iso-east-1": endpoint{}, }, }, - "es": service{ + "support": service{ + PartitionEndpoint: "aws-iso-global", Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "es-fips.us-gov-west-1.amazonaws.com", + "aws-iso-global": endpoint{ + Hostname: "support.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ - Region: "us-gov-west-1", + Region: "us-iso-east-1", }, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, - "events": service{ + "swf": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-iso-east-1": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + }, +} + +// AwsIsoBPartition returns the Resolver for AWS ISOB (US). 
+func AwsIsoBPartition() Partition { + return awsisobPartition.Partition() +} + +var awsisobPartition = partition{ + ID: "aws-iso-b", + Name: "AWS ISOB (US)", + DNSSuffix: "sc2s.sgov.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-isob-east-1": region{ + Description: "US ISOB East (Ohio)", + }, + }, + Services: services{ + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, }, }, - "firehose": service{ + "cloudformation": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "glacier": service{ + "cloudtrail": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, + "us-isob-east-1": endpoint{}, }, }, - "glue": service{ + "config": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, + "directconnect": service{ + Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "iam": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, + "dms": service{ Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "iam.us-gov.amazonaws.com", + "dms-fips": endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ - Region: "us-gov-west-1", + Region: "us-isob-east-1", }, }, + "us-isob-east-1": endpoint{}, }, }, - "inspector": service{ - + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "iot": service{ + "ec2": service{ Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "kinesis": service{ + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, }, }, - "kms": service{ + "elasticache": service{ Endpoints: endpoints{ - "ProdFips": endpoint{ - Hostname: "kms-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "lambda": service{ + "elasticloadbalancing": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{ + Protocols: []string{"https"}, + }, }, }, - "license-manager": service{ + "elasticmapreduce": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, 
- "logs": service{ + "events": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "mediaconvert": service{ + "glacier": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, + "health": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "aws-iso-b-global": endpoint{ + Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, }, }, - "monitoring": service{ + "kinesis": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "organizations": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, + "kms": service{ Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "organizations.us-gov-west-1.amazonaws.com", + "ProdFips": endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ - Region: "us-gov-west-1", + Region: "us-isob-east-1", }, }, + "us-isob-east-1": endpoint{}, }, }, - "polly": service{ + "lambda": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "rds": service{ + "license-manager": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "redshift": service{ + "logs": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "rekognition": service{ + "monitoring": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "runtime.sagemaker": service{ + "rds": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "s3": service{ - Defaults: endpoint{ - SignatureVersions: []string{"s3", "s3v4"}, - }, + "redshift": service{ + Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "s3-fips-us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-east-1": endpoint{ - Hostname: "s3.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - "us-gov-west-1": endpoint{ - Hostname: "s3.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, + "us-isob-east-1": endpoint{}, }, }, - "s3-control": service{ + "s3": service{ Defaults: endpoint{ - Protocols: []string{"https"}, + Protocols: []string{"http", "https"}, SignatureVersions: []string{"s3v4"}, }, Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "s3-control.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-east-1-fips": endpoint{ - Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "s3-control.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - 
"us-gov-west-1-fips": endpoint{ - Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, "snowball": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, "sns": service{ - + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, + "us-isob-east-1": endpoint{}, }, }, "sqs": service{ - + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, + "us-isob-east-1": endpoint{}, }, }, "ssm": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, "states": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, "streams.dynamodb": service{ Defaults: endpoint{ + Protocols: []string{"http", "https"}, CredentialScope: credentialScope{ Service: "dynamodb", }, }, Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, + "us-isob-east-1": endpoint{}, }, }, "sts": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, - "tagging": service{ + "support": service{ + PartitionEndpoint: "aws-iso-b-global", Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + "aws-iso-b-global": endpoint{ + Hostname: "support.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ - Region: "us-gov-west-1", + Region: "us-isob-east-1", }, }, }, }, - "waf-regional": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "workspaces": service{ + "swf": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-isob-east-1": endpoint{}, }, }, }, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index f82babf6..ca956e5f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -3,6 +3,7 @@ package endpoints import ( "fmt" "regexp" + "strings" "github.com/aws/aws-sdk-go/aws/awserr" ) @@ -46,6 +47,108 @@ type Options struct { // // This option is ignored if StrictMatching is enabled. 
ResolveUnknownService bool + + // STS Regional Endpoint flag helps with resolving the STS endpoint + STSRegionalEndpoint STSRegionalEndpoint + + // S3 Regional Endpoint flag helps with resolving the S3 endpoint + S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint +} + +// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint +// options. +type STSRegionalEndpoint int + +func (e STSRegionalEndpoint) String() string { + switch e { + case LegacySTSEndpoint: + return "legacy" + case RegionalSTSEndpoint: + return "regional" + case UnsetSTSEndpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified. + UnsetSTSEndpoint STSRegionalEndpoint = iota + + // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified + // to use legacy endpoints. + LegacySTSEndpoint + + // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified + // to use regional endpoints. + RegionalSTSEndpoint +) + +// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based +// on the input string provided in env config or shared config by the user. +// +// `legacy`, `regional` are the only case-insensitive valid strings for +// resolving the STS regional Endpoint flag. +func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacySTSEndpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalSTSEndpoint, nil + default: + return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s) + } +} + +// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1 +// Regional Endpoint options. +type S3UsEast1RegionalEndpoint int + +func (e S3UsEast1RegionalEndpoint) String() string { + switch e { + case LegacyS3UsEast1Endpoint: + return "legacy" + case RegionalS3UsEast1Endpoint: + return "regional" + case UnsetS3UsEast1Endpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not + // specified. + UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota + + // LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is + // specified to use legacy endpoints. + LegacyS3UsEast1Endpoint + + // RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is + // specified to use regional endpoints. + RegionalS3UsEast1Endpoint +) + +// GetS3UsEast1RegionalEndpoint function returns the S3UsEast1RegionalEndpointFlag based +// on the input string provided in env config or shared config by the user. +// +// `legacy`, `regional` are the only case-insensitive valid strings for +// resolving the S3 regional Endpoint flag. +func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacyS3UsEast1Endpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalS3UsEast1Endpoint, nil + default: + return UnsetS3UsEast1Endpoint, + fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s) + } } // Set combines all of the option functions together. @@ -79,6 +182,12 @@ func ResolveUnknownServiceOption(o *Options) { o.ResolveUnknownService = true } +// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve +// STS endpoint to their regional endpoint, instead of the global endpoint. 
+func STSRegionalEndpointOption(o *Options) { + o.STSRegionalEndpoint = RegionalSTSEndpoint +} + // A Resolver provides the interface for functionality to resolve endpoints. // The build in Partition and DefaultResolver return value satisfy this interface. type Resolver interface { @@ -170,10 +279,13 @@ func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { // A Partition provides the ability to enumerate the partition's regions // and services. type Partition struct { - id string - p *partition + id, dnsSuffix string + p *partition } +// DNSSuffix returns the base domain name of the partition. +func (p Partition) DNSSuffix() string { return p.dnsSuffix } + // ID returns the identifier of the partition. func (p Partition) ID() string { return p.id } @@ -191,7 +303,7 @@ func (p Partition) ID() string { return p.id } // require the provided service and region to be known by the partition. // If the endpoint cannot be strictly resolved an error will be returned. This // mode is useful to ensure the endpoint resolved is valid. Without -// StrictMatching enabled the endpoint returned my look valid but may not work. +// StrictMatching enabled the endpoint returned may look valid but may not work. // StrictMatching requires the SDK to be updated if you want to take advantage // of new regions and services expansions. // @@ -205,7 +317,7 @@ func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) ( // Regions returns a map of Regions indexed by their ID. This is useful for // enumerating over the regions in a partition. func (p Partition) Regions() map[string]Region { - rs := map[string]Region{} + rs := make(map[string]Region, len(p.p.Regions)) for id, r := range p.p.Regions { rs[id] = Region{ id: id, @@ -220,7 +332,7 @@ func (p Partition) Regions() map[string]Region { // Services returns a map of Service indexed by their ID. This is useful for // enumerating over the services in a partition. func (p Partition) Services() map[string]Service { - ss := map[string]Service{} + ss := make(map[string]Service, len(p.p.Services)) for id := range p.p.Services { ss[id] = Service{ id: id, @@ -307,7 +419,7 @@ func (s Service) Regions() map[string]Region { // A region is the AWS region the service exists in. Whereas a Endpoint is // an URL that can be resolved to a instance of a service. func (s Service) Endpoints() map[string]Endpoint { - es := map[string]Endpoint{} + es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints)) for id := range s.p.Services[s.id].Endpoints { es[id] = Endpoint{ id: id, @@ -347,6 +459,9 @@ type ResolvedEndpoint struct { // The endpoint URL URL string + // The endpoint partition + PartitionID string + // The region that should be used for signing requests. 
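
The STSRegionalEndpoint flag, its GetSTSRegionalEndpoint parser, and the STSRegionalEndpointOption helper above are new in this vendored SDK. A hedged sketch of exercising them directly against the resolver follows; the URLs in the comments are assumptions based on the legacy-region table introduced later in this diff, and errors from the second pair of calls are elided for brevity.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        // Parse the flag the same way env/shared config strings are parsed.
        flag, err := endpoints.GetSTSRegionalEndpoint("regional")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("flag:", flag) // prints "regional"

        // Without the option, STS in a legacy region is expected to resolve to
        // the global endpoint; with the option it should stay regional.
        global, _ := endpoints.AwsPartition().EndpointFor("sts", "eu-west-1")
        regional, _ := endpoints.AwsPartition().EndpointFor("sts", "eu-west-1",
            endpoints.STSRegionalEndpointOption)

        fmt.Println(global.URL)   // expected: https://sts.amazonaws.com
        fmt.Println(regional.URL) // expected: https://sts.eu-west-1.amazonaws.com
    }
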
SigningRegion string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go new file mode 100644 index 00000000..df75e899 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go @@ -0,0 +1,24 @@ +package endpoints + +var legacyGlobalRegions = map[string]map[string]struct{}{ + "sts": { + "ap-northeast-1": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-north-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "eu-west-3": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {}, + }, + "s3": { + "us-east-1": {}, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index ff6f76db..77361372 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -7,6 +7,8 @@ import ( "strings" ) +var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`) + type partitions []partition func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { @@ -54,8 +56,9 @@ type partition struct { func (p partition) Partition() Partition { return Partition{ - id: p.ID, - p: &p, + dnsSuffix: p.DNSSuffix, + id: p.ID, + p: &p, } } @@ -74,24 +77,56 @@ func (p partition) canResolveEndpoint(service, region string, strictMatch bool) return p.RegionRegex.MatchString(region) } +func allowLegacyEmptyRegion(service string) bool { + legacy := map[string]struct{}{ + "budgets": {}, + "ce": {}, + "chime": {}, + "cloudfront": {}, + "ec2metadata": {}, + "iam": {}, + "importexport": {}, + "organizations": {}, + "route53": {}, + "sts": {}, + "support": {}, + "waf": {}, + } + + _, allowed := legacy[service] + return allowed +} + func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { var opt Options opt.Set(opts...) s, hasService := p.Services[service] - if !(hasService || opt.ResolveUnknownService) { + if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { // Only return error if the resolver will not fallback to creating // endpoint based on service endpoint ID passed in. 
return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) } + if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { + region = s.PartitionEndpoint + } + + if (service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint) || + (service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint) { + if _, ok := legacyGlobalRegions[service][region]; ok { + region = "aws-global" + } + } + e, hasEndpoint := s.endpointForRegion(region) - if !hasEndpoint && opt.StrictMatching { + if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) { return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) } defs := []endpoint{p.Defaults, s.Defaults} - return e.resolve(service, region, p.DNSSuffix, defs, opt), nil + + return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt) } func serviceList(ss services) []string { @@ -200,7 +235,7 @@ func getByPriority(s []string, p []string, def string) string { return s[0] } -func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { +func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { var merged endpoint for _, def := range defs { merged.mergeIn(def) @@ -208,11 +243,27 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op merged.mergeIn(e) e = merged - hostname := e.Hostname + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + signingName := e.CredentialScope.Service + var signingNameDerived bool + if len(signingName) == 0 { + signingName = service + signingNameDerived = true + } + + hostname := e.Hostname // Offset the hostname for dualstack if enabled if opts.UseDualStack && e.HasDualStack == boxedTrue { hostname = e.DualStackHostname + region = signingRegion + } + + if !validateInputRegion(region) { + return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided") } u := strings.Replace(hostname, "{service}", service, 1) @@ -222,25 +273,14 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) u = fmt.Sprintf("%s://%s", scheme, u) - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - - signingName := e.CredentialScope.Service - var signingNameDerived bool - if len(signingName) == 0 { - signingName = service - signingNameDerived = true - } - return ResolvedEndpoint{ URL: u, + PartitionID: partitionID, SigningRegion: signingRegion, SigningName: signingName, SigningNameDerived: signingNameDerived, SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - } + }, nil } func getEndpointScheme(protocols []string, disableSSL bool) string { @@ -305,3 +345,7 @@ const ( boxedFalse boxedTrue ) + +func validateInputRegion(region string) bool { + return regionValidationRegex.MatchString(region) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go index 271da432..d9b37f4d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -1,18 +1,17 @@ -// +build !appengine,!plan9 - package request import ( - "net" - "os" - "syscall" + 
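
Two caller-visible effects of the EndpointFor and resolve changes above: malformed region identifiers are now rejected by validateInputRegion, and ResolvedEndpoint carries a PartitionID. A small sketch, with illustrative rather than verified output in the comments:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        // A syntactically invalid region should now be rejected instead of
        // being interpolated into a hostname.
        if _, err := endpoints.AwsPartition().EndpointFor("s3", "not a region!"); err != nil {
            fmt.Println("rejected:", err)
        }

        // The resolved endpoint now also reports the partition it came from.
        re, err := endpoints.AwsPartition().EndpointFor("s3", "us-west-2")
        if err == nil {
            fmt.Println(re.URL, re.PartitionID) // e.g. https://s3.us-west-2.amazonaws.com aws
        }
    }
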
"strings" ) func isErrConnectionReset(err error) bool { - if opErr, ok := err.(*net.OpError); ok { - if sysErr, ok := opErr.Err.(*os.SyscallError); ok { - return sysErr.Err == syscall.ECONNRESET - } + if strings.Contains(err.Error(), "read: connection reset") { + return false + } + + if strings.Contains(err.Error(), "connection reset") || + strings.Contains(err.Error(), "broken pipe") { + return true } return false diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go deleted file mode 100644 index daf9eca4..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine plan9 - -package request - -import ( - "strings" -) - -func isErrConnectionReset(err error) bool { - return strings.Contains(err.Error(), "connection reset") -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index 8ef8548a..e819ab6c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -10,6 +10,7 @@ import ( type Handlers struct { Validate HandlerList Build HandlerList + BuildStream HandlerList Sign HandlerList Send HandlerList ValidateResponse HandlerList @@ -23,11 +24,12 @@ type Handlers struct { Complete HandlerList } -// Copy returns of this handler's lists. +// Copy returns a copy of this handler's lists. func (h *Handlers) Copy() Handlers { return Handlers{ Validate: h.Validate.copy(), Build: h.Build.copy(), + BuildStream: h.BuildStream.copy(), Sign: h.Sign.copy(), Send: h.Send.copy(), ValidateResponse: h.ValidateResponse.copy(), @@ -42,10 +44,11 @@ func (h *Handlers) Copy() Handlers { } } -// Clear removes callback functions for all handlers +// Clear removes callback functions for all handlers. func (h *Handlers) Clear() { h.Validate.Clear() h.Build.Clear() + h.BuildStream.Clear() h.Send.Clear() h.Sign.Clear() h.Unmarshal.Clear() @@ -59,6 +62,54 @@ func (h *Handlers) Clear() { h.Complete.Clear() } +// IsEmpty returns if there are no handlers in any of the handlerlists. +func (h *Handlers) IsEmpty() bool { + if h.Validate.Len() != 0 { + return false + } + if h.Build.Len() != 0 { + return false + } + if h.BuildStream.Len() != 0 { + return false + } + if h.Send.Len() != 0 { + return false + } + if h.Sign.Len() != 0 { + return false + } + if h.Unmarshal.Len() != 0 { + return false + } + if h.UnmarshalStream.Len() != 0 { + return false + } + if h.UnmarshalMeta.Len() != 0 { + return false + } + if h.UnmarshalError.Len() != 0 { + return false + } + if h.ValidateResponse.Len() != 0 { + return false + } + if h.Retry.Len() != 0 { + return false + } + if h.AfterRetry.Len() != 0 { + return false + } + if h.CompleteAttempt.Len() != 0 { + return false + } + if h.Complete.Len() != 0 { + return false + } + + return true +} + // A HandlerListRunItem represents an entry in the HandlerList which // is being run. type HandlerListRunItem struct { @@ -275,3 +326,18 @@ func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { AddToUserAgent(r, s) } } + +// WithSetRequestHeaders updates the operation request's HTTP header to contain +// the header key value pairs provided. If the header key already exists in the +// request's HTTP header set, the existing value(s) will be replaced. 
+func WithSetRequestHeaders(h map[string]string) Option { + return withRequestHeader(h).SetRequestHeaders +} + +type withRequestHeader map[string]string + +func (h withRequestHeader) SetRequestHeaders(r *Request) { + for k, v := range h { + r.HTTPRequest.Header[k] = []string{v} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go index b0c2ef4f..9370fa50 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -15,12 +15,15 @@ type offsetReader struct { closed bool } -func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { +func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { reader := &offsetReader{} - buf.Seek(offset, sdkio.SeekStart) + _, err := buf.Seek(offset, sdkio.SeekStart) + if err != nil { + return nil, err + } reader.buf = buf - return reader + return reader, nil } // Close will close the instance of the offset reader's access to @@ -54,7 +57,9 @@ func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { // CloseAndCopy will return a new offsetReader with a copy of the old buffer // and close the old buffer. -func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { - o.Close() +func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { + if err := o.Close(); err != nil { + return nil, err + } return newOffsetReader(o.buf, offset) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 8f2eb3e4..d597c6ea 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -36,6 +36,10 @@ const ( // API request that was canceled. Requests given a aws.Context may // return this error when canceled. CanceledErrorCode = "RequestCanceled" + + // ErrCodeRequestError is an error preventing the SDK from continuing to + // process the request. + ErrCodeRequestError = "RequestError" ) // A Request is the service request to be made. @@ -51,6 +55,7 @@ type Request struct { HTTPRequest *http.Request HTTPResponse *http.Response Body io.ReadSeeker + streamingBody io.ReadCloser BodyStart int64 // offset from beginning of Body that the request body starts Params interface{} Error error @@ -64,6 +69,15 @@ type Request struct { LastSignedAt time.Time DisableFollowRedirects bool + // Additional API error codes that should be retried. IsErrorRetryable + // will consider these codes in addition to its built in cases. + RetryErrorCodes []string + + // Additional API error codes that should be retried with throttle backoff + // delay. IsErrorThrottle will consider these codes in addition to its + // built in cases. + ThrottleErrorCodes []string + // A value greater than 0 instructs the request to be signed as Presigned URL // You should not set this field directly. Instead use Request's // Presign or PresignRequest methods. @@ -90,8 +104,12 @@ type Operation struct { BeforePresignFn func(r *Request) error } -// New returns a new Request pointer for the service API -// operation and parameters. +// New returns a new Request pointer for the service API operation and +// parameters. +// +// A Retryer should be provided to direct how the request is retried. If +// Retryer is nil, a default no retry value will be used. 
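
WithSetRequestHeaders above is a new request Option. The sketch below applies it to a single API request; it assumes the s3 service client is available alongside this vendored core (goreplay's S3 support suggests it is), and the header name and value are purely hypothetical.

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
        svc := s3.New(sess)

        req, _ := svc.ListBucketsRequest(&s3.ListBucketsInput{})

        // Force a header on this one request; existing values for the key are replaced.
        req.ApplyOptions(request.WithSetRequestHeaders(map[string]string{
            "X-Example-Trace-Id": "demo-123", // hypothetical header
        }))

        if err := req.Send(); err != nil {
            log.Fatal(err)
        }
    }
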
You can use +// NoOpRetryer in the Client package to disable retry behavior directly. // // Params is any value of input parameters to be the request payload. // Data is pointer value to an object which the request's response @@ -99,6 +117,10 @@ type Operation struct { func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + if retryer == nil { + retryer = noOpRetryer{} + } + method := operation.HTTPMethod if method == "" { method = "POST" @@ -113,8 +135,6 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) } - SanitizeHostForHeader(httpReq) - r := &Request{ Config: cfg, ClientInfo: clientInfo, @@ -231,6 +251,10 @@ func (r *Request) WillRetry() bool { return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() } +func fmtAttemptCount(retryCount, maxRetries int) string { + return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries) +} + // ParamsFilled returns if the request's parameters have been populated // and the parameters are valid. False is returned if no parameters are // provided or invalid. @@ -259,10 +283,28 @@ func (r *Request) SetStringBody(s string) { // SetReaderBody will set the request's body reader. func (r *Request) SetReaderBody(reader io.ReadSeeker) { r.Body = reader - r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Bodies current offset. + + if aws.IsReaderSeekable(reader) { + var err error + // Get the Bodies current offset so retries will start from the same + // initial position. + r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to determine start of request body", err) + return + } + } r.ResetBody() } +// SetStreamingBody set the reader to be used for the request that will stream +// bytes to the server. Request's Body must not be set to any reader. +func (r *Request) SetStreamingBody(reader io.ReadCloser) { + r.streamingBody = reader + r.SetReaderBody(aws.ReadSeekCloser(reader)) +} + // Presign returns the request's signed URL. Error will be returned // if the signing fails. The expire parameter is only used for presigned Amazon // S3 API requests. 
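
The New constructor above now falls back to an internal no-op retryer when no Retryer is supplied, and its doc comment points callers at the exported NoOpRetryer in the client package for turning retries off explicitly. A minimal sketch, assuming client.NoOpRetryer is present in this vendored SDK version:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/client"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Disable retries for every client built from this session.
        sess := session.Must(session.NewSession(&aws.Config{
            Region:  aws.String("us-east-1"),
            Retryer: client.NoOpRetryer{},
        }))
        _ = sess
    }
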
All other AWS services will use a fixed expiration @@ -330,16 +372,15 @@ func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, err return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil } -func debugLogReqError(r *Request, stage string, retrying bool, err error) { +const ( + notRetrying = "not retrying" +) + +func debugLogReqError(r *Request, stage, retryStr string, err error) { if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { return } - retryStr := "not retrying" - if retrying { - retryStr = "will retry" - } - r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) } @@ -358,12 +399,12 @@ func (r *Request) Build() error { if !r.built { r.Handlers.Validate.Run(r) if r.Error != nil { - debugLogReqError(r, "Validate Request", false, r.Error) + debugLogReqError(r, "Validate Request", notRetrying, r.Error) return r.Error } r.Handlers.Build.Run(r) if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) + debugLogReqError(r, "Build Request", notRetrying, r.Error) return r.Error } r.built = true @@ -379,20 +420,30 @@ func (r *Request) Build() error { func (r *Request) Sign() error { r.Build() if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) + debugLogReqError(r, "Build Request", notRetrying, r.Error) return r.Error } + SanitizeHostForHeader(r.HTTPRequest) + r.Handlers.Sign.Run(r) return r.Error } -func (r *Request) getNextRequestBody() (io.ReadCloser, error) { +func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { + if r.streamingBody != nil { + return r.streamingBody, nil + } + if r.safeBody != nil { r.safeBody.Close() } - r.safeBody = newOffsetReader(r.Body, r.BodyStart) + r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to get next request body reader", err) + } // Go 1.8 tightened and clarified the rules code needs to use when building // requests with the http package. 
Go 1.8 removed the automatic detection @@ -409,10 +460,10 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { // Related golang/go#18257 l, err := aws.SeekerLen(r.Body) if err != nil { - return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) + return nil, awserr.New(ErrCodeSerialization, + "failed to compute request body size", err) } - var body io.ReadCloser if l == 0 { body = NoBody } else if l > 0 { @@ -473,29 +524,28 @@ func (r *Request) Send() error { r.AttemptTime = time.Now() if err := r.Sign(); err != nil { - debugLogReqError(r, "Sign Request", false, err) + debugLogReqError(r, "Sign Request", notRetrying, err) return err } if err := r.sendRequest(); err == nil { return nil - } else if !shouldRetryCancel(r.Error) { - return err - } else { - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) + } + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) - if r.Error != nil || !aws.BoolValue(r.Retryable) { - return r.Error - } + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error + } - r.prepareRetry() - continue + if err := r.prepareRetry(); err != nil { + r.Error = err + return err } } } -func (r *Request) prepareRetry() { +func (r *Request) prepareRetry() error { if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) @@ -506,12 +556,19 @@ func (r *Request) prepareRetry() { // the request's body even though the Client's Do returned. r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) r.ResetBody() + if err := r.Error; err != nil { + return awserr.New(ErrCodeSerialization, + "failed to prepare body for retry", err) + + } // Closing response body to ensure that no response body is leaked // between retry attempts. if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { r.HTTPResponse.Body.Close() } + + return nil } func (r *Request) sendRequest() (sendErr error) { @@ -520,7 +577,9 @@ func (r *Request) sendRequest() (sendErr error) { r.Retryable = nil r.Handlers.Send.Run(r) if r.Error != nil { - debugLogReqError(r, "Send Request", r.WillRetry(), r.Error) + debugLogReqError(r, "Send Request", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) return r.Error } @@ -528,13 +587,17 @@ func (r *Request) sendRequest() (sendErr error) { r.Handlers.ValidateResponse.Run(r) if r.Error != nil { r.Handlers.UnmarshalError.Run(r) - debugLogReqError(r, "Validate Response", r.WillRetry(), r.Error) + debugLogReqError(r, "Validate Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) return r.Error } r.Handlers.Unmarshal.Run(r) if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", r.WillRetry(), r.Error) + debugLogReqError(r, "Unmarshal Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) return r.Error } @@ -561,48 +624,6 @@ func AddToUserAgent(r *Request, s string) { r.HTTPRequest.Header.Set("User-Agent", s) } -type temporary interface { - Temporary() bool -} - -func shouldRetryCancel(err error) bool { - switch err := err.(type) { - case awserr.Error: - if err.Code() == CanceledErrorCode { - return false - } - return shouldRetryCancel(err.OrigErr()) - case *url.Error: - if strings.Contains(err.Error(), "connection refused") { - // Refused connections should be retried as the service may not yet - // be running on the port. Go TCP dial considers refused - // connections as not temporary. 
- return true - } - // *url.Error only implements Temporary after golang 1.6 but since - // url.Error only wraps the error: - return shouldRetryCancel(err.Err) - case temporary: - // If the error is temporary, we want to allow continuation of the - // retry process - return err.Temporary() - case nil: - // `awserr.Error.OrigErr()` can be nil, meaning there was an error but - // because we don't know the cause, it is marked as retriable. See - // TestRequest4xxUnretryable for an example. - return true - default: - switch err.Error() { - case "net/http: request canceled", - "net/http: request canceled while waiting for connection": - // known 1.5 error case when an http request is cancelled - return false - } - // here we don't know the error; so we allow a retry. - return true - } -} - // SanitizeHostForHeader removes default port from host and updates request.Host func SanitizeHostForHeader(r *http.Request) { host := getHost(r) @@ -618,6 +639,10 @@ func getHost(r *http.Request) string { return r.Host } + if r.URL == nil { + return "" + } + return r.URL.Host } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go index 7c6a8000..de1292f4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -4,6 +4,8 @@ package request import ( "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" ) // NoBody is a http.NoBody reader instructing Go HTTP client to not include @@ -24,7 +26,8 @@ var NoBody = http.NoBody func (r *Request) ResetBody() { body, err := r.getNextRequestBody() if err != nil { - r.Error = err + r.Error = awserr.New(ErrCodeSerialization, + "failed to reset request body", err) return } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go index a633ed5a..64784e16 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -17,11 +17,13 @@ import ( // does the pagination between API operations, and Paginator defines the // configuration that will be used per page request. // -// cont := true -// for p.Next() && cont { +// for p.Next() { // data := p.Page().(*s3.ListObjectsOutput) // // process the page's data +// // ... +// // break out of loop to stop fetching additional pages // } +// // return p.Err() // // See service client API operation Pages methods for examples how the SDK will @@ -146,7 +148,7 @@ func (r *Request) nextPageTokens() []interface{} { return nil } case bool: - if v == false { + if !v { return nil } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index d0aa54c6..752ae47f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -1,32 +1,81 @@ package request import ( + "net" + "net/url" + "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" ) -// Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the client.DefaultRetryer -// structure, which contains basic retry logic using exponential backoff. 
+// Retryer provides the interface drive the SDK's request retry behavior. The +// Retryer implementation is responsible for implementing exponential backoff, +// and determine if a request API error should be retried. +// +// client.DefaultRetryer is the SDK's default implementation of the Retryer. It +// uses the which uses the Request.IsErrorRetryable and Request.IsErrorThrottle +// methods to determine if the request is retried. type Retryer interface { + // RetryRules return the retry delay that should be used by the SDK before + // making another request attempt for the failed request. RetryRules(*Request) time.Duration + + // ShouldRetry returns if the failed request is retryable. + // + // Implementations may consider request attempt count when determining if a + // request is retryable, but the SDK will use MaxRetries to limit the + // number of attempts a request are made. ShouldRetry(*Request) bool + + // MaxRetries is the number of times a request may be retried before + // failing. MaxRetries() int } -// WithRetryer sets a config Retryer value to the given Config returning it -// for chaining. +// WithRetryer sets a Retryer value to the given Config returning the Config +// value for chaining. The value must not be nil. func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + if retryer == nil { + if cfg.Logger != nil { + cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.") + } + retryer = noOpRetryer{} + } cfg.Retryer = retryer return cfg + +} + +// noOpRetryer is a internal no op retryer used when a request is created +// without a retryer. +// +// Provides a retryer that performs no retries. +// It should be used when we do not want retries to be performed. +type noOpRetryer struct{} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API; For NoOpRetryer the MaxRetries will always be zero. +func (d noOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for NoOpRetryer, as it should never retry. +func (d noOpRetryer) ShouldRetry(_ *Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since NoOpRetryer does not retry, RetryRules always returns 0. +func (d noOpRetryer) RetryRules(_ *Request) time.Duration { + return 0 } // retryableCodes is a collection of service response codes which are retry-able // without any further action. 
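
Since the Retryer interface above now documents each method, a hedged sketch of a custom implementation may help. It reuses the request package's own retryability checks and is wired in with WithRetryer from this same file; the attempt cap and fixed delay are arbitrary choices for illustration.

    package main

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    // cappedRetryer is a hypothetical Retryer: at most two retries, fixed delay.
    type cappedRetryer struct{}

    func (cappedRetryer) MaxRetries() int { return 2 }

    func (cappedRetryer) ShouldRetry(r *request.Request) bool {
        return r.IsErrorRetryable() || r.IsErrorThrottle()
    }

    func (cappedRetryer) RetryRules(*request.Request) time.Duration {
        return 200 * time.Millisecond
    }

    func main() {
        cfg := request.WithRetryer(aws.NewConfig().WithRegion("us-east-1"), cappedRetryer{})
        sess := session.Must(session.NewSession(cfg))
        _ = sess
    }
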
var retryableCodes = map[string]struct{}{ - "RequestError": {}, + ErrCodeRequestError: {}, "RequestTimeout": {}, ErrCodeResponseTimeout: {}, "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout @@ -34,6 +83,7 @@ var retryableCodes = map[string]struct{}{ var throttleCodes = map[string]struct{}{ "ProvisionedThroughputExceededException": {}, + "ThrottledException": {}, // SNS, XRay, ResourceGroupsTagging API "Throttling": {}, "ThrottlingException": {}, "RequestLimitExceeded": {}, @@ -42,6 +92,7 @@ var throttleCodes = map[string]struct{}{ "TooManyRequestsException": {}, // Lambda functions "PriorRequestNotComplete": {}, // Route53 "TransactionInProgressException": {}, + "EC2ThrottledException": {}, // EC2 } // credsExpiredCodes is a collection of error codes which signify the credentials @@ -76,10 +127,6 @@ var validParentCodes = map[string]struct{}{ ErrCodeRead: {}, } -type temporaryError interface { - Temporary() bool -} - func isNestedErrorRetryable(parentErr awserr.Error) bool { if parentErr == nil { return false @@ -98,7 +145,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { return isCodeRetryable(aerr.Code()) } - if t, ok := err.(temporaryError); ok { + if t, ok := err.(temporary); ok { return t.Temporary() || isErrConnectionReset(err) } @@ -108,32 +155,90 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { // IsErrorRetryable returns whether the error is retryable, based on its Code. // Returns false if error is nil. func IsErrorRetryable(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) + if err == nil { + return false + } + return shouldRetryError(err) +} + +type temporary interface { + Temporary() bool +} + +func shouldRetryError(origErr error) bool { + switch err := origErr.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { + return false } + if isNestedErrorRetryable(err) { + return true + } + + origErr := err.OrigErr() + var shouldRetry bool + if origErr != nil { + shouldRetry = shouldRetryError(origErr) + if err.Code() == ErrCodeRequestError && !shouldRetry { + return false + } + } + if isCodeRetryable(err.Code()) { + return true + } + return shouldRetry + + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. + return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the error: + return shouldRetryError(err.Err) + + case temporary: + if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { + return true + } + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() || isErrConnectionReset(origErr) + + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retryable. See + // TestRequest4xxUnretryable for an example. + return true + + default: + switch err.Error() { + case "net/http: request canceled", + "net/http: request canceled while waiting for connection": + // known 1.5 error case when an http request is cancelled + return false + } + // here we don't know the error; so we allow a retry. + return true } - return false } // IsErrorThrottle returns whether the error is to be throttled based on its code. // Returns false if error is nil. 
func IsErrorThrottle(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeThrottle(aerr.Code()) - } + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeThrottle(aerr.Code()) } return false } -// IsErrorExpiredCreds returns whether the error code is a credential expiry error. -// Returns false if error is nil. +// IsErrorExpiredCreds returns whether the error code is a credential expiry +// error. Returns false if error is nil. func IsErrorExpiredCreds(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeExpiredCreds(aerr.Code()) - } + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeExpiredCreds(aerr.Code()) } return false } @@ -143,17 +248,58 @@ func IsErrorExpiredCreds(err error) bool { // // Alias for the utility function IsErrorRetryable func (r *Request) IsErrorRetryable() bool { + if isErrCode(r.Error, r.RetryErrorCodes) { + return true + } + + // HTTP response status code 501 should not be retried. + // 501 represents Not Implemented which means the request method is not + // supported by the server and cannot be handled. + if r.HTTPResponse != nil { + // HTTP response status code 500 represents internal server error and + // should be retried without any throttle. + if r.HTTPResponse.StatusCode == 500 { + return true + } + } return IsErrorRetryable(r.Error) } -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if the request has no Error set +// IsErrorThrottle returns whether the error is to be throttled based on its +// code. Returns false if the request has no Error set. // // Alias for the utility function IsErrorThrottle func (r *Request) IsErrorThrottle() bool { + if isErrCode(r.Error, r.ThrottleErrorCodes) { + return true + } + + if r.HTTPResponse != nil { + switch r.HTTPResponse.StatusCode { + case + 429, // error caused due to too many requests + 502, // Bad Gateway error should be throttled + 503, // caused when service is unavailable + 504: // error occurred due to gateway timeout + return true + } + } + return IsErrorThrottle(r.Error) } +func isErrCode(err error, codes []string) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + for _, code := range codes { + if code == aerr.Code() { + return true + } + } + } + + return false +} + // IsErrorExpired returns whether the error code is a credential expiry error. // Returns false if the request has no Error set. 
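
Request.IsErrorRetryable and Request.IsErrorThrottle above now also consult the per-request RetryErrorCodes and ThrottleErrorCodes slices. A small sketch of populating them through a session handler; the error code name is hypothetical.

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))

        // Mark an extra, service-specific code as retryable for every request
        // built from this session. Validate handlers run once per request.
        sess.Handlers.Validate.PushBack(func(r *request.Request) {
            r.RetryErrorCodes = append(r.RetryErrorCodes, "HypotheticalRetryableError")
        })
        _ = sess
    }
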
// diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go new file mode 100644 index 00000000..fe6dac1f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -0,0 +1,267 @@ +package session + +import ( + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/processcreds" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +func resolveCredentials(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (*credentials.Credentials, error) { + + switch { + case len(sessOpts.Profile) != 0: + // User explicitly provided an Profile in the session's configuration + // so load that profile from shared config first. + // Github(aws/aws-sdk-go#2727) + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + + case envCfg.Creds.HasKeys(): + // Environment credentials + return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil + + case len(envCfg.WebIdentityTokenFilePath) != 0: + // Web identity token from environment, RoleARN required to also be + // set. + return assumeWebIdentity(cfg, handlers, + envCfg.WebIdentityTokenFilePath, + envCfg.RoleARN, + envCfg.RoleSessionName, + ) + + default: + // Fallback to the "default" credential resolution chain. + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + } +} + +// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but +// 'AWS_ROLE_ARN' was not set. +var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil) + +// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but +// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. +var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil) + +func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, + filepath string, + roleARN, sessionName string, +) (*credentials.Credentials, error) { + + if len(filepath) == 0 { + return nil, WebIdentityEmptyTokenFilePathErr + } + + if len(roleARN) == 0 { + return nil, WebIdentityEmptyRoleARNErr + } + + creds := stscreds.NewWebIdentityCredentials( + &Session{ + Config: cfg, + Handlers: handlers.Copy(), + }, + roleARN, + sessionName, + filepath, + ) + + return creds, nil +} + +func resolveCredsFromProfile(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch { + case sharedCfg.SourceProfile != nil: + // Assume IAM role with credentials source from a different profile. + creds, err = resolveCredsFromProfile(cfg, envCfg, + *sharedCfg.SourceProfile, handlers, sessOpts, + ) + + case sharedCfg.Creds.HasKeys(): + // Static Credentials from Shared Config/Credentials file. 
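
resolveCredentials above takes the web identity branch when both AWS_WEB_IDENTITY_TOKEN_FILE and AWS_ROLE_ARN are set in the environment. A rough sketch of what that looks like from the caller's side, with placeholder values; exact error timing is not asserted here.

    package main

    import (
        "log"
        "os"

        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Placeholder values; both variables must be set, otherwise the
        // corresponding WebIdentityEmpty* error is expected.
        os.Setenv("AWS_WEB_IDENTITY_TOKEN_FILE", "/var/run/secrets/eks/token") // hypothetical path
        os.Setenv("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/example")    // hypothetical ARN

        sess, err := session.NewSession()
        if err != nil {
            log.Fatal(err)
        }
        _ = sess // credentials resolve lazily via stscreds.NewWebIdentityCredentials
    }
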
+ creds = credentials.NewStaticCredentialsFromCreds( + sharedCfg.Creds, + ) + + case len(sharedCfg.CredentialProcess) != 0: + // Get credentials from CredentialProcess + creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) + + case len(sharedCfg.CredentialSource) != 0: + creds, err = resolveCredsFromSource(cfg, envCfg, + sharedCfg, handlers, sessOpts, + ) + + case len(sharedCfg.WebIdentityTokenFile) != 0: + // Credentials from Assume Web Identity token require an IAM Role, and + // that roll will be assumed. May be wrapped with another assume role + // via SourceProfile. + return assumeWebIdentity(cfg, handlers, + sharedCfg.WebIdentityTokenFile, + sharedCfg.RoleARN, + sharedCfg.RoleSessionName, + ) + + default: + // Fallback to default credentials provider, include mock errors for + // the credential chain so user can identify why credentials failed to + // be retrieved. + creds = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{ + Err: awserr.New("EnvAccessKeyNotFound", + "failed to find credentials in the environment.", nil), + }, + &credProviderError{ + Err: awserr.New("SharedCredsLoad", + fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), + }, + defaults.RemoteCredProvider(*cfg, handlers), + }, + }) + } + if err != nil { + return nil, err + } + + if len(sharedCfg.RoleARN) > 0 { + cfgCp := *cfg + cfgCp.Credentials = creds + return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) + } + + return creds, nil +} + +// valid credential source values +const ( + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" +) + +func resolveCredsFromSource(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch sharedCfg.CredentialSource { + case credSourceEc2Metadata: + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + case credSourceEnvironment: + creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) + + case credSourceECSContainer: + if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { + return nil, ErrSharedConfigECSContainerEnvVarEmpty + } + + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + default: + return nil, ErrSharedConfigInvalidCredSource + } + + return creds, nil +} + +func credsFromAssumeRole(cfg aws.Config, + handlers request.Handlers, + sharedCfg sharedConfig, + sessOpts Options, +) (*credentials.Credentials, error) { + + if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. 
+ return nil, AssumeRoleTokenProviderNotSetError{} + } + + return stscreds.NewCredentials( + &Session{ + Config: &cfg, + Handlers: handlers.Copy(), + }, + sharedCfg.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.RoleSessionName + + if sessOpts.AssumeRoleDuration == 0 && + sharedCfg.AssumeRoleDuration != nil && + *sharedCfg.AssumeRoleDuration/time.Minute > 15 { + opt.Duration = *sharedCfg.AssumeRoleDuration + } else if sessOpts.AssumeRoleDuration != 0 { + opt.Duration = sessOpts.AssumeRoleDuration + } + + // Assume role with external ID + if len(sharedCfg.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } + }, + ), nil +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a +// session when the MFAToken option is not set when shared config is configured +// load assume a role with an MFA token. +type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. +func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +type credProviderError struct { + Err error +} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index 38a7b05a..7ec66e7e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -1,97 +1,93 @@ /* -Package session provides configuration for the SDK's service clients. - -Sessions can be shared across all service clients that share the same base -configuration. The Session is built from the SDK's default configuration and -request handlers. - -Sessions should be cached when possible, because creating a new Session will -load all configuration values from the environment, and config files each time -the Session is created. Sharing the Session value across all of your service -clients will ensure the configuration is loaded the fewest number of times possible. - -Concurrency +Package session provides configuration for the SDK's service clients. Sessions +can be shared across service clients that share the same base configuration. Sessions are safe to use concurrently as long as the Session is not being -modified. The SDK will not modify the Session once the Session has been created. -Creating service clients concurrently from a shared Session is safe. - -Sessions from Shared Config - -Sessions can be created using the method above that will only load the -additional config if the AWS_SDK_LOAD_CONFIG environment variable is set. 
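
credsFromAssumeRole above returns AssumeRoleTokenProviderNotSetError when the selected profile sets mfa_serial but no token provider was supplied. A minimal sketch of providing one through session options; the profile name is hypothetical and stscreds.StdinTokenProvider is assumed to be available in this SDK version.

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess, err := session.NewSessionWithOptions(session.Options{
            SharedConfigState:       session.SharedConfigEnable,
            AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
            Profile:                 "role-with-mfa", // hypothetical profile
        })
        if err != nil {
            log.Fatal(err)
        }
        _ = sess
    }
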
-Alternatively you can explicitly create a Session with shared config enabled. -To do this you can use NewSessionWithOptions to configure how the Session will -be created. Using the NewSessionWithOptions with SharedConfigState set to -SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG -environment variable was set. +modified. Sessions should be cached when possible, because creating a new +Session will load all configuration values from the environment, and config +files each time the Session is created. Sharing the Session value across all of +your service clients will ensure the configuration is loaded the fewest number +of times possible. -Creating Sessions - -When creating Sessions optional aws.Config values can be passed in that will -override the default, or loaded config values the Session is being created -with. This allows you to provide additional, or case based, configuration -as needed. +Sessions options from Shared Config By default NewSession will only load credentials from the shared credentials file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value the Session will be created from the configuration values from the shared config (~/.aws/config) and shared credentials -(~/.aws/credentials) files. See the section Sessions from Shared Config for -more information. +(~/.aws/credentials) files. Using the NewSessionWithOptions with +SharedConfigState set to SharedConfigEnable will create the session as if the +AWS_SDK_LOAD_CONFIG environment variable was set. -Create a Session with the default config and request handlers. With credentials -region, and profile loaded from the environment and shared config automatically. -Requires the AWS_PROFILE to be set, or "default" is used. +Credential and config loading order - // Create Session - sess := session.Must(session.NewSession()) +The Session will attempt to load configuration and credentials from the +environment, configuration files, and other credential sources. The order +configuration is loaded in is: - // Create a Session with a custom region - sess := session.Must(session.NewSession(&aws.Config{ - Region: aws.String("us-east-1"), - })) + * Environment Variables + * Shared Credentials file + * Shared Configuration file (if SharedConfig is enabled) + * EC2 Instance Metadata (credentials only) - // Create a S3 client instance from a session - sess := session.Must(session.NewSession()) +The Environment variables for credentials will have precedence over shared +config even if SharedConfig is enabled. To override this behavior, and use +shared config credentials instead specify the session.Options.Profile, (e.g. +when using credential_source=Environment to assume a role). + + sess, err := session.NewSessionWithOptions(session.Options{ + Profile: "myProfile", + }) - svc := s3.New(sess) +Creating Sessions -Create Session With Option Overrides +Creating a Session without additional options will load credentials region, and +profile loaded from the environment and shared config automatically. See, +"Environment Variables" section for information on environment variables used +by Session. -In addition to NewSession, Sessions can be created using NewSessionWithOptions. -This func allows you to control and override how the Session will be created -through code instead of being driven by environment variables only. 
+ // Create Session + sess, err := session.NewSession() -Use NewSessionWithOptions when you want to provide the config profile, or -override the shared config state (AWS_SDK_LOAD_CONFIG). + +When creating Sessions optional aws.Config values can be passed in that will +override the default, or loaded, config values the Session is being created +with. This allows you to provide additional, or case based, configuration +as needed. + + // Create a Session with a custom region + sess, err := session.NewSession(&aws.Config{ + Region: aws.String("us-west-2"), + }) + +Use NewSessionWithOptions to provide additional configuration driving how the +Session's configuration will be loaded. Such as, specifying shared config +profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG). // Equivalent to session.NewSession() - sess := session.Must(session.NewSessionWithOptions(session.Options{ + sess, err := session.NewSessionWithOptions(session.Options{ // Options - })) + }) - // Specify profile to load for the session's config - sess := session.Must(session.NewSessionWithOptions(session.Options{ - Profile: "profile_name", - })) + sess, err := session.NewSessionWithOptions(session.Options{ + // Specify profile to load for the session's config + Profile: "profile_name", - // Specify profile for config and region for requests - sess := session.Must(session.NewSessionWithOptions(session.Options{ - Config: aws.Config{Region: aws.String("us-east-1")}, - Profile: "profile_name", - })) + // Provide SDK Config options, such as Region. + Config: aws.Config{ + Region: aws.String("us-west-2"), + }, - // Force enable Shared Config support - sess := session.Must(session.NewSessionWithOptions(session.Options{ + // Force enable Shared Config support SharedConfigState: session.SharedConfigEnable, - })) + }) Adding Handlers -You can add handlers to a session for processing HTTP requests. All service -clients that use the session inherit the handlers. For example, the following -handler logs every request and its payload made by a service client: +You can add handlers to a session to decorate API operation, (e.g. adding HTTP +headers). All clients that use the Session receive a copy of the Session's +handlers. For example, the following request handler added to the Session logs +every requests made. // Create a session, and add additional handlers for all service // clients created with the Session to inherit. Adds logging handler. @@ -99,22 +95,15 @@ handler logs every request and its payload made by a service client: sess.Handlers.Send.PushFront(func(r *request.Request) { // Log every request made and its payload - logger.Printf("Request: %s/%s, Payload: %s", + logger.Printf("Request: %s/%s, Params: %s", r.ClientInfo.ServiceName, r.Operation, r.Params) }) -Deprecated "New" function - -The New session function has been deprecated because it does not provide good -way to return errors that occur when loading the configuration files and values. -Because of this, NewSession was created so errors can be retrieved when -creating a session fails. - Shared Config Fields -By default the SDK will only load the shared credentials file's (~/.aws/credentials) -credentials values, and all other config is provided by the environment variables, -SDK defaults, and user provided aws.Config values. +By default the SDK will only load the shared credentials file's +(~/.aws/credentials) credentials values, and all other config is provided by +the environment variables, SDK defaults, and user provided aws.Config values. 
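Pulling the examples above into one compilable sketch, with a placeholder profile name and region, and the logging handler attached after the Session is created:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Create the Session with a profile, an explicit region, and shared
	// config enabled, as described in the doc comment above.
	sess, err := session.NewSessionWithOptions(session.Options{
		Profile:           "profile_name",
		Config:            aws.Config{Region: aws.String("us-west-2")},
		SharedConfigState: session.SharedConfigEnable,
	})
	if err != nil {
		log.Fatalf("failed to create session: %v", err)
	}

	// Every client created from this Session inherits a copy of its handlers,
	// so the logger below runs for each request those clients send.
	sess.Handlers.Send.PushFront(func(r *request.Request) {
		log.Printf("Request: %s/%s, Params: %s",
			r.ClientInfo.ServiceName, r.Operation, r.Params)
	})
}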
If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable option is used to create the Session the full shared config values will be @@ -125,24 +114,31 @@ files have the same format. If both config files are present the configuration from both files will be read. The Session will be created from configuration values from the shared -credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config). +credentials file (~/.aws/credentials) over those in the shared config file +(~/.aws/config). -Credentials are the values the SDK should use for authenticating requests with -AWS Services. They are from a configuration file will need to include both -aws_access_key_id and aws_secret_access_key must be provided together in the -same file to be considered valid. The values will be ignored if not a complete -group. aws_session_token is an optional field that can be provided if both of -the other two fields are also provided. +Credentials are the values the SDK uses to authenticating requests with AWS +Services. When specified in a file, both aws_access_key_id and +aws_secret_access_key must be provided together in the same file to be +considered valid. They will be ignored if both are not present. +aws_session_token is an optional field that can be provided in addition to the +other two fields. aws_access_key_id = AKID aws_secret_access_key = SECRET aws_session_token = TOKEN -Assume Role values allow you to configure the SDK to assume an IAM role using -a set of credentials provided in a config file via the source_profile field. -Both "role_arn" and "source_profile" are required. The SDK supports assuming -a role with MFA token if the session option AssumeRoleTokenProvider -is set. + ; region only supported if SharedConfigEnabled. + region = us-east-1 + +Assume Role configuration + +The role_arn field allows you to configure the SDK to assume an IAM role using +a set of credentials from another source. Such as when paired with static +credentials, "profile_source", "credential_process", or "credential_source" +fields. If "role_arn" is provided, a source of credentials must also be +specified, such as "source_profile", "credential_source", or +"credential_process". role_arn = arn:aws:iam:::role/ source_profile = profile_with_creds @@ -150,40 +146,16 @@ is set. mfa_serial = role_session_name = session_name -Region is the region the SDK should use for looking up AWS service endpoints -and signing requests. - - region = us-east-1 - -Assume Role with MFA token -To create a session with support for assuming an IAM role with MFA set the -session option AssumeRoleTokenProvider to a function that will prompt for the -MFA token code when the SDK assumes the role and refreshes the role's credentials. -This allows you to configure the SDK via the shared config to assumea role -with MFA tokens. - -In order for the SDK to assume a role with MFA the SharedConfigState -session option must be set to SharedConfigEnable, or AWS_SDK_LOAD_CONFIG -environment variable set. - -The shared configuration instructs the SDK to assume an IAM role with MFA -when the mfa_serial configuration field is set in the shared config -(~/.aws/config) or shared credentials (~/.aws/credentials) file. - -If mfa_serial is set in the configuration, the SDK will assume the role, and -the AssumeRoleTokenProvider session option is not set an an error will -be returned when creating the session. +The SDK supports assuming a role with MFA token. 
If "mfa_serial" is set, you +must also set the Session Option.AssumeRoleTokenProvider. The Session will fail +to load if the AssumeRoleTokenProvider is not specified. sess := session.Must(session.NewSessionWithOptions(session.Options{ AssumeRoleTokenProvider: stscreds.StdinTokenProvider, })) - // Create service client value configured for credentials - // from assumed role. - svc := s3.New(sess) - -To setup assume role outside of a session see the stscreds.AssumeRoleProvider +To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider documentation. Environment Variables diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index e3959b95..c1e0e9c9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -1,12 +1,15 @@ package session import ( + "fmt" "os" "strconv" + "strings" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" ) // EnvProviderName provides a name of the provider when config is loaded from environment. @@ -99,21 +102,61 @@ type envConfig struct { CustomCABundle string csmEnabled string - CSMEnabled bool + CSMEnabled *bool CSMPort string + CSMHost string CSMClientID string - enableEndpointDiscovery string // Enables endpoint discovery via environment variables. // // AWS_ENABLE_ENDPOINT_DISCOVERY=true EnableEndpointDiscovery *bool + enableEndpointDiscovery string + + // Specifies the WebIdentity token the SDK should use to assume a role + // with. + // + // AWS_WEB_IDENTITY_TOKEN_FILE=file_path + WebIdentityTokenFilePath string + + // Specifies the IAM role arn to use when assuming an role. + // + // AWS_ROLE_ARN=role_arn + RoleARN string + + // Specifies the IAM role session name to use when assuming a role. + // + // AWS_ROLE_SESSION_NAME=session_name + RoleSessionName string + + // Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint + // for a service. + // + // AWS_STS_REGIONAL_ENDPOINTS=regional + // This can take value as `regional` or `legacy` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the S3 Regional Endpoint flag for the SDK to resolve the + // endpoint for a service. + // + // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional + // This can take value as `regional` or `legacy` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // AWS_S3_USE_ARN_REGION=true + S3UseARNRegion bool } var ( csmEnabledEnvKey = []string{ "AWS_CSM_ENABLED", } + csmHostEnvKey = []string{ + "AWS_CSM_HOST", + } csmPortEnvKey = []string{ "AWS_CSM_PORT", } @@ -150,6 +193,24 @@ var ( sharedConfigFileEnvKey = []string{ "AWS_CONFIG_FILE", } + webIdentityTokenFilePathEnvKey = []string{ + "AWS_WEB_IDENTITY_TOKEN_FILE", + } + roleARNEnvKey = []string{ + "AWS_ROLE_ARN", + } + roleSessionNameEnvKey = []string{ + "AWS_ROLE_SESSION_NAME", + } + stsRegionalEndpointKey = []string{ + "AWS_STS_REGIONAL_ENDPOINTS", + } + s3UsEast1RegionalEndpoint = []string{ + "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", + } + s3UseARNRegionEnvKey = []string{ + "AWS_S3_USE_ARN_REGION", + } ) // loadEnvConfig retrieves the SDK's environment configuration. 
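A small sketch of the MFA requirement just described, using a hypothetical prompt function in place of stscreds.StdinTokenProvider; any function matching func() (string, error) can be supplied:

package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"strings"

	"github.com/aws/aws-sdk-go/aws/session"
)

// promptMFAToken reads a one-time MFA code from stdin. The SDK calls it only
// when the assumed role's credentials need to be (re)fetched.
func promptMFAToken() (string, error) {
	fmt.Fprint(os.Stderr, "MFA token: ")
	code, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(code), nil
}

func main() {
	// Shared config must be enabled for the SDK to honor mfa_serial.
	sess, err := session.NewSessionWithOptions(session.Options{
		SharedConfigState:       session.SharedConfigEnable,
		AssumeRoleTokenProvider: promptMFAToken,
	})
	if err != nil {
		log.Fatalf("failed to create session: %v", err)
	}
	_ = sess
}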
@@ -158,7 +219,7 @@ var ( // If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value // the shared SDK config will be loaded in addition to the SDK's specific // configuration values. -func loadEnvConfig() envConfig { +func loadEnvConfig() (envConfig, error) { enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) return envConfigLoad(enableSharedConfig) } @@ -169,30 +230,42 @@ func loadEnvConfig() envConfig { // Loads the shared configuration in addition to the SDK's specific configuration. // This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` // environment variable is set. -func loadSharedEnvConfig() envConfig { +func loadSharedEnvConfig() (envConfig, error) { return envConfigLoad(true) } -func envConfigLoad(enableSharedConfig bool) envConfig { +func envConfigLoad(enableSharedConfig bool) (envConfig, error) { cfg := envConfig{} cfg.EnableSharedConfig = enableSharedConfig - setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey) - setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) - setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + // Static environment credentials + var creds credentials.Value + setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&creds.SessionToken, credSessionEnvKey) + if creds.HasKeys() { + // Require logical grouping of credentials + creds.ProviderName = EnvProviderName + cfg.Creds = creds + } + + // Role Metadata + setFromEnvVal(&cfg.RoleARN, roleARNEnvKey) + setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey) + + // Web identity environment variables + setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey) // CSM environment variables setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) - cfg.CSMEnabled = len(cfg.csmEnabled) > 0 - // Require logical grouping of credentials - if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { - cfg.Creds = credentials.Value{} - } else { - cfg.Creds.ProviderName = EnvProviderName + if len(cfg.csmEnabled) != 0 { + v, _ := strconv.ParseBool(cfg.csmEnabled) + cfg.CSMEnabled = &v } regionKeys := regionEnvKeys @@ -223,12 +296,48 @@ func envConfigLoad(enableSharedConfig bool) envConfig { cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") - return cfg + var err error + // STS Regional Endpoint variable + for _, k := range stsRegionalEndpointKey { + if v := os.Getenv(k); len(v) != 0 { + cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + // S3 Regional Endpoint variable + for _, k := range s3UsEast1RegionalEndpoint { + if v := os.Getenv(k); len(v) != 0 { + cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + var s3UseARNRegion string + setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey) + if len(s3UseARNRegion) != 0 { + switch { + case strings.EqualFold(s3UseARNRegion, "false"): + cfg.S3UseARNRegion = false + case strings.EqualFold(s3UseARNRegion, "true"): + cfg.S3UseARNRegion = true + default: + return envConfig{}, fmt.Errorf( + "invalid value for environment variable, %s=%s, need true or false", + s3UseARNRegionEnvKey[0], s3UseARNRegion) + } 
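A hedged sketch of the environment variables parsed above; in practice they would be exported in the shell rather than set from code, and the values shown are only examples:

package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Values other than "regional"/"legacy" (or true/false for the ARN flag)
	// now surface as errors from session creation instead of being ignored.
	os.Setenv("AWS_STS_REGIONAL_ENDPOINTS", "regional")
	os.Setenv("AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", "regional")
	os.Setenv("AWS_S3_USE_ARN_REGION", "true")

	sess, err := session.NewSession()
	if err != nil {
		log.Fatalf("failed to create session: %v", err)
	}
	_ = sess
}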
+ } + + return cfg, nil } func setFromEnvVal(dst *string, keys []string) { for _, k := range keys { - if v := os.Getenv(k); len(v) > 0 { + if v := os.Getenv(k); len(v) != 0 { *dst = v break } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index be4b5f07..0ff49960 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -8,19 +8,17 @@ import ( "io/ioutil" "net/http" "os" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/processcreds" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/csm" "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/shareddefaults" ) const ( @@ -75,7 +73,7 @@ type Session struct { // func is called instead of waiting to receive an error until a request is made. func New(cfgs ...*aws.Config) *Session { // load initial config from environment - envCfg := loadEnvConfig() + envCfg, envErr := loadEnvConfig() if envCfg.EnableSharedConfig { var cfg aws.Config @@ -95,19 +93,28 @@ func New(cfgs ...*aws.Config) *Session { // Session creation failed, need to report the error and prevent // any requests from succeeding. s = &Session{Config: defaults.Config()} - s.Config.MergeIn(cfgs...) - s.Config.Logger.Log("ERROR:", msg, "Error:", err) - s.Handlers.Validate.PushBack(func(r *request.Request) { - r.Error = err - }) + s.logDeprecatedNewSessionError(msg, err, cfgs) } return s } s := deprecatedNewSession(cfgs...) - if envCfg.CSMEnabled { - enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + if envErr != nil { + msg := "failed to load env config" + s.logDeprecatedNewSessionError(msg, envErr, cfgs) + } + + if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + msg := "failed to enable CSM" + s.logDeprecatedNewSessionError(msg, err, cfgs) + } } return s @@ -126,7 +133,7 @@ func New(cfgs ...*aws.Config) *Session { // to be built with retrieving credentials with AssumeRole set in the config. // // See the NewSessionWithOptions func for information on how to override or -// control through code how the Session will be created. Such as specifying the +// control through code how the Session will be created, such as specifying the // config profile, and controlling if shared config is enabled or not. func NewSession(cfgs ...*aws.Config) (*Session, error) { opts := Options{} @@ -210,6 +217,12 @@ type Options struct { // the config enables assume role wit MFA via the mfa_serial field. AssumeRoleTokenProvider func() (string, error) + // When the SDK's shared config is configured to assume a role this option + // may be provided to set the expiry duration of the STS credentials. + // Defaults to 15 minutes if not set as documented in the + // stscreds.AssumeRoleProvider. 
+ AssumeRoleDuration time.Duration + // Reader for a custom Credentials Authority (CA) bundle in PEM format that // the SDK will use instead of the default system's root CA bundle. Use this // only if you want to replace the CA bundle the SDK uses for TLS requests. @@ -224,6 +237,12 @@ type Options struct { // to also enable this feature. CustomCABundle session option field has priority // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. CustomCABundle io.Reader + + // The handlers that the session and all API clients will be created with. + // This must be a complete set of handlers. Use the defaults.Handlers() + // function to initialize this value before changing the handlers to be + // used by the SDK. + Handlers request.Handlers } // NewSessionWithOptions returns a new Session created from SDK defaults, config files, @@ -257,13 +276,20 @@ type Options struct { // })) func NewSessionWithOptions(opts Options) (*Session, error) { var envCfg envConfig + var err error if opts.SharedConfigState == SharedConfigEnable { - envCfg = loadSharedEnvConfig() + envCfg, err = loadSharedEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load shared config, %v", err) + } } else { - envCfg = loadEnvConfig() + envCfg, err = loadEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load environment config, %v", err) + } } - if len(opts.Profile) > 0 { + if len(opts.Profile) != 0 { envCfg.Profile = opts.Profile } @@ -329,27 +355,33 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session { return s } -func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) { - logger.Log("Enabling CSM") - if len(port) == 0 { - port = csm.DefaultPort +func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error { + if logger != nil { + logger.Log("Enabling CSM") } - r, err := csm.Start(clientID, "127.0.0.1:"+port) + r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port)) if err != nil { - return + return err } r.InjectHandlers(handlers) + + return nil } func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { cfg := defaults.Config() - handlers := defaults.Handlers() + + handlers := opts.Handlers + if handlers.IsEmpty() { + handlers = defaults.Handlers() + } // Get a merged version of the user provided config to determine if // credentials were. userCfg := &aws.Config{} userCfg.MergeIn(cfgs...) + cfg.MergeIn(userCfg) // Ordered config files will be loaded in with later files overwriting // previous config file values. @@ -366,9 +398,17 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } // Load additional config from file(s) - sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) if err != nil { - return nil, err + if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) { + // Special case where the user has not explicitly specified an AWS_PROFILE, + // or session.Options.profile, shared config is not enabled, and the + // environment has credentials, allow the shared config file to fail to + // load since the user has already provided credentials, and nothing else + // is required to be read file. 
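A sketch combining two of the new Options fields, AssumeRoleDuration and Handlers; the 30-minute duration and the completion logger are illustrative, and per the field comment the handler set starts from defaults.Handlers():

package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/defaults"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Start from the SDK's default handler set, then customize it before the
	// Session (and every client built from it) is created.
	handlers := defaults.Handlers()
	handlers.Complete.PushBack(func(r *request.Request) {
		log.Printf("completed %s.%s", r.ClientInfo.ServiceName, r.Operation.Name)
	})

	sess, err := session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
		// Ask STS for 30-minute credentials when shared config assumes a role.
		AssumeRoleDuration: 30 * time.Minute,
		Handlers:           handlers,
	})
	if err != nil {
		log.Fatalf("failed to create session: %v", err)
	}
	_ = sess
}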
Github(aws/aws-sdk-go#2455) + } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return nil, err + } } if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { @@ -381,8 +421,16 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } initHandlers(s) - if envCfg.CSMEnabled { - enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + + if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + return nil, err + } } // Setup HTTP client with custom cert bundle if enabled @@ -395,6 +443,46 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, return s, nil } +type csmConfig struct { + Enabled bool + Host string + Port string + ClientID string +} + +var csmProfileName = "aws_csm" + +func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) { + if envCfg.CSMEnabled != nil { + if *envCfg.CSMEnabled { + return csmConfig{ + Enabled: true, + ClientID: envCfg.CSMClientID, + Host: envCfg.CSMHost, + Port: envCfg.CSMPort, + }, nil + } + return csmConfig{}, nil + } + + sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false) + if err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return csmConfig{}, err + } + } + if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true { + return csmConfig{ + Enabled: true, + ClientID: sharedCfg.CSMClientID, + Host: sharedCfg.CSMHost, + Port: sharedCfg.CSMPort, + }, nil + } + + return csmConfig{}, nil +} + func loadCustomCABundle(s *Session, bundle io.Reader) error { var t *http.Transport switch v := s.Config.HTTPClient.Transport.(type) { @@ -443,9 +531,11 @@ func loadCertPool(r io.Reader) (*x509.CertPool, error) { return p, nil } -func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error { - // Merge in user provided configuration - cfg.MergeIn(userCfg) +func mergeConfigSrcs(cfg, userCfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) error { // Region if not already set by user if len(aws.StringValue(cfg.Region)) == 0 { @@ -464,162 +554,59 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share } } - // Configure credentials if not already set + // Regional Endpoint flag for STS endpoint resolving + mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{ + userCfg.STSRegionalEndpoint, + envCfg.STSRegionalEndpoint, + sharedCfg.STSRegionalEndpoint, + endpoints.LegacySTSEndpoint, + }) + + // Regional Endpoint flag for S3 endpoint resolving + mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ + userCfg.S3UsEast1RegionalEndpoint, + envCfg.S3UsEast1RegionalEndpoint, + sharedCfg.S3UsEast1RegionalEndpoint, + endpoints.LegacyS3UsEast1Endpoint, + }) + + // Configure credentials if not already set by the user when creating the + // Session. if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { - - // inspect the profile to see if a credential source has been specified. 
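With loadCSMConfig above, client-side monitoring can be switched on from the environment (or from the aws_csm shared-config profile); a sketch of the environment route with a placeholder client ID:

package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// AWS_CSM_ENABLED takes a boolean; host and port fall back to the csm
	// package defaults when left unset.
	os.Setenv("AWS_CSM_ENABLED", "true")
	os.Setenv("AWS_CSM_CLIENT_ID", "example-client-id")

	// CSM handlers are injected while the Session is being built, so every
	// client created from sess reports metrics to the local CSM agent.
	sess, err := session.NewSession()
	if err != nil {
		log.Fatalf("failed to create session: %v", err)
	}
	_ = sess
}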
- if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 { - - // if both credential_source and source_profile have been set, return an error - // as this is undefined behavior. - if len(sharedCfg.AssumeRole.SourceProfile) > 0 { - return ErrSharedConfigSourceCollision - } - - // valid credential source values - const ( - credSourceEc2Metadata = "Ec2InstanceMetadata" - credSourceEnvironment = "Environment" - credSourceECSContainer = "EcsContainer" - ) - - switch sharedCfg.AssumeRole.CredentialSource { - case credSourceEc2Metadata: - cfgCp := *cfg - p := defaults.RemoteCredProvider(cfgCp, handlers) - cfgCp.Credentials = credentials.NewCredentials(p) - - if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { - // AssumeRole Token provider is required if doing Assume Role - // with MFA. - return AssumeRoleTokenProviderNotSetError{} - } - - cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) - case credSourceEnvironment: - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - envCfg.Creds, - ) - case credSourceECSContainer: - if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { - return ErrSharedConfigECSContainerEnvVarEmpty - } - - cfgCp := *cfg - p := defaults.RemoteCredProvider(cfgCp, handlers) - creds := credentials.NewCredentials(p) - - cfg.Credentials = creds - default: - return ErrSharedConfigInvalidCredSource - } - - return nil - } - - if len(envCfg.Creds.AccessKeyID) > 0 { - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - envCfg.Creds, - ) - } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil { - cfgCp := *cfg - cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( - sharedCfg.AssumeRoleSource.Creds, - ) - - if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { - // AssumeRole Token provider is required if doing Assume Role - // with MFA. - return AssumeRoleTokenProviderNotSetError{} - } - - cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) - } else if len(sharedCfg.Creds.AccessKeyID) > 0 { - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - sharedCfg.Creds, - ) - } else if len(sharedCfg.CredentialProcess) > 0 { - cfg.Credentials = processcreds.NewCredentials( - sharedCfg.CredentialProcess, - ) - } else { - // Fallback to default credentials provider, include mock errors - // for the credential chain so user can identify why credentials - // failed to be retrieved. 
- cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)}, - &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)}, - defaults.RemoteCredProvider(*cfg, handlers), - }, - }) + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err } + cfg.Credentials = creds } - return nil -} - -func assumeRoleCredentials(cfg aws.Config, handlers request.Handlers, sharedCfg sharedConfig, sessOpts Options) *credentials.Credentials { - return stscreds.NewCredentials( - &Session{ - Config: &cfg, - Handlers: handlers.Copy(), - }, - sharedCfg.AssumeRole.RoleARN, - func(opt *stscreds.AssumeRoleProvider) { - opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName - - // Assume role with external ID - if len(sharedCfg.AssumeRole.ExternalID) > 0 { - opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) - } - - // Assume role with MFA - if len(sharedCfg.AssumeRole.MFASerial) > 0 { - opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) - opt.TokenProvider = sessOpts.AssumeRoleTokenProvider - } - }, - ) -} - -// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the -// MFAToken option is not set when shared config is configured load assume a -// role with an MFA token. -type AssumeRoleTokenProviderNotSetError struct{} - -// Code is the short id of the error. -func (e AssumeRoleTokenProviderNotSetError) Code() string { - return "AssumeRoleTokenProviderNotSetError" -} - -// Message is the description of the error -func (e AssumeRoleTokenProviderNotSetError) Message() string { - return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") -} + cfg.S3UseARNRegion = userCfg.S3UseARNRegion + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &envCfg.S3UseARNRegion + } + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion + } -// OrigErr is the underlying error that caused the failure. -func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { return nil } -// Error satisfies the error interface. -func (e AssumeRoleTokenProviderNotSetError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -type credProviderError struct { - Err error +func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetSTSEndpoint { + cfg.STSRegionalEndpoint = v + break + } + } } -var emptyCreds = credentials.Value{} - -func (c credProviderError) Retrieve() (credentials.Value, error) { - return credentials.Value{}, c.Err -} -func (c credProviderError) IsExpired() bool { - return true +func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetS3UsEast1Endpoint { + cfg.S3UsEast1RegionalEndpoint = v + break + } + } } func initHandlers(s *Session) { @@ -630,7 +617,7 @@ func initHandlers(s *Session) { } } -// Copy creates and returns a copy of the current Session, coping the config +// Copy creates and returns a copy of the current Session, copying the config // and handlers. 
If any additional configs are provided they will be merged // on top of the Session's copied config. // @@ -650,47 +637,67 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session { // ClientConfig satisfies the client.ConfigProvider interface and is used to // configure the service client instances. Passing the Session to the service // client's constructor (New) will use this method to configure the client. -func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { - // Backwards compatibility, the error will be eaten if user calls ClientConfig - // directly. All SDK services will use ClientconfigWithError. - cfg, _ := s.clientConfigWithErr(serviceName, cfgs...) - - return cfg -} - -func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) { +func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { s = s.Copy(cfgs...) - var resolved endpoints.ResolvedEndpoint - var err error - region := aws.StringValue(s.Config.Region) - - if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 { - resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = region - } else { - resolved, err = s.Config.EndpointResolver.EndpointFor( - serviceName, region, - func(opt *endpoints.Options) { - opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL) - opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack) - - // Support the condition where the service is modeled but its - // endpoint metadata is not available. - opt.ResolveUnknownService = true - }, - ) + resolved, err := s.resolveEndpoint(service, region, s.Config) + if err != nil { + s.Handlers.Validate.PushBack(func(r *request.Request) { + if len(r.ClientInfo.Endpoint) != 0 { + // Error occurred while resolving endpoint, but the request + // being invoked has had an endpoint specified after the client + // was created. + return + } + r.Error = err + }) } return client.Config{ Config: s.Config, Handlers: s.Handlers, + PartitionID: resolved.PartitionID, Endpoint: resolved.URL, SigningRegion: resolved.SigningRegion, SigningNameDerived: resolved.SigningNameDerived, SigningName: resolved.SigningName, - }, err + } +} + +func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { + + if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), + SigningRegion: region, + }, nil + } + + resolved, err := cfg.EndpointResolver.EndpointFor(service, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + // Support for STSRegionalEndpoint where the STSRegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint + + // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. 
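A sketch of driving the endpoint behaviour above from code: the regional-endpoint flags feed mergeSTSRegionalEndpointConfig/mergeS3UsEast1RegionalEndpointConfig, while an explicit aws.Config.Endpoint short-circuits resolveEndpoint entirely (the localhost address is purely illustrative):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/endpoints"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Regional-endpoint flags and the S3 ARN-region flag set here take
	// precedence over the environment and shared config values.
	sess, err := session.NewSession(&aws.Config{
		Region:                    aws.String("us-east-1"),
		STSRegionalEndpoint:       endpoints.RegionalSTSEndpoint,
		S3UsEast1RegionalEndpoint: endpoints.RegionalS3UsEast1Endpoint,
		S3UseARNRegion:            aws.Bool(true),
	})
	if err != nil {
		log.Fatalf("failed to create session: %v", err)
	}

	// An explicit endpoint bypasses endpoint resolution entirely; useful for
	// pointing clients at a local emulator.
	local := sess.Copy(&aws.Config{
		Endpoint:   aws.String("http://localhost:4566"),
		DisableSSL: aws.Bool(true),
	})
	_ = local
}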
+ opt.ResolveUnknownService = true + }, + ) + if err != nil { + return endpoints.ResolvedEndpoint{}, err + } + + return resolved, nil } // ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception @@ -700,12 +707,9 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf s = s.Copy(cfgs...) var resolved endpoints.ResolvedEndpoint - - region := aws.StringValue(s.Config.Region) - if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = region + resolved.SigningRegion = aws.StringValue(s.Config.Region) } return client.Config{ @@ -717,3 +721,14 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf SigningName: resolved.SigningName, } } + +// logDeprecatedNewSessionError function enables error handling for session +func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) { + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s.Config.MergeIn(cfgs...) + s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 7cb44021..680805a3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -2,10 +2,11 @@ package session import ( "fmt" + "time" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" - + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/internal/ini" ) @@ -16,57 +17,75 @@ const ( sessionTokenKey = `aws_session_token` // optional // Assume Role Credentials group - roleArnKey = `role_arn` // group required - sourceProfileKey = `source_profile` // group required (or credential_source) - credentialSourceKey = `credential_source` // group required (or source_profile) - externalIDKey = `external_id` // optional - mfaSerialKey = `mfa_serial` // optional - roleSessionNameKey = `role_session_name` // optional + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required (or credential_source) + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + roleDurationSecondsKey = "duration_seconds" // optional + + // CSM options + csmEnabledKey = `csm_enabled` + csmHostKey = `csm_host` + csmPortKey = `csm_port` + csmClientIDKey = `csm_client_id` // Additional Config fields regionKey = `region` // endpoint discovery group enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + // External Credential Process - credentialProcessKey = `credential_process` + credentialProcessKey = `credential_process` // optional + + // Web Identity Token File + webIdentityTokenFileKey = `web_identity_token_file` // optional + + // Additional config fields for regional or legacy endpoints + stsRegionalEndpointSharedKey = `sts_regional_endpoints` + + // Additional config fields for regional or legacy endpoints + s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint` // DefaultSharedConfigProfile is the default profile to be used 
when // loading configuration from the config files if another profile name // is not provided. DefaultSharedConfigProfile = `default` -) -type assumeRoleConfig struct { - RoleARN string - SourceProfile string - CredentialSource string - ExternalID string - MFASerial string - RoleSessionName string -} + // S3 ARN Region Usage + s3UseARNRegionKey = "s3_use_arn_region" +) // sharedConfig represents the configuration fields of the SDK config files. type sharedConfig struct { - // Credentials values from the config file. Both aws_access_key_id - // and aws_secret_access_key must be provided together in the same file - // to be considered valid. The values will be ignored if not a complete group. - // aws_session_token is an optional field that can be provided if both of the - // other two fields are also provided. + // Credentials values from the config file. Both aws_access_key_id and + // aws_secret_access_key must be provided together in the same file to be + // considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of + // the other two fields are also provided. // // aws_access_key_id // aws_secret_access_key // aws_session_token Creds credentials.Value - AssumeRole assumeRoleConfig - AssumeRoleSource *sharedConfig + CredentialSource string + CredentialProcess string + WebIdentityTokenFile string + + RoleARN string + RoleSessionName string + ExternalID string + MFASerial string + AssumeRoleDuration *time.Duration - // An external process to request credentials - CredentialProcess string + SourceProfileName string + SourceProfile *sharedConfig - // Region is the region the SDK should use for looking up AWS service endpoints - // and signing requests. + // Region is the region the SDK should use for looking up AWS service + // endpoints and signing requests. // // region Region string @@ -76,6 +95,30 @@ type sharedConfig struct { // // endpoint_discovery_enabled = true EnableEndpointDiscovery *bool + + // CSM Options + CSMEnabled *bool + CSMHost string + CSMPort string + CSMClientID string + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // sts_regional_endpoints = regional + // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // s3_us_east_1_regional_endpoint = regional + // This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // s3_use_arn_region=true + S3UseARNRegion bool } type sharedConfigFile struct { @@ -83,17 +126,18 @@ type sharedConfigFile struct { IniData ini.Sections } -// loadSharedConfig retrieves the configuration from the list of files -// using the profile provided. The order the files are listed will determine +// loadSharedConfig retrieves the configuration from the list of files using +// the profile provided. The order the files are listed will determine // precedence. Values in subsequent files will overwrite values defined in // earlier files. // // For example, given two files A and B. Both define credentials. If the order -// of the files are A then B, B's credential values will be used instead of A's. 
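The sharedConfig fields above map directly onto keys in ~/.aws/config; a sketch of a profile using the newly supported keys and of loading it, with placeholder profile names and ARN:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
)

// Example ~/.aws/config contents exercising the new keys:
//
//	[profile assume-example]
//	role_arn               = arn:aws:iam::123456789012:role/example-role
//	source_profile         = base
//	duration_seconds       = 1800
//	sts_regional_endpoints = regional
//	s3_use_arn_region      = true
//
//	[profile base]
//	aws_access_key_id     = AKID
//	aws_secret_access_key = SECRET
func main() {
	sess, err := session.NewSessionWithOptions(session.Options{
		Profile:           "assume-example",
		SharedConfigState: session.SharedConfigEnable,
	})
	if err != nil {
		log.Fatalf("failed to load profile: %v", err)
	}
	_ = sess
}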
+// of the files are A then B, B's credential values will be used instead of +// A's. // // See sharedConfig.setFromFile for information how the config files // will be loaded. -func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) { +func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) { if len(profile) == 0 { profile = DefaultSharedConfigProfile } @@ -104,16 +148,11 @@ func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) } cfg := sharedConfig{} - if err = cfg.setFromIniFiles(profile, files); err != nil { + profiles := map[string]struct{}{} + if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil { return sharedConfig{}, err } - if len(cfg.AssumeRole.SourceProfile) > 0 { - if err := cfg.setAssumeRoleSource(profile, files); err != nil { - return sharedConfig{}, err - } - } - return cfg, nil } @@ -137,60 +176,88 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { return files, nil } -func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error { - var assumeRoleSrc sharedConfig - - if len(cfg.AssumeRole.CredentialSource) > 0 { - // setAssumeRoleSource is only called when source_profile is found. - // If both source_profile and credential_source are set, then - // ErrSharedConfigSourceCollision will be returned - return ErrSharedConfigSourceCollision +func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error { + // Trim files from the list that don't exist. + var skippedFiles int + var profileNotFoundErr error + for _, f := range files { + if err := cfg.setFromIniFile(profile, f, exOpts); err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + // Ignore profiles not defined in individual files. + profileNotFoundErr = err + skippedFiles++ + continue + } + return err + } + } + if skippedFiles == len(files) { + // If all files were skipped because the profile is not found, return + // the original profile not found error. + return profileNotFoundErr } - // Multiple level assume role chains are not support - if cfg.AssumeRole.SourceProfile == origProfile { - assumeRoleSrc = *cfg - assumeRoleSrc.AssumeRole = assumeRoleConfig{} + if _, ok := profiles[profile]; ok { + // if this is the second instance of the profile the Assume Role + // options must be cleared because they are only valid for the + // first reference of a profile. The self linked instance of the + // profile only have credential provider options. + cfg.clearAssumeRoleOptions() } else { - err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files) - if err != nil { + // First time a profile has been seen, It must either be a assume role + // or credentials. Assert if the credential type requires a role ARN, + // the ARN is also set. + if err := cfg.validateCredentialsRequireARN(profile); err != nil { return err } } + profiles[profile] = struct{}{} - if len(assumeRoleSrc.Creds.AccessKeyID) == 0 { - return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN} + if err := cfg.validateCredentialType(); err != nil { + return err } - cfg.AssumeRoleSource = &assumeRoleSrc + // Link source profiles for assume roles + if len(cfg.SourceProfileName) != 0 { + // Linked profile via source_profile ignore credential provider + // options, the source profile must provide the credentials. 
+ cfg.clearCredentialOptions() - return nil -} - -func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error { - // Trim files from the list that don't exist. - for _, f := range files { - if err := cfg.setFromIniFile(profile, f); err != nil { + srcCfg := &sharedConfig{} + err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts) + if err != nil { + // SourceProfile that doesn't exist is an error in configuration. if _, ok := err.(SharedConfigProfileNotExistsError); ok { - // Ignore proviles missings - continue + err = SharedConfigAssumeRoleError{ + RoleARN: cfg.RoleARN, + SourceProfile: cfg.SourceProfileName, + } } return err } + + if !srcCfg.hasCredentials() { + return SharedConfigAssumeRoleError{ + RoleARN: cfg.RoleARN, + SourceProfile: cfg.SourceProfileName, + } + } + + cfg.SourceProfile = srcCfg } return nil } -// setFromFile loads the configuration from the file using -// the profile provided. A sharedConfig pointer type value is used so that -// multiple config file loadings can be chained. +// setFromFile loads the configuration from the file using the profile +// provided. A sharedConfig pointer type value is used so that multiple config +// file loadings can be chained. // // Only loads complete logically grouped values, and will not set fields in cfg -// for incomplete grouped values in the config. Such as credentials. For example -// if a config file only includes aws_access_key_id but no aws_secret_access_key -// the aws_access_key_id will be ignored. -func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error { +// for incomplete grouped values in the config. Such as credentials. For +// example if a config file only includes aws_access_key_id but no +// aws_secret_access_key the aws_access_key_id will be ignored. 
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error { section, ok := file.IniData.GetSection(profile) if !ok { // Fallback to to alternate profile name: profile @@ -200,53 +267,176 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) e } } - // Shared Credentials - akid := section.String(accessKeyIDKey) - secret := section.String(secretAccessKey) - if len(akid) > 0 && len(secret) > 0 { - cfg.Creds = credentials.Value{ - AccessKeyID: akid, - SecretAccessKey: secret, - SessionToken: section.String(sessionTokenKey), - ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + if exOpts { + // Assume Role Parameters + updateString(&cfg.RoleARN, section, roleArnKey) + updateString(&cfg.ExternalID, section, externalIDKey) + updateString(&cfg.MFASerial, section, mfaSerialKey) + updateString(&cfg.RoleSessionName, section, roleSessionNameKey) + updateString(&cfg.SourceProfileName, section, sourceProfileKey) + updateString(&cfg.CredentialSource, section, credentialSourceKey) + updateString(&cfg.Region, section, regionKey) + + if section.Has(roleDurationSecondsKey) { + d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second + cfg.AssumeRoleDuration = &d } - } - // Assume Role - roleArn := section.String(roleArnKey) - srcProfile := section.String(sourceProfileKey) - credentialSource := section.String(credentialSourceKey) - hasSource := len(srcProfile) > 0 || len(credentialSource) > 0 - if len(roleArn) > 0 && hasSource { - cfg.AssumeRole = assumeRoleConfig{ - RoleARN: roleArn, - SourceProfile: srcProfile, - CredentialSource: credentialSource, - ExternalID: section.String(externalIDKey), - MFASerial: section.String(mfaSerialKey), - RoleSessionName: section.String(roleSessionNameKey), + if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 { + sre, err := endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + stsRegionalEndpointSharedKey, file.Filename, err) + } + cfg.STSRegionalEndpoint = sre } - } - // `credential_process` - if credProc := section.String(credentialProcessKey); len(credProc) > 0 { - cfg.CredentialProcess = credProc + if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 { + sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + s3UsEast1RegionalSharedKey, file.Filename, err) + } + cfg.S3UsEast1RegionalEndpoint = sre + } } - // Region - if v := section.String(regionKey); len(v) > 0 { - cfg.Region = v + updateString(&cfg.CredentialProcess, section, credentialProcessKey) + updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) + + // Shared Credentials + creds := credentials.Value{ + AccessKeyID: section.String(accessKeyIDKey), + SecretAccessKey: section.String(secretAccessKey), + SessionToken: section.String(sessionTokenKey), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + if creds.HasKeys() { + cfg.Creds = creds } // Endpoint discovery - if section.Has(enableEndpointDiscoveryKey) { - v := section.Bool(enableEndpointDiscoveryKey) - cfg.EnableEndpointDiscovery = &v + updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) + + // CSM options + updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) + updateString(&cfg.CSMHost, section, csmHostKey) + updateString(&cfg.CSMPort, section, csmPortKey) + updateString(&cfg.CSMClientID, section, 
csmClientIDKey) + + updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey) + + return nil +} + +func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { + var credSource string + + switch { + case len(cfg.SourceProfileName) != 0: + credSource = sourceProfileKey + case len(cfg.CredentialSource) != 0: + credSource = credentialSourceKey + case len(cfg.WebIdentityTokenFile) != 0: + credSource = webIdentityTokenFileKey + } + + if len(credSource) != 0 && len(cfg.RoleARN) == 0 { + return CredentialRequiresARNError{ + Type: credSource, + Profile: profile, + } + } + + return nil +} + +func (cfg *sharedConfig) validateCredentialType() error { + // Only one or no credential type can be defined. + if !oneOrNone( + len(cfg.SourceProfileName) != 0, + len(cfg.CredentialSource) != 0, + len(cfg.CredentialProcess) != 0, + len(cfg.WebIdentityTokenFile) != 0, + ) { + return ErrSharedConfigSourceCollision } return nil } +func (cfg *sharedConfig) hasCredentials() bool { + switch { + case len(cfg.SourceProfileName) != 0: + case len(cfg.CredentialSource) != 0: + case len(cfg.CredentialProcess) != 0: + case len(cfg.WebIdentityTokenFile) != 0: + case cfg.Creds.HasKeys(): + default: + return false + } + + return true +} + +func (cfg *sharedConfig) clearCredentialOptions() { + cfg.CredentialSource = "" + cfg.CredentialProcess = "" + cfg.WebIdentityTokenFile = "" + cfg.Creds = credentials.Value{} +} + +func (cfg *sharedConfig) clearAssumeRoleOptions() { + cfg.RoleARN = "" + cfg.ExternalID = "" + cfg.MFASerial = "" + cfg.RoleSessionName = "" + cfg.SourceProfileName = "" +} + +func oneOrNone(bs ...bool) bool { + var count int + + for _, b := range bs { + if b { + count++ + if count > 1 { + return false + } + } + } + + return true +} + +// updateString will only update the dst with the value in the section key, key +// is present in the section. +func updateString(dst *string, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.String(key) +} + +// updateBool will only update the dst with the value in the section key, key +// is present in the section. +func updateBool(dst *bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.Bool(key) +} + +// updateBoolPtr will only update the dst with the value in the section key, +// key is present in the section. +func updateBoolPtr(dst **bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = new(bool) + **dst = section.Bool(key) +} + // SharedConfigLoadError is an error for the shared config file failed to load. type SharedConfigLoadError struct { Filename string @@ -304,7 +494,8 @@ func (e SharedConfigProfileNotExistsError) Error() string { // profile contains assume role information, but that information is invalid // or not complete. type SharedConfigAssumeRoleError struct { - RoleARN string + RoleARN string + SourceProfile string } // Code is the short id of the error. @@ -314,8 +505,10 @@ func (e SharedConfigAssumeRoleError) Code() string { // Message is the description of the error func (e SharedConfigAssumeRoleError) Message() string { - return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials", - e.RoleARN) + return fmt.Sprintf( + "failed to load assume role for %s, source profile %s has no shared credentials", + e.RoleARN, e.SourceProfile, + ) } // OrigErr is the underlying error that caused the failure. 
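A sketch of telling the typed shared-config errors apart when session creation fails; the profile name is a placeholder and only a couple of the possible error types are shown:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	_, err := session.NewSessionWithOptions(session.Options{
		Profile:           "assume-example",
		SharedConfigState: session.SharedConfigEnable,
	})
	switch e := err.(type) {
	case nil:
		// Session created successfully.
	case session.AssumeRoleTokenProviderNotSetError:
		log.Fatalf("mfa_serial is set but no token provider was given: %v", e)
	case session.SharedConfigAssumeRoleError:
		log.Fatalf("assume role profile is incomplete: %v", e)
	default:
		log.Fatalf("failed to create session: %v", e)
	}
}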
@@ -327,3 +520,36 @@ func (e SharedConfigAssumeRoleError) OrigErr() error { func (e SharedConfigAssumeRoleError) Error() string { return awserr.SprintError(e.Code(), e.Message(), "", nil) } + +// CredentialRequiresARNError provides the error for shared config credentials +// that are incorrectly configured in the shared config or credentials file. +type CredentialRequiresARNError struct { + // type of credentials that were configured. + Type string + + // Profile name the credentials were in. + Profile string +} + +// Code is the short id of the error. +func (e CredentialRequiresARNError) Code() string { + return "CredentialRequiresARNError" +} + +// Message is the description of the error +func (e CredentialRequiresARNError) Message() string { + return fmt.Sprintf( + "credential type %s requires role_arn, profile %s", + e.Type, e.Profile, + ) +} + +// OrigErr is the underlying error that caused the failure. +func (e CredentialRequiresARNError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e CredentialRequiresARNError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go index 244c86da..07ea799f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go @@ -1,8 +1,7 @@ package v4 import ( - "net/http" - "strings" + "github.com/aws/aws-sdk-go/internal/strings" ) // validator houses a set of rule needed for validation of a @@ -61,7 +60,7 @@ type patterns []string // been found func (p patterns) IsValid(value string) bool { for _, pattern := range p { - if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + if strings.HasPrefixFold(value, pattern) { return true } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go new file mode 100644 index 00000000..f35fc860 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go @@ -0,0 +1,13 @@ +// +build !go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return aws.BackgroundContext() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go new file mode 100644 index 00000000..fed5c859 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go @@ -0,0 +1,13 @@ +// +build go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return r.Context() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go new file mode 100644 index 00000000..02cbd97e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go @@ -0,0 +1,63 @@ +package v4 + +import ( + "encoding/hex" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +type credentialValueProvider interface { + Get() (credentials.Value, error) +} + +// StreamSigner implements signing of event stream encoded payloads 
+type StreamSigner struct { + region string + service string + + credentials credentialValueProvider + + prevSig []byte +} + +// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages +func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner { + return &StreamSigner{ + region: region, + service: service, + credentials: credentials, + prevSig: seedSignature, + } +} + +// GetSignature takes an event stream encoded headers and payload and returns a signature +func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) { + credValue, err := s.credentials.Get() + if err != nil { + return nil, err + } + + sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date) + + keyPath := buildSigningScope(s.region, s.service, date) + + stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date) + + signature := hmacSHA256(sigKey, []byte(stringToSign)) + s.prevSig = signature + + return signature, nil +} + +func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string { + return strings.Join([]string{ + "AWS4-HMAC-SHA256-PAYLOAD", + formatTime(date), + scope, + hex.EncodeToString(prevSig), + hex.EncodeToString(hashSHA256(headers)), + hex.EncodeToString(hashSHA256(payload)), + }, "\n") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 523db79f..d71f7b3f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -76,9 +76,14 @@ import ( ) const ( + authorizationHeader = "Authorization" + authHeaderSignatureElem = "Signature=" + signatureQueryKey = "X-Amz-Signature" + authHeaderPrefix = "AWS4-HMAC-SHA256" timeFormat = "20060102T150405Z" shortTimeFormat = "20060102" + awsV4Request = "aws4_request" // emptyStringSHA256 is a SHA256 of an empty string emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` @@ -87,9 +92,9 @@ const ( var ignoredHeaders = rules{ blacklist{ mapRule{ - "Authorization": struct{}{}, - "User-Agent": struct{}{}, - "X-Amzn-Trace-Id": struct{}{}, + authorizationHeader: struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, }, }, } @@ -229,11 +234,9 @@ type signingCtx struct { DisableURIPathEscaping bool - credValues credentials.Value - isPresign bool - formattedTime string - formattedShortTime string - unsignedPayload bool + credValues credentials.Value + isPresign bool + unsignedPayload bool bodyDigest string signedHeaders string @@ -337,7 +340,7 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi } var err error - ctx.credValues, err = v4.Credentials.Get() + ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r)) if err != nil { return http.Header{}, err } @@ -532,39 +535,56 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) error { ctx.buildSignature() // depends on string to sign if ctx.isPresign { - ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature + ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature } else { parts := []string{ authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, "SignedHeaders=" + ctx.signedHeaders, - "Signature=" + ctx.signature, + authHeaderSignatureElem + ctx.signature, } - 
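A sketch of the event-stream signing API added in stream.go; the seed signature, service name, headers and payload below are placeholders:

package main

import (
	"encoding/hex"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")

	// The seed signature normally comes from the SigV4 signature of the HTTP
	// request that opened the event stream; a dummy value is used here.
	seed, err := hex.DecodeString("deadbeef")
	if err != nil {
		log.Fatal(err)
	}

	signer := v4.NewStreamSigner("us-west-2", "transcribe", seed, creds)

	// Each call signs one encoded event and rolls the previous signature
	// forward, matching buildEventStreamStringToSign above.
	sig, err := signer.GetSignature([]byte("encoded-headers"), []byte("payload"), time.Now().UTC())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("event signature:", hex.EncodeToString(sig))
}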
ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", ")) } return nil } -func (ctx *signingCtx) buildTime() { - ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) - ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) +// GetSignedRequestSignature attempts to extract the signature of the request. +// Returning an error if the request is unsigned, or unable to extract the +// signature. +func GetSignedRequestSignature(r *http.Request) ([]byte, error) { + + if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { + ps := strings.Split(auth, ", ") + for _, p := range ps { + if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { + sig := p[len(authHeaderSignatureElem):] + if len(sig) == 0 { + return nil, fmt.Errorf("invalid request signature authorization header") + } + return hex.DecodeString(sig) + } + } + } + if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { + return hex.DecodeString(sig) + } + + return nil, fmt.Errorf("request not signed") +} + +func (ctx *signingCtx) buildTime() { if ctx.isPresign { duration := int64(ctx.ExpireTime / time.Second) - ctx.Query.Set("X-Amz-Date", ctx.formattedTime) + ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time)) ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) } else { - ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) + ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time)) } } func (ctx *signingCtx) buildCredentialString() { - ctx.credentialString = strings.Join([]string{ - ctx.formattedShortTime, - ctx.Region, - ctx.ServiceName, - "aws4_request", - }, "/") + ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time) if ctx.isPresign { ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) @@ -588,8 +608,7 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { var headers []string headers = append(headers, "host") for k, v := range header { - canonicalKey := http.CanonicalHeaderKey(k) - if !r.IsValid(canonicalKey) { + if !r.IsValid(k) { continue // ignored header } if ctx.SignedHeaderVals == nil { @@ -653,19 +672,15 @@ func (ctx *signingCtx) buildCanonicalString() { func (ctx *signingCtx) buildStringToSign() { ctx.stringToSign = strings.Join([]string{ authHeaderPrefix, - ctx.formattedTime, + formatTime(ctx.Time), ctx.credentialString, - hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), + hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))), }, "\n") } func (ctx *signingCtx) buildSignature() { - secret := ctx.credValues.SecretAccessKey - date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) - region := makeHmac(date, []byte(ctx.Region)) - service := makeHmac(region, []byte(ctx.ServiceName)) - credentials := makeHmac(service, []byte("aws4_request")) - signature := makeHmac(credentials, []byte(ctx.stringToSign)) + creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time) + signature := hmacSHA256(creds, []byte(ctx.stringToSign)) ctx.signature = hex.EncodeToString(signature) } @@ -687,7 +702,11 @@ func (ctx *signingCtx) buildBodyDigest() error { if !aws.IsReaderSeekable(ctx.Body) { return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) } - hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) + hashBytes, err := makeSha256Reader(ctx.Body) + if err != nil { + return err + } + hash = hex.EncodeToString(hashBytes) } if 
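// Usage sketch: sign a request with the v4 signer and read the signature back with
// the newly exported GetSignedRequestSignature. NewSigner/Sign are the signer's
// existing public API (not shown in this hunk); credentials and endpoint are
// placeholders.
package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))

	body := strings.NewReader("{}")
	req, _ := http.NewRequest("POST", "https://dynamodb.us-east-1.amazonaws.com/", body)

	if _, err := signer.Sign(req, body, "dynamodb", "us-east-1", time.Now()); err != nil {
		panic(err)
	}

	// Parses the "Signature=" element out of the Authorization header
	// (or X-Amz-Signature for presigned URLs).
	sig, err := v4.GetSignedRequestSignature(req)
	if err != nil {
		panic(err)
	}
	fmt.Printf("signature: %x\n", sig)
}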
includeSHA256Header { @@ -722,22 +741,28 @@ func (ctx *signingCtx) removePresign() { ctx.Query.Del("X-Amz-SignedHeaders") } -func makeHmac(key []byte, data []byte) []byte { +func hmacSHA256(key []byte, data []byte) []byte { hash := hmac.New(sha256.New, key) hash.Write(data) return hash.Sum(nil) } -func makeSha256(data []byte) []byte { +func hashSHA256(data []byte) []byte { hash := sha256.New() hash.Write(data) return hash.Sum(nil) } -func makeSha256Reader(reader io.ReadSeeker) []byte { +func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { hash := sha256.New() - start, _ := reader.Seek(0, sdkio.SeekCurrent) - defer reader.Seek(start, sdkio.SeekStart) + start, err := reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + return nil, err + } + defer func() { + // ensure error is return if unable to seek back to start of payload. + _, err = reader.Seek(start, sdkio.SeekStart) + }() // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. @@ -748,7 +773,7 @@ func makeSha256Reader(reader io.ReadSeeker) []byte { io.CopyN(hash, reader, size) } - return hash.Sum(nil) + return hash.Sum(nil), nil } const doubleSpace = " " @@ -794,3 +819,28 @@ func stripExcessSpaces(vals []string) { vals[i] = string(buf[:m]) } } + +func buildSigningScope(region, service string, dt time.Time) string { + return strings.Join([]string{ + formatShortTime(dt), + region, + service, + awsV4Request, + }, "/") +} + +func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte { + kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt))) + kRegion := hmacSHA256(kDate, []byte(region)) + kService := hmacSHA256(kRegion, []byte(service)) + signingKey := hmacSHA256(kService, []byte(awsV4Request)) + return signingKey +} + +func formatShortTime(dt time.Time) string { + return dt.UTC().Format(shortTimeFormat) +} + +func formatTime(dt time.Time) string { + return dt.UTC().Format(timeFormat) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go index 8b6f2342..98751ee8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -2,18 +2,24 @@ package aws import ( "io" + "strings" "sync" "github.com/aws/aws-sdk-go/internal/sdkio" ) -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should -// only be used with an io.Reader that is also an io.Seeker. Doing so may -// cause request signature errors, or request body's not sent for GET, HEAD -// and DELETE HTTP methods. +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the +// SDK to accept an io.Reader that is not also an io.Seeker for unsigned +// streaming payload API operations. // -// Deprecated: Should only be used with io.ReadSeeker. If using for -// S3 PutObject to stream content use s3manager.Uploader instead. +// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API +// operation's input will prevent that operation being retried in the case of +// network errors, and cause operation requests to fail if the operation +// requires payload signing. +// +// Note: If using With S3 PutObject to stream an object upload The SDK's S3 +// Upload manager (s3manager.Uploader) provides support for streaming with the +// ability to retry network errors. 
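// Standalone sketch of the HMAC chain that deriveSigningKey above performs:
// the secret is folded with the short date, region, service, and the
// "aws4_request" terminator. Stdlib only; region, service, and secret are
// placeholders.
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
	"time"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func deriveSigningKey(region, service, secret string, t time.Time) []byte {
	kDate := hmacSHA256([]byte("AWS4"+secret), []byte(t.UTC().Format("20060102")))
	kRegion := hmacSHA256(kDate, []byte(region))
	kService := hmacSHA256(kRegion, []byte(service))
	return hmacSHA256(kService, []byte("aws4_request"))
}

func main() {
	fmt.Printf("signing key: %x\n", deriveSigningKey("us-east-1", "s3", "EXAMPLESECRET", time.Now()))
}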
func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { return ReaderSeekerCloser{r} } @@ -43,7 +49,8 @@ func IsReaderSeekable(r io.Reader) bool { // Read reads from the reader up to size of p. The number of bytes read, and // error if it occurred will be returned. // -// If the reader is not an io.Reader zero bytes read, and nil error will be returned. +// If the reader is not an io.Reader zero bytes read, and nil error will be +// returned. // // Performs the same functionality as io.Reader Read func (r ReaderSeekerCloser) Read(p []byte) (int, error) { @@ -199,3 +206,59 @@ func (b *WriteAtBuffer) Bytes() []byte { defer b.m.Unlock() return b.buf } + +// MultiCloser is a utility to close multiple io.Closers within a single +// statement. +type MultiCloser []io.Closer + +// Close closes all of the io.Closers making up the MultiClosers. Any +// errors that occur while closing will be returned in the order they +// occur. +func (m MultiCloser) Close() error { + var errs errors + for _, c := range m { + err := c.Close() + if err != nil { + errs = append(errs, err) + } + } + if len(errs) != 0 { + return errs + } + + return nil +} + +type errors []error + +func (es errors) Error() string { + var parts []string + for _, e := range es { + parts = append(parts, e.Error()) + } + + return strings.Join(parts, "\n") +} + +// CopySeekableBody copies the seekable body to an io.Writer +func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { + curPos, err := src.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + // copy errors may be assumed to be from the body. + n, err := io.Copy(dst, src) + if err != nil { + return n, err + } + + // seek back to the first position after reading to reset + // the body for transmission. + _, err = src.Seek(curPos, sdkio.SeekStart) + if err != nil { + return n, err + } + + return n, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index dfc3c822..c0506123 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.19.28" +const SDKVersion = "1.32.7" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go new file mode 100644 index 00000000..876dcb3f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go @@ -0,0 +1,40 @@ +// +build !go1.7 + +package context + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case BackgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +// BackgroundCtx is the common base context. 
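// Usage sketch for the aws.MultiCloser added above: close several io.Closers in
// one statement and collect every error (the second Close on the same *os.File
// fails, so Close returns a non-nil joined error).
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	f, err := os.Open(os.DevNull)
	if err != nil {
		panic(err)
	}

	closers := aws.MultiCloser{f, f}
	if err := closers.Close(); err != nil {
		fmt.Println("close errors:", err)
	}
}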
+var BackgroundCtx = new(emptyCtx) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go index f9970337..cf9fad81 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -162,7 +162,7 @@ loop: if len(tokens) == 0 { break loop } - + // if should skip is true, we skip the tokens until should skip is set to false. step = SkipTokenState } @@ -218,7 +218,7 @@ loop: // S -> equal_expr' expr_stmt' switch k.Kind { case ASTKindEqualExpr: - // assiging a value to some key + // assigning a value to some key k.AppendChild(newExpression(tok)) stack.Push(newExprStatement(k)) case ASTKindExpr: @@ -250,6 +250,13 @@ loop: if !runeCompare(tok.Raw(), openBrace) { return nil, NewParseError("expected '['") } + // If OpenScopeState is not at the start, we must mark the previous ast as complete + // + // for example: if previous ast was a skip statement; + // we should mark it as complete before we create a new statement + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } stmt := newStatement() stack.Push(stmt) @@ -304,7 +311,9 @@ loop: stmt := newCommentStatement(tok) stack.Push(stmt) default: - return nil, NewParseError(fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", k, tok)) + return nil, NewParseError( + fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", + k, tok.Type())) } if len(tokens) > 0 { @@ -314,7 +323,7 @@ loop: // this occurs when a statement has not been completed if stack.top > 1 { - return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container)) + return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) } // returns a sublist which excludes the start symbol diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go index 6bb69644..da7a4049 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go @@ -22,24 +22,24 @@ func newSkipper() skipper { } func (s *skipper) ShouldSkip(tok Token) bool { + // should skip state will be modified only if previous token was new line (NL); + // and the current token is not WhiteSpace (WS). 
if s.shouldSkip && s.prevTok.Type() == TokenNL && tok.Type() != TokenWS { - s.Continue() return false } s.prevTok = tok - return s.shouldSkip } func (s *skipper) Skip() { s.shouldSkip = true - s.prevTok = emptyToken } func (s *skipper) Continue() { s.shouldSkip = false + // empty token is assigned as we return to default state, when should skip is false s.prevTok = emptyToken } diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go new file mode 100644 index 00000000..6c443988 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go @@ -0,0 +1,12 @@ +package sdkio + +const ( + // Byte is 8 bits + Byte int64 = 1 + // KibiByte (KiB) is 1024 Bytes + KibiByte = Byte * 1024 + // MebiByte (MiB) is 1024 KiB + MebiByte = KibiByte * 1024 + // GibiByte (GiB) is 1024 MiB + GibiByte = MebiByte * 1024 +) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go new file mode 100644 index 00000000..44898eed --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go @@ -0,0 +1,15 @@ +// +build go1.10 + +package sdkmath + +import "math" + +// Round returns the nearest integer, rounding half away from zero. +// +// Special cases are: +// Round(±0) = ±0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64) float64 { + return math.Round(x) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go new file mode 100644 index 00000000..810ec7f0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go @@ -0,0 +1,56 @@ +// +build !go1.10 + +package sdkmath + +import "math" + +// Copied from the Go standard library's (Go 1.12) math/floor.go for use in +// Go version prior to Go 1.10. +const ( + uvone = 0x3FF0000000000000 + mask = 0x7FF + shift = 64 - 11 - 1 + bias = 1023 + signMask = 1 << 63 + fracMask = 1<<shift - 1 +) + +// Round returns the nearest integer, rounding half away from zero. +// +// Special cases are: +// Round(±0) = ±0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64) float64 { + // Round is a faster implementation of: + // + // func Round(x float64) float64 { + //   t := Trunc(x) + //   if Abs(x-t) >= 0.5 { + // return t + Copysign(1, x) + // } + // return t + // } + bits := math.Float64bits(x) + e := uint(bits>>shift) & mask + if e < bias { + // Round abs(x) < 1 including denormals. + bits &= signMask // +-0 + if e == bias-1 { + bits |= uvone // +-1 + } + } else if e < bias+shift { + // Round any abs(x) >= 1 containing a fractional component [0,1). + // + // Numbers with larger exponents are returned unchanged since they + // must be either an integer, infinity, or NaN. + const half = 1 << (shift - 1) + e -= bias + bits += half >> e + bits &^= fracMask >> e + } + return math.Float64frombits(bits) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go new file mode 100644 index 00000000..f4651da2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go @@ -0,0 +1,11 @@ +// +build go1.6 + +package sdkrand + +import "math/rand" + +// Read provides the stub for math.Rand.Read method support for go version's +// 1.6 and greater.
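// Quick check of the Round semantics documented above (half rounds away from
// zero). On Go 1.10+ sdkmath.Round simply delegates to math.Round, which this
// stdlib-only sketch calls directly.
package main

import (
	"fmt"
	"math"
)

func main() {
	for _, x := range []float64{2.4, 2.5, -2.5, -0.5} {
		fmt.Printf("Round(%v) = %v\n", x, math.Round(x)) // 2, 3, -3, -0
	}
}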
+func Read(r *rand.Rand, p []byte) (int, error) { + return r.Read(p) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go new file mode 100644 index 00000000..b1d93a33 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go @@ -0,0 +1,24 @@ +// +build !go1.6 + +package sdkrand + +import "math/rand" + +// Read backfills Go 1.6's math.Rand.Reader for Go 1.5 +func Read(r *rand.Rand, p []byte) (n int, err error) { + // Copy of Go standard libraries math package's read function not added to + // standard library until Go 1.6. + var pos int8 + var val int64 + for n = 0; n < len(p); n++ { + if pos == 0 { + val = r.Int63() + pos = 7 + } + p[n] = byte(val) + val >>= 8 + pos-- + } + + return n, err +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go new file mode 100644 index 00000000..d008ae27 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go @@ -0,0 +1,11 @@ +package strings + +import ( + "strings" +) + +// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, +// under Unicode case-folding. +func HasPrefixFold(s, prefix string) bool { + return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
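// Sketch of what the sdkrand.Read shim resolves to on Go 1.6 and newer: a plain
// call to (*rand.Rand).Read, filling the byte slice from the seeded source.
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	r := rand.New(rand.NewSource(1)) // deterministic seed for the example
	buf := make([]byte, 8)
	n, err := r.Read(buf)
	fmt.Println(n, err, buf)
}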
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go new file mode 100644 index 00000000..14ad0c58 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. + forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + if !c.forgotten { + delete(g.m, key) + } + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. 
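// Usage sketch for the vendored singleflight.Group above: concurrent Do calls
// with the same key share one execution of fn. The import path is internal to
// the SDK module and is shown only for illustration; golang.org/x/sync/singleflight
// exposes the same API outside it.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"

	"github.com/aws/aws-sdk-go/internal/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var calls int32
	var wg sync.WaitGroup

	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, _, shared := g.Do("refresh-creds", func() (interface{}, error) {
				atomic.AddInt32(&calls, 1)
				return "token", nil
			})
			fmt.Println(v, shared)
		}()
	}
	wg.Wait()
	fmt.Println("executions:", atomic.LoadInt32(&calls)) // usually 1
}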
+func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go new file mode 100644 index 00000000..e045f38d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go @@ -0,0 +1,53 @@ +package checksum + +import ( + "crypto/md5" + "encoding/base64" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const contentMD5Header = "Content-Md5" + +// AddBodyContentMD5Handler computes and sets the HTTP Content-MD5 header for requests that +// require it. +func AddBodyContentMD5Handler(r *request.Request) { + // if Content-MD5 header is already present, return + if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) != 0 { + return + } + + // if S3DisableContentMD5Validation flag is set, return + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + + // if request is presigned, return + if r.IsPresigned() { + return + } + + // if body is not seekable, return + if !aws.IsReaderSeekable(r.Body) { + if r.Config.Logger != nil { + r.Config.Logger.Log(fmt.Sprintf( + "Unable to compute Content-MD5 for unseekable body, S3.%s", + r.Operation.Name)) + } + return + } + + h := md5.New() + + if _, err := aws.CopySeekableBody(h, r.Body); err != nil { + r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) + return + } + + // encode the md5 checksum in base64 and set the request header. + v := base64.StdEncoding.EncodeToString(h.Sum(nil)) + r.HTTPRequest.Header.Set(contentMD5Header, v) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go index ecc7bf82..15105497 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go @@ -101,7 +101,7 @@ func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { } headers.Set(h.Name, value) } - (*hs) = decodedHeaders(headers) + *hs = decodedHeaders(headers) return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go index 4b972b2d..47433939 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go @@ -21,10 +21,24 @@ type Decoder struct { // NewDecoder initializes and returns a Decoder for decoding event // stream messages from the reader provided. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ +func NewDecoder(r io.Reader, opts ...func(*Decoder)) *Decoder { + d := &Decoder{ r: r, } + + for _, opt := range opts { + opt(d) + } + + return d +} + +// DecodeWithLogger adds a logger to be used by the decoder when decoding +// stream events. +func DecodeWithLogger(logger aws.Logger) func(*Decoder) { + return func(d *Decoder) { + d.logger = logger + } } // Decode attempts to decode a single message from the event stream reader. 
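// Stdlib sketch of the value AddBodyContentMD5Handler above attaches: the MD5
// of the request body, base64 encoded, on the "Content-Md5" header. The bucket
// URL is a placeholder.
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	body := "hello world"

	sum := md5.Sum([]byte(body))
	contentMD5 := base64.StdEncoding.EncodeToString(sum[:])

	req, _ := http.NewRequest("PUT", "https://example-bucket.s3.amazonaws.com/key", strings.NewReader(body))
	req.Header.Set("Content-Md5", contentMD5)
	fmt.Println(req.Header.Get("Content-Md5"))
}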
@@ -40,6 +54,15 @@ func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { }() } + m, err = Decode(reader, payloadBuf) + + return m, err +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if Decode fails to read +// the message from the reader. +func Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) { crc := crc32.New(crc32IEEETable) hashReader := io.TeeReader(reader, crc) @@ -72,12 +95,6 @@ func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { return m, nil } -// UseLogger specifies the Logger that that the decoder should use to log the -// message decode to. -func (d *Decoder) UseLogger(logger aws.Logger) { - d.logger = logger -} - func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { w := bytes.NewBuffer(nil) defer func() { logger.Log(w.String()) }() diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go index 150a6098..ffade3bc 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go @@ -3,61 +3,107 @@ package eventstream import ( "bytes" "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" "hash" "hash/crc32" "io" + + "github.com/aws/aws-sdk-go/aws" ) // Encoder provides EventStream message encoding. type Encoder struct { - w io.Writer + w io.Writer + logger aws.Logger headersBuf *bytes.Buffer } // NewEncoder initializes and returns an Encoder to encode Event Stream // messages to an io.Writer. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ +func NewEncoder(w io.Writer, opts ...func(*Encoder)) *Encoder { + e := &Encoder{ w: w, headersBuf: bytes.NewBuffer(nil), } + + for _, opt := range opts { + opt(e) + } + + return e +} + +// EncodeWithLogger adds a logger to be used by the encode when decoding +// stream events. +func EncodeWithLogger(logger aws.Logger) func(*Encoder) { + return func(d *Encoder) { + d.logger = logger + } } // Encode encodes a single EventStream message to the io.Writer the Encoder // was created with. An error is returned if writing the message fails. 
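// Sketch of the functional-option construction that replaces the removed
// UseLogger method: loggers are now handed to NewDecoder/NewEncoder directly.
// The eventstream package is private to the SDK and aws.NewDefaultLogger is
// assumed from the SDK's aws package; shown only to illustrate the option style.
package main

import (
	"bytes"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func main() {
	logger := aws.NewDefaultLogger()
	var buf bytes.Buffer

	enc := eventstream.NewEncoder(&buf, eventstream.EncodeWithLogger(logger))
	dec := eventstream.NewDecoder(&buf, eventstream.DecodeWithLogger(logger))

	_, _ = enc, dec
}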
-func (e *Encoder) Encode(msg Message) error { +func (e *Encoder) Encode(msg Message) (err error) { e.headersBuf.Reset() - err := encodeHeaders(e.headersBuf, msg.Headers) - if err != nil { + writer := e.w + if e.logger != nil { + encodeMsgBuf := bytes.NewBuffer(nil) + writer = io.MultiWriter(writer, encodeMsgBuf) + defer func() { + logMessageEncode(e.logger, encodeMsgBuf, msg, err) + }() + } + + if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil { return err } crc := crc32.New(crc32IEEETable) - hashWriter := io.MultiWriter(e.w, crc) + hashWriter := io.MultiWriter(writer, crc) headersLen := uint32(e.headersBuf.Len()) payloadLen := uint32(len(msg.Payload)) - if err := encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { + if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { return err } if headersLen > 0 { - if _, err := io.Copy(hashWriter, e.headersBuf); err != nil { + if _, err = io.Copy(hashWriter, e.headersBuf); err != nil { return err } } if payloadLen > 0 { - if _, err := hashWriter.Write(msg.Payload); err != nil { + if _, err = hashWriter.Write(msg.Payload); err != nil { return err } } msgCRC := crc.Sum32() - return binary.Write(e.w, binary.BigEndian, msgCRC) + return binary.Write(writer, binary.BigEndian, msgCRC) +} + +func logMessageEncode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Message to encode:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(msg); err != nil { + fmt.Fprintf(w, "Failed to get encoded message, %v\n", err) + } + + if encodeErr != nil { + fmt.Fprintf(w, "Encode error: %v\n", encodeErr) + return + } + + fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes())) } func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { @@ -86,7 +132,9 @@ func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) return nil } -func encodeHeaders(w io.Writer, headers Headers) error { +// EncodeHeaders writes the header values to the writer encoded in the event +// stream format. Returns an error if a header fails to encode. +func EncodeHeaders(w io.Writer, headers Headers) error { for _, h := range headers { hn := headerName{ Len: uint8(len(h.Name)), diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go index 5ea5a988..34c2e89d 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go @@ -1,6 +1,9 @@ package eventstreamapi -import "fmt" +import ( + "fmt" + "sync" +) type messageError struct { code string @@ -22,3 +25,53 @@ func (e messageError) Error() string { func (e messageError) OrigErr() error { return nil } + +// OnceError wraps the behavior of recording an error +// once and signal on a channel when this has occurred. +// Signaling is done by closing of the channel. +// +// Type is safe for concurrent usage. +type OnceError struct { + mu sync.RWMutex + err error + ch chan struct{} +} + +// NewOnceError return a new OnceError +func NewOnceError() *OnceError { + return &OnceError{ + ch: make(chan struct{}, 1), + } +} + +// Err acquires a read-lock and returns an +// error if one has been set. 
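// Round-trip sketch for the Encode/Decode paths above: one message with a header
// and payload is written to a buffer and read back. Uses the vendored (private)
// eventstream package purely for illustration.
package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func main() {
	var buf bytes.Buffer

	msg := eventstream.Message{Payload: []byte(`{"ok":true}`)}
	msg.Headers.Set(":event-type", eventstream.StringValue("Records"))

	if err := eventstream.NewEncoder(&buf).Encode(msg); err != nil {
		panic(err)
	}

	out, err := eventstream.NewDecoder(&buf).Decode(nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d header(s), payload %s\n", len(out.Headers), out.Payload)
}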
+func (e *OnceError) Err() error { + e.mu.RLock() + err := e.err + e.mu.RUnlock() + + return err +} + +// SetError acquires a write-lock and will set +// the underlying error value if one has not been set. +func (e *OnceError) SetError(err error) { + if err == nil { + return + } + + e.mu.Lock() + if e.err == nil { + e.err = err + close(e.ch) + } + e.mu.Unlock() +} + +// ErrorSet returns a channel that will be used to signal +// that an error has been set. This channel will be closed +// when the error value has been set for OnceError. +func (e *OnceError) ErrorSet() <-chan struct{} { + return e.ch +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go similarity index 77% rename from vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go rename to vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go index 97937c8e..bb8ea5da 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go @@ -2,9 +2,7 @@ package eventstreamapi import ( "fmt" - "io" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/eventstream" ) @@ -15,27 +13,8 @@ type Unmarshaler interface { UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error } -// EventStream headers with specific meaning to async API functionality. -const ( - MessageTypeHeader = `:message-type` // Identifies type of message. - EventMessageType = `event` - ErrorMessageType = `error` - ExceptionMessageType = `exception` - - // Message Events - EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". - - // Message Error - ErrorCodeHeader = `:error-code` - ErrorMessageHeader = `:error-message` - - // Message Exception - ExceptionTypeHeader = `:exception-type` -) - // EventReader provides reading from the EventStream of an reader. type EventReader struct { - reader io.ReadCloser decoder *eventstream.Decoder unmarshalerForEventType func(string) (Unmarshaler, error) @@ -47,27 +26,18 @@ type EventReader struct { // NewEventReader returns a EventReader built from the reader and unmarshaler // provided. Use ReadStream method to start reading from the EventStream. func NewEventReader( - reader io.ReadCloser, + decoder *eventstream.Decoder, payloadUnmarshaler protocol.PayloadUnmarshaler, unmarshalerForEventType func(string) (Unmarshaler, error), ) *EventReader { return &EventReader{ - reader: reader, - decoder: eventstream.NewDecoder(reader), + decoder: decoder, payloadUnmarshaler: payloadUnmarshaler, unmarshalerForEventType: unmarshalerForEventType, payloadBuf: make([]byte, 10*1024), } } -// UseLogger instructs the EventReader to use the logger and log level -// specified. -func (r *EventReader) UseLogger(logger aws.Logger, logLevel aws.LogLevelType) { - if logger != nil && logLevel.Matches(aws.LogDebugWithEventStreamBody) { - r.decoder.UseLogger(logger) - } -} - // ReadEvent attempts to read a message from the EventStream and return the // unmarshaled event value that the message is for. 
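// Usage sketch for OnceError above: the first non-nil SetError wins and closes
// the ErrorSet channel, so other goroutines can block on it. The import path is
// the SDK-private package, shown only for illustration.
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi"
)

func main() {
	oe := eventstreamapi.NewOnceError()

	go oe.SetError(errors.New("stream closed by peer"))

	<-oe.ErrorSet() // closed once an error has been recorded
	fmt.Println("got:", oe.Err())
}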
// @@ -95,8 +65,7 @@ func (r *EventReader) ReadEvent() (event interface{}, err error) { case EventMessageType: return r.unmarshalEventMessage(msg) case ExceptionMessageType: - err = r.unmarshalEventException(msg) - return nil, err + return nil, r.unmarshalEventException(msg) case ErrorMessageType: return nil, r.unmarshalErrorMessage(msg) default: @@ -174,11 +143,6 @@ func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) return msgErr } -// Close closes the EventReader's EventStream reader. -func (r *EventReader) Close() error { - return r.reader.Close() -} - // GetHeaderString returns the value of the header as a string. If the header // is not set or the value is not a string an error will be returned. func GetHeaderString(msg eventstream.Message, headerName string) (string, error) { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go new file mode 100644 index 00000000..e46b8acc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go @@ -0,0 +1,23 @@ +package eventstreamapi + +// EventStream headers with specific meaning to async API functionality. +const ( + ChunkSignatureHeader = `:chunk-signature` // chunk signature for message + DateHeader = `:date` // Date header for signature + + // Message header and values + MessageTypeHeader = `:message-type` // Identifies type of message. + EventMessageType = `event` + ErrorMessageType = `error` + ExceptionMessageType = `exception` + + // Message Events + EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". + + // Message Error + ErrorCodeHeader = `:error-code` + ErrorMessageHeader = `:error-message` + + // Message Exception + ExceptionTypeHeader = `:exception-type` +) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go new file mode 100644 index 00000000..3a7ba5cd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go @@ -0,0 +1,123 @@ +package eventstreamapi + +import ( + "bytes" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +var timeNow = time.Now + +// StreamSigner defines an interface for the implementation of signing of event stream payloads +type StreamSigner interface { + GetSignature(headers, payload []byte, date time.Time) ([]byte, error) +} + +// SignEncoder envelopes event stream messages +// into an event stream message payload with included +// signature headers using the provided signer and encoder. +type SignEncoder struct { + signer StreamSigner + encoder Encoder + bufEncoder *BufferEncoder + + closeErr error + closed bool +} + +// NewSignEncoder returns a new SignEncoder using the provided stream signer and +// event stream encoder. +func NewSignEncoder(signer StreamSigner, encoder Encoder) *SignEncoder { + // TODO: Need to pass down logging + + return &SignEncoder{ + signer: signer, + encoder: encoder, + bufEncoder: NewBufferEncoder(), + } +} + +// Close encodes a final event stream signing envelope with an empty event stream +// payload. This final end-frame is used to mark the conclusion of the stream. 
+func (s *SignEncoder) Close() error { + if s.closed { + return s.closeErr + } + + if err := s.encode([]byte{}); err != nil { + if strings.Contains(err.Error(), "on closed pipe") { + return nil + } + + s.closeErr = err + s.closed = true + return s.closeErr + } + + return nil +} + +// Encode takes the provided message and add envelopes the message +// with the required signature. +func (s *SignEncoder) Encode(msg eventstream.Message) error { + payload, err := s.bufEncoder.Encode(msg) + if err != nil { + return err + } + + return s.encode(payload) +} + +func (s SignEncoder) encode(payload []byte) error { + date := timeNow() + + var msg eventstream.Message + msg.Headers.Set(DateHeader, eventstream.TimestampValue(date)) + msg.Payload = payload + + var headers bytes.Buffer + if err := eventstream.EncodeHeaders(&headers, msg.Headers); err != nil { + return err + } + + sig, err := s.signer.GetSignature(headers.Bytes(), msg.Payload, date) + if err != nil { + return err + } + + msg.Headers.Set(ChunkSignatureHeader, eventstream.BytesValue(sig)) + + return s.encoder.Encode(msg) +} + +// BufferEncoder is a utility that provides a buffered +// event stream encoder +type BufferEncoder struct { + encoder Encoder + buffer *bytes.Buffer +} + +// NewBufferEncoder returns a new BufferEncoder initialized +// with a 1024 byte buffer. +func NewBufferEncoder() *BufferEncoder { + buf := bytes.NewBuffer(make([]byte, 1024)) + return &BufferEncoder{ + encoder: eventstream.NewEncoder(buf), + buffer: buf, + } +} + +// Encode returns the encoded message as a byte slice. +// The returned byte slice will be modified on the next encode call +// and should not be held onto. +func (e *BufferEncoder) Encode(msg eventstream.Message) ([]byte, error) { + e.buffer.Reset() + + if err := e.encoder.Encode(msg); err != nil { + return nil, err + } + + return e.buffer.Bytes(), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go new file mode 100644 index 00000000..433bb163 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go @@ -0,0 +1,129 @@ +package eventstreamapi + +import ( + "fmt" + "io" + "sync" + + "github.com/aws/aws-sdk-go/aws" +) + +// StreamWriter provides concurrent safe writing to an event stream. +type StreamWriter struct { + eventWriter *EventWriter + stream chan eventWriteAsyncReport + + done chan struct{} + closeOnce sync.Once + err *OnceError + + streamCloser io.Closer +} + +// NewStreamWriter returns a StreamWriter for the event writer, and stream +// closer provided. +func NewStreamWriter(eventWriter *EventWriter, streamCloser io.Closer) *StreamWriter { + w := &StreamWriter{ + eventWriter: eventWriter, + streamCloser: streamCloser, + stream: make(chan eventWriteAsyncReport), + done: make(chan struct{}), + err: NewOnceError(), + } + go w.writeStream() + + return w +} + +// Close terminates the writers ability to write new events to the stream. Any +// future call to Send will fail with an error. +func (w *StreamWriter) Close() error { + w.closeOnce.Do(w.safeClose) + return w.Err() +} + +func (w *StreamWriter) safeClose() { + close(w.done) +} + +// ErrorSet returns a channel which will be closed +// if an error occurs. 
+func (w *StreamWriter) ErrorSet() <-chan struct{} { + return w.err.ErrorSet() +} + +// Err returns any error that occurred while attempting to write an event to the +// stream. +func (w *StreamWriter) Err() error { + return w.err.Err() +} + +// Send writes a single event to the stream returning an error if the write +// failed. +// +// Send may be called concurrently. Events will be written to the stream +// safely. +func (w *StreamWriter) Send(ctx aws.Context, event Marshaler) error { + if err := w.Err(); err != nil { + return err + } + + resultCh := make(chan error) + wrapped := eventWriteAsyncReport{ + Event: event, + Result: resultCh, + } + + select { + case w.stream <- wrapped: + case <-ctx.Done(): + return ctx.Err() + case <-w.done: + return fmt.Errorf("stream closed, unable to send event") + } + + select { + case err := <-resultCh: + return err + case <-ctx.Done(): + return ctx.Err() + case <-w.done: + return fmt.Errorf("stream closed, unable to send event") + } +} + +func (w *StreamWriter) writeStream() { + defer w.Close() + + for { + select { + case wrapper := <-w.stream: + err := w.eventWriter.WriteEvent(wrapper.Event) + wrapper.ReportResult(w.done, err) + if err != nil { + w.err.SetError(err) + return + } + + case <-w.done: + if err := w.streamCloser.Close(); err != nil { + w.err.SetError(err) + } + return + } + } +} + +type eventWriteAsyncReport struct { + Event Marshaler + Result chan<- error +} + +func (e eventWriteAsyncReport) ReportResult(cancel <-chan struct{}, err error) bool { + select { + case e.Result <- err: + return true + case <-cancel: + return false + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go new file mode 100644 index 00000000..10a3823d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go @@ -0,0 +1,109 @@ +package eventstreamapi + +import ( + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +// Marshaler provides a marshaling interface for event types to event stream +// messages. +type Marshaler interface { + MarshalEvent(protocol.PayloadMarshaler) (eventstream.Message, error) +} + +// Encoder is an stream encoder that will encode an event stream message for +// the transport. +type Encoder interface { + Encode(eventstream.Message) error +} + +// EventWriter provides a wrapper around the underlying event stream encoder +// for an io.WriteCloser. +type EventWriter struct { + encoder Encoder + payloadMarshaler protocol.PayloadMarshaler + eventTypeFor func(Marshaler) (string, error) +} + +// NewEventWriter returns a new event stream writer, that will write to the +// writer provided. Use the WriteEvent method to write an event to the stream. +func NewEventWriter(encoder Encoder, pm protocol.PayloadMarshaler, eventTypeFor func(Marshaler) (string, error), +) *EventWriter { + return &EventWriter{ + encoder: encoder, + payloadMarshaler: pm, + eventTypeFor: eventTypeFor, + } +} + +// WriteEvent writes an event to the stream. Returns an error if the event +// fails to marshal into a message, or writing to the underlying writer fails. 
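// Generic stdlib sketch of the pattern StreamWriter.Send uses above: every send
// carries its own result channel, and both sides also select on the writer's
// done channel (and the caller's context) so a closed stream never strands a
// caller. Names here are illustrative, not SDK API.
package main

import (
	"context"
	"errors"
	"fmt"
)

type writeReq struct {
	payload string
	result  chan<- error
}

func main() {
	reqs := make(chan writeReq)
	done := make(chan struct{})

	go func() { // single writer goroutine, like writeStream above
		for {
			select {
			case r := <-reqs:
				r.result <- nil // pretend the encode and write succeeded
			case <-done:
				return
			}
		}
	}()

	send := func(ctx context.Context, payload string) error {
		res := make(chan error)
		select {
		case reqs <- writeReq{payload, res}:
		case <-ctx.Done():
			return ctx.Err()
		case <-done:
			return errors.New("stream closed")
		}
		select {
		case err := <-res:
			return err
		case <-ctx.Done():
			return ctx.Err()
		case <-done:
			return errors.New("stream closed")
		}
	}

	fmt.Println(send(context.Background(), "event-1")) // <nil>
	close(done)                                        // later sends fail instead of blocking
}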
+func (w *EventWriter) WriteEvent(event Marshaler) error { + msg, err := w.marshal(event) + if err != nil { + return err + } + + return w.encoder.Encode(msg) +} + +func (w *EventWriter) marshal(event Marshaler) (eventstream.Message, error) { + eventType, err := w.eventTypeFor(event) + if err != nil { + return eventstream.Message{}, err + } + + msg, err := event.MarshalEvent(w.payloadMarshaler) + if err != nil { + return eventstream.Message{}, err + } + + msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) + return msg, nil +} + +//type EventEncoder struct { +// encoder Encoder +// ppayloadMarshaler protocol.PayloadMarshaler +// eventTypeFor func(Marshaler) (string, error) +//} +// +//func (e EventEncoder) Encode(event Marshaler) error { +// msg, err := e.marshal(event) +// if err != nil { +// return err +// } +// +// return w.encoder.Encode(msg) +//} +// +//func (e EventEncoder) marshal(event Marshaler) (eventstream.Message, error) { +// eventType, err := w.eventTypeFor(event) +// if err != nil { +// return eventstream.Message{}, err +// } +// +// msg, err := event.MarshalEvent(w.payloadMarshaler) +// if err != nil { +// return eventstream.Message{}, err +// } +// +// msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) +// return msg, nil +//} +// +//func (w *EventWriter) marshal(event Marshaler) (eventstream.Message, error) { +// eventType, err := w.eventTypeFor(event) +// if err != nil { +// return eventstream.Message{}, err +// } +// +// msg, err := event.MarshalEvent(w.payloadMarshaler) +// if err != nil { +// return eventstream.Message{}, err +// } +// +// msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) +// return msg, nil +//} +// diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go index e3fc0766..9f509d8f 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go @@ -461,6 +461,11 @@ func (v *TimestampValue) decode(r io.Reader) error { return nil } +// MarshalJSON implements the json.Marshaler interface +func (v TimestampValue) MarshalJSON() ([]byte, error) { + return []byte(v.String()), nil +} + func timeFromEpochMilli(t int64) time.Time { secs := t / 1e3 msec := t % 1e3 diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go index 2dc012a6..25c9783c 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go @@ -27,7 +27,7 @@ func (m *Message) rawMessage() (rawMessage, error) { if len(m.Headers) > 0 { var headers bytes.Buffer - if err := encodeHeaders(&headers, m.Headers); err != nil { + if err := EncodeHeaders(&headers, m.Headers); err != nil { return rawMessage{}, err } raw.Headers = headers.Bytes() diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go new file mode 100644 index 00000000..864fb670 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -0,0 +1,296 @@ +// Package jsonutil provides JSON serialization of AWS requests and responses. 
+package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" +) + +var timeType = reflect.ValueOf(time.Time{}).Type() +var byteSliceType = reflect.ValueOf([]byte{}).Type() + +// BuildJSON builds a JSON string for a given object v. +func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + origVal := value + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(origVal, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + buf.WriteByte('{') + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + + // This allocates the most memory. + // Additionally, we cannot skip nil fields due to + // idempotency auto filling. 
+ field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // ignore non-body elements + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(member, field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(&token) + } + + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + writeString(name, buf) + buf.WriteString(`:`) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + buf.WriteString("}") + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + sv := sortedValues(value.MapKeys()) + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + writeString(k.String(), buf) + buf.WriteString(`:`) + + buildAny(value.MapIndex(k), buf, "") + } + + buf.WriteString("}") + + return nil +} + +func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + // prevents allocation on the heap. + scratch := [64]byte{} + switch value := reflect.Indirect(v); value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + if value.Bool() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + case reflect.Int64: + buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) + case reflect.Float64: + f := value.Float() + if math.IsInf(f, 0) || math.IsNaN(f) { + return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} + } + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) + default: + switch converted := value.Interface().(type) { + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.UnixTimeFormatName + } + + ts := protocol.FormatTime(format, converted) + if format != protocol.UnixTimeFormatName { + ts = `"` + ts + `"` + } + + buf.WriteString(ts) + case []byte: + if !value.IsNil() { + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. 
+ enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(converted) + enc.Close() + } + buf.WriteByte('"') + } + case aws.JSONValue: + str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) + if err != nil { + return fmt.Errorf("unable to encode JSONValue, %v", err) + } + buf.WriteString(str) + default: + return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) + } + } + return nil +} + +var hex = "0123456789abcdef" + +func writeString(s string, buf *bytes.Buffer) { + buf.WriteByte('"') + for i := 0; i < len(s); i++ { + if s[i] == '"' { + buf.WriteString(`\"`) + } else if s[i] == '\\' { + buf.WriteString(`\\`) + } else if s[i] == '\b' { + buf.WriteString(`\b`) + } else if s[i] == '\f' { + buf.WriteString(`\f`) + } else if s[i] == '\r' { + buf.WriteString(`\r`) + } else if s[i] == '\t' { + buf.WriteString(`\t`) + } else if s[i] == '\n' { + buf.WriteString(`\n`) + } else if s[i] < 32 { + buf.WriteString("\\u00") + buf.WriteByte(hex[s[i]>>4]) + buf.WriteByte(hex[s[i]&0xF]) + } else { + buf.WriteByte(s[i]) + } + } + buf.WriteByte('"') +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go new file mode 100644 index 00000000..5e949969 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -0,0 +1,282 @@ +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in +// type. The value to unmarshal the json document into must be a pointer to the +// type. +func UnmarshalJSONError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := json.NewDecoder(body).Decode(v) + if err != nil { + msg := "failed decoding error message" + if err == io.EOF { + msg = "error message missing" + err = nil + } + return awserr.NewUnmarshalError(err, msg, errBuf.Bytes()) + } + + return nil +} + +// UnmarshalJSON reads a stream and unmarshals the results in object v. +func UnmarshalJSON(v interface{}, stream io.Reader) error { + var out interface{} + + err := json.NewDecoder(stream).Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "") +} + +// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the +// object v. Ignores casing for structure members. 
+func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { + var out interface{} + + err := json.NewDecoder(stream).Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{ + caseInsensitive: true, + }.unmarshalAny(reflect.ValueOf(v), out, "") +} + +type unmarshaler struct { + caseInsensitive bool +} + +func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return u.unmarshalStruct(value, data, tag) + case "list": + return u.unmarshalList(value, data, tag) + case "map": + return u.unmarshalMap(value, data, tag) + default: + return u.unmarshalScalar(value, data, tag) + } +} + +func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if u.caseInsensitive { + if _, ok := mapData[name]; !ok { + // Fallback to uncased name search if the exact name didn't match. 
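A short sketch of the case-insensitive path above, assuming a hypothetical output shape: the lower-cased "message" key still binds to the modeled Message member because of the EqualFold fallback.

package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
)

// output is a hypothetical modeled shape with a single Message member.
type output struct {
	Message *string `locationName:"Message" type:"string"`
}

func main() {
	var out output

	// "message" does not match the modeled "Message" exactly; the
	// case-insensitive fallback above still binds it.
	err := jsonutil.UnmarshalJSONCaseInsensitive(&out, strings.NewReader(`{"message":"ok"}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(*out.Message) // ok
}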
+ for kn, v := range mapData { + if strings.EqualFold(kn, name) { + mapData[name] = v + } + } + } + } + + member := value.FieldByIndex(field.Index) + err := u.unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := u.unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + u.unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + + switch d := data.(type) { + case nil: + return nil // nothing to do here + case string: + switch value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + // No need to use escaping as the value is a non-quoted string. 
+ v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) + if err != nil { + return err + } + value.Set(reflect.ValueOf(v)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case float64: + switch value.Interface().(type) { + case *int64: + di := int64(d) + value.Set(reflect.ValueOf(&di)) + case *float64: + value.Set(reflect.ValueOf(&d)) + case *time.Time: + // Time unmarshaled from a float64 can only be epoch seconds + t := time.Unix(int64(d), 0).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go index e21614a1..0ea0647a 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go @@ -64,7 +64,7 @@ func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error metadata.ClientInfo{}, request.Handlers{}, nil, - &request.Operation{HTTPMethod: "GET"}, + &request.Operation{HTTPMethod: "PUT"}, v, nil, ) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go new file mode 100644 index 00000000..9d521dcb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go @@ -0,0 +1,49 @@ +package protocol + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RequireHTTPMinProtocol request handler is used to enforce that +// the target endpoint supports the given major and minor HTTP protocol version. +type RequireHTTPMinProtocol struct { + Major, Minor int +} + +// Handler will mark the request.Request with an error if the +// target endpoint did not connect with the required HTTP protocol +// major and minor version. +func (p RequireHTTPMinProtocol) Handler(r *request.Request) { + if r.Error != nil || r.HTTPResponse == nil { + return + } + + if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") { + r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) + } + + if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor { + r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) + } +} + +// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint +// did not match the required HTTP major and minor protocol version. 
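A sketch of how the new RequireHTTPMinProtocol handler above could be wired into a request lifecycle. Attaching it to the ValidateResponse handler list is an assumption about a reasonable hook point, not something the SDK prescribes; the handler name is made up.

package sketch

import (
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol"
)

// requireHTTP2 adds the minimum-protocol check so any response that did not
// arrive over HTTP/2 or later is turned into a request error. A caller could
// pass a client's embedded Handlers, e.g. requireHTTP2(&svc.Handlers).
func requireHTTP2(handlers *request.Handlers) {
	handlers.ValidateResponse.PushBackNamed(request.NamedHandler{
		Name: "sketch.RequireHTTP2",
		Fn:   protocol.RequireHTTPMinProtocol{Major: 2}.Handler,
	})
}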
+const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError" + +func newMinHTTPProtoError(major, minor int, r *request.Request) error { + return awserr.NewRequestFailure( + awserr.New("MinimumHTTPProtocolError", + fmt.Sprintf( + "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", + major, minor, r.HTTPResponse.Proto, + ), + nil, + ), + r.HTTPResponse.StatusCode, r.RequestID, + ) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go index 60e5b09d..d40346a7 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -1,7 +1,7 @@ // Package query provides serialization of AWS query requests, and responses. package query -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go import ( "net/url" @@ -21,7 +21,7 @@ func Build(r *request.Request) { "Version": {r.ClientInfo.APIVersion}, } if err := queryutil.Parse(body, r.Params, false); err != nil { - r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) return } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go index 3495c730..9231e95d 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -1,6 +1,6 @@ package query -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go import ( "encoding/xml" @@ -24,7 +24,7 @@ func Unmarshal(r *request.Request) { err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed decoding Query response", err), + awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), r.HTTPResponse.StatusCode, r.RequestID, ) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go index 46d354e8..831b0110 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -2,73 +2,68 @@ package query import ( "encoding/xml" - "io/ioutil" + "fmt" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" ) +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + type xmlErrorResponse struct { - XMLName xml.Name `xml:"ErrorResponse"` - Code string `xml:"Error>Code"` - Message string 
`xml:"Error>Message"` - RequestID string `xml:"RequestId"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` } -type xmlServiceUnavailableResponse struct { - XMLName xml.Name `xml:"ServiceUnavailableException"` +type xmlResponseError struct { + xmlErrorResponse } -// UnmarshalErrorHandler is a name request handler to unmarshal request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} +func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + const svcUnavailableTagName = "ServiceUnavailableException" + const errorResponseTagName = "ErrorResponse" + + switch start.Name.Local { + case svcUnavailableTagName: + e.Code = svcUnavailableTagName + e.Message = "service is unavailable" + return d.Skip() + + case errorResponseTagName: + return d.DecodeElement(&e.xmlErrorResponse, &start) + + default: + return fmt.Errorf("unknown error response tag, %v", start) + } +} // UnmarshalError unmarshals an error response for an AWS Query service. func UnmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + var respErr xmlResponseError + err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed to read from query HTTP response body", err), + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), r.HTTPResponse.StatusCode, r.RequestID, ) return } - // First check for specific error - resp := xmlErrorResponse{} - decodeErr := xml.Unmarshal(bodyBytes, &resp) - if decodeErr == nil { - reqID := resp.RequestID - if reqID == "" { - reqID = r.RequestID - } - r.Error = awserr.NewRequestFailure( - awserr.New(resp.Code, resp.Message, nil), - r.HTTPResponse.StatusCode, - reqID, - ) - return - } - - // Check for unhandled error - servUnavailResp := xmlServiceUnavailableResponse{} - unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) - if unavailErr == nil { - r.Error = awserr.NewRequestFailure( - awserr.New("ServiceUnavailableException", "service is unavailable", nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return + reqID := respErr.RequestID + if len(reqID) == 0 { + reqID = r.RequestID } - // Failed to retrieve any error message from the response body r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", - "failed to decode query XML error response", decodeErr), + awserr.New(respErr.Code, respErr.Message, nil), r.HTTPResponse.StatusCode, - r.RequestID, + reqID, ) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index b80f84fb..1301b149 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -25,6 +25,8 @@ var noEscape [256]bool var errValueNotSet = fmt.Errorf("value not set") +var byteSliceType = reflect.TypeOf([]byte{}) + func init() { for i := 0; i < len(noEscape); i++ { // AWS expects every character except these to be escaped @@ -94,6 +96,14 @@ func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bo continue } + // Support the ability to customize values to be marshaled as a + // blob even though they were modeled as a string. 
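From the caller's side, the reworked Query error unmarshaler above surfaces either a typed service error (code and message from the ErrorResponse XML) or a serialization failure. A small sketch of inspecting that result; the err value is assumed to come from some Query-protocol API call.

package sketch

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
)

// describeQueryError distinguishes a decode failure from a service error
// returned by the unmarshaler above.
func describeQueryError(err error) {
	reqErr, ok := err.(awserr.RequestFailure)
	if !ok {
		return
	}
	if reqErr.Code() == request.ErrCodeSerialization {
		fmt.Println("response body could not be decoded:", reqErr.OrigErr())
		return
	}
	fmt.Printf("service error %s (%s), status %d, request id %s\n",
		reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
}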
Required for S3 + // API operations like SSECustomerKey is modeled as stirng but + // required to be base64 encoded in request. + if field.Tag.Get("marshal-as") == "blob" { + m = m.Convert(byteSliceType) + } + var err error switch field.Tag.Get("location") { case "headers": // header maps @@ -137,7 +147,7 @@ func buildBody(r *request.Request, v reflect.Value) { case string: r.SetStringBody(reader) default: - r.Error = awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to encode REST request", fmt.Errorf("unknown payload type %s", payload.Type())) } @@ -152,7 +162,7 @@ func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect. if err == errValueNotSet { return nil } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } name = strings.TrimSpace(name) @@ -170,7 +180,7 @@ func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) if err == errValueNotSet { continue } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } keyStr := strings.TrimSpace(key.String()) @@ -186,7 +196,7 @@ func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) e if err == errValueNotSet { return nil } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) @@ -219,7 +229,7 @@ func buildQueryString(query url.Values, v reflect.Value, name string, tag reflec if err == errValueNotSet { return nil } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } query.Set(name, str) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index 33fd53b1..92f8b4d9 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -15,6 +15,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + awsStrings "github.com/aws/aws-sdk-go/internal/strings" "github.com/aws/aws-sdk-go/private/protocol" ) @@ -28,7 +29,9 @@ var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta func Unmarshal(r *request.Request) { if r.DataFilled() { v := reflect.Indirect(reflect.ValueOf(r.Data)) - unmarshalBody(r, v) + if err := unmarshalBody(r, v); err != nil { + r.Error = err + } } } @@ -40,12 +43,21 @@ func UnmarshalMeta(r *request.Request) { r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") } if r.DataFilled() { - v := reflect.Indirect(reflect.ValueOf(r.Data)) - unmarshalLocationElements(r, v) + if err := UnmarshalResponse(r.HTTPResponse, r.Data, aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil { + r.Error = err + } } } -func unmarshalBody(r *request.Request, v reflect.Value) { +// UnmarshalResponse attempts to unmarshal the REST response headers to +// the data type passed in. 
The type must be a pointer. An error is returned +// with any error unmarshaling the response into the target datatype. +func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error { + v := reflect.Indirect(reflect.ValueOf(data)) + return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps) +} + +func unmarshalBody(r *request.Request, v reflect.Value) error { if field, ok := v.Type().FieldByName("_"); ok { if payloadName := field.Tag.Get("payload"); payloadName != "" { pfield, _ := v.Type().FieldByName(payloadName) @@ -57,35 +69,38 @@ func unmarshalBody(r *request.Request, v reflect.Value) { defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - } else { - payload.Set(reflect.ValueOf(b)) + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } + + payload.Set(reflect.ValueOf(b)) + case *string: defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - } else { - str := string(b) - payload.Set(reflect.ValueOf(&str)) + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } + + str := string(b) + payload.Set(reflect.ValueOf(&str)) + default: switch payload.Type().String() { case "io.ReadCloser": payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + case "io.ReadSeeker": b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", + return awserr.New(request.ErrCodeSerialization, "failed to read response body", err) - return } payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) + default: io.Copy(ioutil.Discard, r.HTTPResponse.Body) - defer r.HTTPResponse.Body.Close() - r.Error = awserr.New("SerializationError", + r.HTTPResponse.Body.Close() + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", fmt.Errorf("unknown payload type %s", payload.Type())) } @@ -94,9 +109,11 @@ func unmarshalBody(r *request.Request, v reflect.Value) { } } } + + return nil } -func unmarshalLocationElements(r *request.Request, v reflect.Value) { +func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error { for i := 0; i < v.NumField(); i++ { m, field := v.Field(i), v.Type().Field(i) if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { @@ -111,26 +128,25 @@ func unmarshalLocationElements(r *request.Request, v reflect.Value) { switch field.Tag.Get("location") { case "statusCode": - unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + unmarshalStatusCode(m, resp.StatusCode) + case "header": - err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) + err := unmarshalHeader(m, resp.Header.Get(name), field.Tag) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - break + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } + case "headers": prefix := field.Tag.Get("locationName") - err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - break + awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } } } - if r.Error != nil { - return - } } + + 
return nil } func unmarshalStatusCode(v reflect.Value, statusCode int) { @@ -145,29 +161,45 @@ func unmarshalStatusCode(v reflect.Value, statusCode int) { } } -func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error { + if len(headers) == 0 { + return nil + } switch r.Interface().(type) { case map[string]*string: // we only support string map value types out := map[string]*string{} for k, v := range headers { - k = http.CanonicalHeaderKey(k) - if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + if awsStrings.HasPrefixFold(k, prefix) { + if normalize == true { + k = strings.ToLower(k) + } else { + k = http.CanonicalHeaderKey(k) + } out[k[len(prefix):]] = &v[0] } } - r.Set(reflect.ValueOf(out)) + if len(out) != 0 { + r.Set(reflect.ValueOf(out)) + } + } return nil } func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { - isJSONValue := tag.Get("type") == "jsonvalue" - if isJSONValue { + switch tag.Get("type") { + case "jsonvalue": if len(header) == 0 { return nil } - } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil + case "blob": + if len(header) == 0 { + return nil + } + default: + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } } switch v.Interface().(type) { @@ -178,7 +210,7 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro if err != nil { return err } - v.Set(reflect.ValueOf(&b)) + v.Set(reflect.ValueOf(b)) case *bool: b, err := strconv.ParseBool(header) if err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go index b0f4e245..b1ae3648 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -2,8 +2,8 @@ // requests and responses. 
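A sketch of the newly exported rest.UnmarshalResponse and the lowerCaseHeaderMaps toggle it threads through the header-map unmarshaling above. The output shape and its location/locationName tags are hypothetical, written to mimic generated-model conventions.

package main

import (
	"fmt"
	"net/http"

	"github.com/aws/aws-sdk-go/private/protocol/rest"
)

// headOutput is a hypothetical output shape; Status is bound from the HTTP
// status code and Metadata from all headers with the x-amz-meta- prefix.
type headOutput struct {
	Status   *int64             `location:"statusCode" type:"integer"`
	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
}

func main() {
	resp := &http.Response{
		StatusCode: 200,
		Header: http.Header{
			"X-Amz-Meta-Owner": []string{"team-a"},
		},
	}

	var out headOutput
	// The final argument is the new lowerCaseHeaderMaps toggle: with true the
	// header-map keys come back lower-cased before the prefix is stripped.
	if err := rest.UnmarshalResponse(resp, &out, true); err != nil {
		panic(err)
	}
	fmt.Println(*out.Status, *out.Metadata["owner"]) // 200 team-a
}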
package restxml -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go import ( "bytes" @@ -37,8 +37,9 @@ func Build(r *request.Request) { err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed to encode rest XML request", err), - r.HTTPResponse.StatusCode, + awserr.New(request.ErrCodeSerialization, + "failed to encode rest XML request", err), + 0, r.RequestID, ) return @@ -55,7 +56,8 @@ func Unmarshal(r *request.Request) { err := xmlutil.UnmarshalXML(r.Data, decoder, "") if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "failed to decode REST XML response", err), + awserr.New(request.ErrCodeSerialization, + "failed to decode REST XML response", err), r.HTTPResponse.StatusCode, r.RequestID, ) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go index b7ed6c6f..d2f6dae5 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -1,8 +1,11 @@ package protocol import ( + "math" "strconv" "time" + + "github.com/aws/aws-sdk-go/internal/sdkmath" ) // Names of time formats supported by the SDK @@ -13,12 +16,19 @@ const ( ) // Time formats supported by the SDK +// Output time is intended to not contain decimals const ( // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + // This format is used for output time without seconds precision + RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" + // RFC3339 a subset of the ISO8601 timestamp format. 
e.g 2014-04-29T18:30:38Z - ISO8601TimeFormat = "2006-01-02T15:04:05Z" + ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" + + // This format is used for output time without seconds precision + ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z" ) // IsKnownTimestampFormat returns if the timestamp format name @@ -42,11 +52,12 @@ func FormatTime(name string, t time.Time) string { switch name { case RFC822TimeFormatName: - return t.Format(RFC822TimeFormat) + return t.Format(RFC822OutputTimeFormat) case ISO8601TimeFormatName: - return t.Format(ISO8601TimeFormat) + return t.Format(ISO8601OutputTimeFormat) case UnixTimeFormatName: - return strconv.FormatInt(t.Unix(), 10) + ms := t.UnixNano() / int64(time.Millisecond) + return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64) default: panic("unknown timestamp format name, " + name) } @@ -62,10 +73,12 @@ func ParseTime(formatName, value string) (time.Time, error) { return time.Parse(ISO8601TimeFormat, value) case UnixTimeFormatName: v, err := strconv.ParseFloat(value, 64) + _, dec := math.Modf(v) + dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123 if err != nil { return time.Time{}, err } - return time.Unix(int64(v), 0), nil + return time.Unix(int64(v), int64(dec*(1e9))), nil default: panic("unknown timestamp format name, " + formatName) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go index da1a6811..f614ef89 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -19,3 +19,9 @@ func UnmarshalDiscardBody(r *request.Request) { io.Copy(ioutil.Discard, r.HTTPResponse.Body) r.HTTPResponse.Body.Close() } + +// ResponseMetadata provides the SDK response metadata attributes. +type ResponseMetadata struct { + StatusCode int + RequestID string +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go new file mode 100644 index 00000000..cc857f13 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go @@ -0,0 +1,65 @@ +package protocol + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalErrorHandler provides unmarshaling errors API response errors for +// both typed and untyped errors. +type UnmarshalErrorHandler struct { + unmarshaler ErrorUnmarshaler +} + +// ErrorUnmarshaler is an abstract interface for concrete implementations to +// unmarshal protocol specific response errors. +type ErrorUnmarshaler interface { + UnmarshalError(*http.Response, ResponseMetadata) (error, error) +} + +// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler +// initialized for the set of exception names to the error unmarshalers +func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler { + return &UnmarshalErrorHandler{ + unmarshaler: unmarshaler, + } +} + +// UnmarshalErrorHandlerName is the name of the named handler. +const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError" + +// NamedHandler returns a NamedHandler for the unmarshaler using the set of +// errors the unmarshaler was initialized for. 
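The timestamp.go changes above move unix timestamps from whole seconds to millisecond precision in both directions. A small sketch, assuming the vendored protocol package is imported directly; the expected outputs are shown as comments.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	t := time.Date(2020, time.June, 22, 18, 30, 38, 123000000, time.UTC)

	// Unix timestamps are now rendered with sub-second (millisecond) precision
	// instead of truncating to whole seconds.
	fmt.Println(protocol.FormatTime(protocol.UnixTimeFormatName, t)) // 1592850638.123

	// ParseTime recovers the fractional part, rounded to milliseconds.
	parsed, err := protocol.ParseTime(protocol.UnixTimeFormatName, "1592850638.123")
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.UTC()) // 2020-06-22 18:30:38.123 +0000 UTC
}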
+func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler { + return request.NamedHandler{ + Name: UnmarshalErrorHandlerName, + Fn: u.UnmarshalError, + } +} + +// UnmarshalError will attempt to unmarshal the API response's error message +// into either a generic SDK error type, or a typed error corresponding to the +// errors exception name. +func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + respMeta := ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + } + + v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal response error", err), + respMeta.StatusCode, + respMeta.RequestID, + ) + return + } + + r.Error = v +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index cf981fe9..09ad9515 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -8,6 +8,7 @@ import ( "reflect" "sort" "strconv" + "strings" "time" "github.com/aws/aws-sdk-go/private/protocol" @@ -60,6 +61,14 @@ func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag refle return nil } + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + t := tag.Get("type") if t == "" { switch value.Kind() { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go new file mode 100644 index 00000000..c1a51185 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go @@ -0,0 +1,32 @@ +package xmlutil + +import ( + "encoding/xml" + "strings" +) + +type xmlAttrSlice []xml.Attr + +func (x xmlAttrSlice) Len() int { + return len(x) +} + +func (x xmlAttrSlice) Less(i, j int) bool { + spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space + localI, localJ := x[i].Name.Local, x[j].Name.Local + valueI, valueJ := x[i].Value, x[j].Value + + spaceCmp := strings.Compare(spaceI, spaceJ) + localCmp := strings.Compare(localI, localJ) + valueCmp := strings.Compare(valueI, valueJ) + + if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { + return true + } + + return false +} + +func (x xmlAttrSlice) Swap(i, j int) { + x[i], x[j] = x[j], x[i] +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index ff1ef683..107c053f 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -1,6 +1,7 @@ package xmlutil import ( + "bytes" "encoding/base64" "encoding/xml" "fmt" @@ -10,9 +11,27 @@ import ( "strings" "time" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/private/protocol" ) +// UnmarshalXMLError unmarshals the XML error from the stream into the value +// type specified. The value must be a pointer. 
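The new xmlAttrSlice above exists to make generated XML deterministic: attributes are ordered by namespace, then local name, then value when StructToXML is asked for sorted output. A standalone sketch of that same ordering using only the standard library:

package main

import (
	"encoding/xml"
	"fmt"
	"sort"
	"strings"
)

func main() {
	attrs := []xml.Attr{
		{Name: xml.Name{Space: "b", Local: "id"}, Value: "2"},
		{Name: xml.Name{Space: "a", Local: "id"}, Value: "9"},
		{Name: xml.Name{Space: "a", Local: "class"}, Value: "1"},
	}

	// Same space -> local -> value comparison the xmlAttrSlice Less method
	// above implements.
	sort.Slice(attrs, func(i, j int) bool {
		if c := strings.Compare(attrs[i].Name.Space, attrs[j].Name.Space); c != 0 {
			return c < 0
		}
		if c := strings.Compare(attrs[i].Name.Local, attrs[j].Name.Local); c != 0 {
			return c < 0
		}
		return attrs[i].Value < attrs[j].Value
	})

	for _, a := range attrs {
		fmt.Printf("%s:%s=%s\n", a.Name.Space, a.Name.Local, a.Value)
	}
	// Output:
	// a:class=1
	// a:id=9
	// b:id=2
}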
If the message fails to +// unmarshal, the message content will be included in the returned error as a +// awserr.UnmarshalError. +func UnmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return nil +} + // UnmarshalXML deserializes an xml.Decoder into the container v. V // needs to match the shape of the XML expected to be decoded. // If the shape doesn't match unmarshaling will fail. @@ -45,6 +64,14 @@ func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { // parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect // will be used to determine the type from r. func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + rtype := r.Type() if rtype.Kind() == reflect.Ptr { rtype = rtype.Elem() // check kind of actual element type diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go index 515ce152..42f71648 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -119,7 +119,18 @@ func (n *XMLNode) findElem(name string) (string, bool) { // StructToXML writes an XMLNode to a xml.Encoder as tokens. func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { - e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + // Sort Attributes + attrs := node.Attr + if sorted { + sortedAttrs := make([]xml.Attr, len(attrs)) + for _, k := range node.Attr { + sortedAttrs = append(sortedAttrs, k) + } + sort.Sort(xmlAttrSlice(sortedAttrs)) + attrs = sortedAttrs + } + + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs}) if node.Text != "" { e.EncodeToken(xml.CharData([]byte(node.Text))) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 83a42d24..228cd3cf 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -7,7 +7,6 @@ import ( "fmt" "io" "sync" - "sync/atomic" "time" "github.com/aws/aws-sdk-go/aws" @@ -15,11 +14,13 @@ import ( "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/checksum" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/eventstream" "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" "github.com/aws/aws-sdk-go/private/protocol/rest" "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/service/s3/internal/arn" ) const opAbortMultipartUpload = "AbortMultipartUpload" @@ -66,11 +67,31 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // AbortMultipartUpload API operation for Amazon Simple Storage Service. // -// Aborts a multipart upload. 
+// This operation aborts a multipart upload. After a multipart upload is aborted, +// no additional parts can be uploaded using that upload ID. The storage consumed +// by any previously uploaded parts will be freed. However, if any part uploads +// are currently in progress, those part uploads might or might not succeed. +// As a result, it might be necessary to abort a given multipart upload multiple +// times in order to completely free all storage consumed by all parts. // // To verify that all parts have been removed, so you don't get charged for -// the part storage, you should call the List Parts operation and ensure the -// parts list is empty. +// the part storage, you should call the ListParts operation and ensure that +// the parts list is empty. +// +// For information about permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// The following operations are related to AbortMultipartUpload: +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * ListParts +// +// * ListMultipartUploads // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -151,6 +172,64 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // // Completes a multipart upload by assembling previously uploaded parts. // +// You first initiate the multipart upload and then upload all parts using the +// UploadPart operation. After successfully uploading all relevant parts of +// an upload, you call this operation to complete the upload. Upon receiving +// this request, Amazon S3 concatenates all the parts in ascending order by +// part number to create a new object. In the Complete Multipart Upload request, +// you must provide the parts list. You must ensure that the parts list is complete. +// This operation concatenates the parts that you provide in the list. For each +// part in the list, you must provide the part number and the ETag value, returned +// after that part was uploaded. +// +// Processing of a Complete Multipart Upload request could take several minutes +// to complete. After Amazon S3 begins processing the request, it sends an HTTP +// response header that specifies a 200 OK response. While processing is in +// progress, Amazon S3 periodically sends white space characters to keep the +// connection from timing out. Because a request could fail after the initial +// 200 OK response has been sent, it is important that you check the response +// body to determine whether the request succeeded. +// +// Note that if CompleteMultipartUpload fails, applications should be prepared +// to retry the failed requests. For more information, see Amazon S3 Error Best +// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). +// +// For more information about multipart uploads, see Uploading Objects Using +// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information about permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). 
+// +// GetBucketLifecycle has the following special errors: +// +// * Error code: EntityTooSmall Description: Your proposed upload is smaller +// than the minimum allowed object size. Each part must be at least 5 MB +// in size, except the last part. 400 Bad Request +// +// * Error code: InvalidPart Description: One or more of the specified parts +// could not be found. The part might not have been uploaded, or the specified +// entity tag might not have matched the part's entity tag. 400 Bad Request +// +// * Error code: InvalidPartOrder Description: The list of parts was not +// in ascending order. The parts list must be specified in order by part +// number. 400 Bad Request +// +// * Error code: NoSuchUpload Description: The specified multipart upload +// does not exist. The upload ID might be invalid, or the multipart upload +// might have been aborted or completed. 404 Not Found +// +// The following operations are related to CompleteMultipartUpload: +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * AbortMultipartUpload +// +// * ListParts +// +// * ListMultipartUploads +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -225,6 +304,153 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // Creates a copy of an object that is already stored in Amazon S3. // +// You can store individual objects of up to 5 TB in Amazon S3. You create a +// copy of your object up to 5 GB in size in a single atomic operation using +// this API. However, to copy an object greater than 5 GB, you must use the +// multipart upload Upload Part - Copy API. For more information, see Copy Object +// Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). +// +// All copy requests must be authenticated. Additionally, you must have read +// access to the source object and write access to the destination bucket. For +// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). +// Both the Region that you want to copy the object from and the Region that +// you want to copy the object to must be enabled for your account. +// +// A copy request might return an error when Amazon S3 receives the copy request +// or while Amazon S3 is copying the files. If the error occurs before the copy +// operation starts, you receive a standard Amazon S3 error. If the error occurs +// during the copy operation, the error response is embedded in the 200 OK response. +// This means that a 200 OK response can contain either a success or an error. +// Design your application to parse the contents of the response and handle +// it appropriately. +// +// If the copy is successful, you receive a response with information about +// the copied object. +// +// If the request is an HTTP 1.1 request, the response is chunk encoded. If +// it were not, it would not contain the content-length, and you would need +// to read the entire body. +// +// The copy request charge is based on the storage class and Region that you +// specify for the destination object. For pricing information, see Amazon S3 +// pricing (https://aws.amazon.com/s3/pricing/). +// +// Amazon S3 transfer acceleration does not support cross-Region copies. If +// you request a cross-Region copy using a transfer acceleration endpoint, you +// get a 400 Bad Request error. 
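The expanded multipart documentation above walks through the CreateMultipartUpload, UploadPart, CompleteMultipartUpload, and AbortMultipartUpload family. A compact sketch of that lifecycle with a single part; the bucket and key names are placeholders supplied by the caller, and a real upload would split large objects into 5 MB+ parts.

package sketch

import (
	"bytes"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// uploadInOneGo initiates a multipart upload, uploads one part, and completes
// the upload, aborting it if the part upload fails so no orphaned parts keep
// accruing storage charges.
func uploadInOneGo(svc *s3.S3, bucket, key string, data []byte) error {
	create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}

	part, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     aws.String(bucket),
		Key:        aws.String(key),
		UploadId:   create.UploadId,
		PartNumber: aws.Int64(1),
		Body:       bytes.NewReader(data),
	})
	if err != nil {
		// Free the storage consumed by any parts that did make it.
		svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
			Bucket:   aws.String(bucket),
			Key:      aws.String(key),
			UploadId: create.UploadId,
		})
		return err
	}

	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: create.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
		},
	})
	return err
}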
For more information, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// +// Metadata +// +// When copying an object, you can preserve all metadata (default) or specify +// new metadata. However, the ACL is not preserved and is set to private for +// the user making the request. To override the default ACL setting, specify +// a new ACL when generating a copy request. For more information, see Using +// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// +// To specify whether you want the object metadata copied from the source object +// or replaced with metadata provided in the request, you can optionally add +// the x-amz-metadata-directive header. When you grant permissions, you can +// use the s3:x-amz-metadata-directive condition key to enforce certain metadata +// behavior when objects are uploaded. For more information, see Specifying +// Conditions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) +// in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific +// condition keys, see Actions, Resources, and Condition Keys for Amazon S3 +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). +// +// x-amz-copy-source-if Headers +// +// To only copy an object under certain conditions, such as whether the Etag +// matches or whether the object was modified before or after a specified date, +// use the following request parameters: +// +// * x-amz-copy-source-if-match +// +// * x-amz-copy-source-if-none-match +// +// * x-amz-copy-source-if-unmodified-since +// +// * x-amz-copy-source-if-modified-since +// +// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// 200 OK and copies the data: +// +// * x-amz-copy-source-if-match condition evaluates to true +// +// * x-amz-copy-source-if-unmodified-since condition evaluates to false +// +// If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// the 412 Precondition Failed response code: +// +// * x-amz-copy-source-if-none-match condition evaluates to false +// +// * x-amz-copy-source-if-modified-since condition evaluates to true +// +// All headers with the x-amz- prefix, including x-amz-copy-source, must be +// signed. +// +// Encryption +// +// The source object that you are copying can be encrypted or unencrypted. The +// source object can be encrypted with server-side encryption using AWS managed +// encryption keys (SSE-S3 or SSE-KMS) or by using a customer-provided encryption +// key. With server-side encryption, Amazon S3 encrypts your data as it writes +// it to disks in its data centers and decrypts the data when you access it. +// +// You can optionally use the appropriate encryption-related headers to request +// server-side encryption for the target object. You have the option to provide +// your own encryption key or use SSE-S3 or SSE-KMS, regardless of the form +// of server-side encryption that was used to encrypt the source object. You +// can even request encryption if the source object was not encrypted. For more +// information about server-side encryption, see Using Server-Side Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). 
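The x-amz-copy-source-if headers described above map to input fields on CopyObjectInput. A sketch of a conditional copy that only proceeds while the source object's ETag is unchanged; bucket, key, and ETag values are placeholders.

package sketch

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// copyIfUnchanged copies srcKey to dstKey within the same bucket, sending
// x-amz-copy-source-if-match so Amazon S3 returns 412 Precondition Failed
// instead of copying if the source has been modified since etag was read.
func copyIfUnchanged(svc *s3.S3, bucket, srcKey, dstKey, etag string) error {
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:            aws.String(bucket),
		Key:               aws.String(dstKey),
		CopySource:        aws.String(bucket + "/" + srcKey),
		CopySourceIfMatch: aws.String(etag),
	})
	return err
}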
+// +// Access Control List (ACL)-Specific Request Headers +// +// When copying an object, you can optionally use headers to grant ACL-based +// permissions. By default, all objects are private. Only the owner has full +// access control. When adding a new object, you can grant permissions to individual +// AWS accounts or to predefined groups defined by Amazon S3. These permissions +// are then added to the ACL on the object. For more information, see Access +// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). +// +// Storage Class Options +// +// You can use the CopyObject operation to change the storage class of an object +// that is already stored in Amazon S3 using the StorageClass parameter. For +// more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) +// in the Amazon S3 Service Developer Guide. +// +// Versioning +// +// By default, x-amz-copy-source identifies the current version of an object +// to copy. If the current version is a delete marker, Amazon S3 behaves as +// if the object was deleted. To copy a different version, use the versionId +// subresource. +// +// If you enable versioning on the target bucket, Amazon S3 generates a unique +// version ID for the object being copied. This version ID is different from +// the version ID of the source object. Amazon S3 returns the version ID of +// the copied object in the x-amz-version-id response header in the response. +// +// If you do not enable versioning or suspend it on the target bucket, the version +// ID that Amazon S3 generates is always null. +// +// If the source object's storage class is GLACIER, you must restore a copy +// of this object before you can use it as a source object for the copy operation. +// For more information, see . +// +// The following operations are related to CopyObject: +// +// * PutObject +// +// * GetObject +// +// For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -235,7 +461,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // Returned Error Codes: // * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" // The source object of the COPY operation is not in the active tier and is -// only stored in Amazon Glacier. +// only stored in Amazon S3 Glacier. // // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { @@ -303,7 +529,67 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // CreateBucket API operation for Amazon Simple Storage Service. // -// Creates a new bucket. +// Creates a new bucket. To create a bucket, you must register with Amazon S3 +// and have a valid AWS Access Key ID to authenticate requests. Anonymous requests +// are never allowed to create buckets. By creating the bucket, you become the +// bucket owner. +// +// Not every string is an acceptable bucket name. For information on bucket +// naming restrictions, see Working with Amazon S3 Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html). 
+// +// By default, the bucket is created in the US East (N. Virginia) Region. You +// can optionally specify a Region in the request body. You might choose a Region +// to optimize latency, minimize costs, or address regulatory requirements. +// For example, if you reside in Europe, you will probably find it advantageous +// to create buckets in the Europe (Ireland) Region. For more information, see +// How to Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). +// +// If you send your create bucket request to the s3.amazonaws.com endpoint, +// the request goes to the us-east-1 Region. Accordingly, the signature calculations +// in Signature Version 4 must use us-east-1 as the Region, even if the location +// constraint in the request specifies another Region where the bucket is to +// be created. If you create a bucket in a Region other than US East (N. Virginia), +// your application must be able to handle 307 redirect. For more information, +// see Virtual Hosting of Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). +// +// When creating a bucket using this operation, you can optionally specify the +// accounts or groups that should be granted specific permissions on the bucket. +// There are two ways to grant the appropriate permissions using the request +// headers. +// +// * Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. For more information, see +// Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, +// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control +// headers. These headers map to the set of permissions Amazon S3 supports +// in an ACL. For more information, see Access Control List (ACL) Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You +// specify each grantee as a type=value pair, where the type is one of the +// following: id – if the value specified is the canonical user ID of an +// AWS account uri – if you are granting permissions to a predefined group +// emailAddress – if the value specified is the email address of an AWS +// account Using email addresses to specify a grantee is only supported in +// the following AWS Regions: US East (N. Virginia) US West (N. California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-read +// header grants the AWS accounts identified by account IDs permissions to +// read object data and its metadata: x-amz-grant-read: id="11112222333", +// id="444455556666" +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// The following operations are related to CreateBucket: +// +// * PutObject +// +// * DeleteBucket // // Returns awserr.Error for service API and SDK errors. 
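A sketch of a CreateBucket call that follows the guidance above: an explicit LocationConstraint for a bucket outside us-east-1 and a canned ACL rather than individual grant headers. The bucket name and region are placeholders.

package sketch

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// createPrivateBucket creates a bucket in the given region with the "private"
// canned ACL. Requests to regions other than us-east-1 must carry the
// LocationConstraint shown here.
func createPrivateBucket(svc *s3.S3, name, region string) error {
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(name),
		ACL:    aws.String(s3.BucketCannedACLPrivate),
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			LocationConstraint: aws.String(region),
		},
	})
	return err
}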
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -318,6 +604,11 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // by all users of the system. Please select a different name and try again. // // * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" +// The bucket you tried to create already exists, and you own it. Amazon S3 +// returns this error in all AWS Regions except in the North Virginia Region. +// For legacy compatibility, if you re-create an existing bucket that you already +// own in the North Virginia Region, Amazon S3 returns 200 OK and resets the +// bucket access control lists (ACLs). // // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { @@ -385,13 +676,154 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // CreateMultipartUpload API operation for Amazon Simple Storage Service. // -// Initiates a multipart upload and returns an upload ID. -// -// Note: After you initiate multipart upload and upload one or more parts, you -// must either complete or abort multipart upload in order to stop getting charged -// for storage of the uploaded parts. Only after you either complete or abort -// multipart upload, Amazon S3 frees up the parts storage and stops charging -// you for the parts storage. +// This operation initiates a multipart upload and returns an upload ID. This +// upload ID is used to associate all of the parts in the specific multipart +// upload. You specify this upload ID in each of your subsequent upload part +// requests (see UploadPart). You also include this upload ID in the final request +// to either complete or abort the multipart upload request. +// +// For more information about multipart uploads, see Multipart Upload Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). +// +// If you have configured a lifecycle rule to abort incomplete multipart uploads, +// the upload must complete within the number of days specified in the bucket +// lifecycle configuration. Otherwise, the incomplete multipart upload becomes +// eligible for an abort operation and Amazon S3 aborts the multipart upload. +// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). +// +// For information about the permissions required to use the multipart upload +// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// For request signing, multipart upload is just a series of regular requests. +// You initiate a multipart upload, send one or more requests to upload parts, +// and then complete the multipart upload process. You sign each request individually. +// There is nothing special about signing multipart upload requests. For more +// information about signing, see Authenticating Requests (AWS Signature Version +// 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). +// +// After you initiate a multipart upload and upload one or more parts, to stop +// being charged for storing the uploaded parts, you must either complete or +// abort the multipart upload. 
Amazon S3 frees up the space used to store the +// parts and stop charging you for storing them only after you either complete +// or abort a multipart upload. +// +// You can optionally request server-side encryption. For server-side encryption, +// Amazon S3 encrypts your data as it writes it to disks in its data centers +// and decrypts it when you access it. You can provide your own encryption key, +// or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or +// Amazon S3-managed encryption keys. If you choose to provide your own encryption +// key, the request headers you provide in UploadPart) and UploadPartCopy) requests +// must match the headers you used in the request to initiate the upload by +// using CreateMultipartUpload. +// +// To perform a multipart upload with encryption using an AWS KMS CMK, the requester +// must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, +// and kms:DescribeKey actions on the key. These permissions are required because +// Amazon S3 must decrypt and read data from the encrypted file parts before +// it completes the multipart upload. +// +// If your AWS Identity and Access Management (IAM) user or role is in the same +// AWS account as the AWS KMS CMK, then you must have these permissions on the +// key policy. If your IAM user or role belongs to a different account than +// the key, then you must have the permissions on both the key policy and your +// IAM user or role. +// +// For more information, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +// +// Access Permissions +// +// When copying an object, you can optionally specify the accounts or groups +// that should be granted specific permissions on the new object. There are +// two ways to grant the permissions using the request headers: +// +// * Specify a canned ACL with the x-amz-acl request header. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters +// map to the set of permissions that Amazon S3 supports in an ACL. For more +// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// Server-Side- Encryption-Specific Request Headers +// +// You can optionally tell Amazon S3 to encrypt data at rest using server-side +// encryption. Server-side encryption is for data encryption at rest. Amazon +// S3 encrypts your data as it writes it to disks in its data centers and decrypts +// it when you access it. The option you use depends on whether you want to +// use AWS managed encryption keys or provide your own encryption key. +// +// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) +// stored in AWS Key Management Service (AWS KMS) – If you want AWS to +// manage the keys used to encrypt data, specify the following headers in +// the request. 
x-amz-server-side​-encryption x-amz-server-side-encryption-aws-kms-key-id +// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, +// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon +// S3 uses the AWS managed CMK in AWS KMS to protect the data. All GET and +// PUT requests for an object protected by AWS KMS fail if you don't make +// them with SSL or by using SigV4. For more information about server-side +// encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data +// Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// * Use customer-provided encryption keys – If you want to manage your +// own encryption keys, provide all the following headers in the request. +// x-amz-server-side​-encryption​-customer-algorithm x-amz-server-side​-encryption​-customer-key +// x-amz-server-side​-encryption​-customer-key-MD5 For more information +// about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see +// Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// Access-Control-List (ACL)-Specific Request Headers +// +// You also can use the following access control–related headers with this +// operation. By default, all objects are private. Only the owner has full access +// control. When adding a new object, you can grant permissions to individual +// AWS accounts or to predefined groups defined by Amazon S3. These permissions +// are then added to the access control list (ACL) on the object. For more information, +// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// With this operation, you can grant access permissions using one of the following +// two methods: +// +// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined +// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees +// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly — To explicitly grant access +// permissions to specific AWS accounts or groups, use the following headers. +// Each header maps to specific permissions that Amazon S3 supports in an +// ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// In the header, you specify a list of grantees who get the specific permission. +// To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write +// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You +// specify each grantee as a type=value pair, where the type is one of the +// following: id – if the value specified is the canonical user ID of an +// AWS account uri – if you are granting permissions to a predefined group +// emailAddress – if the value specified is the email address of an AWS +// account Using email addresses to specify a grantee is only supported in +// the following AWS Regions: US East (N. Virginia) US West (N. 
California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-read +// header grants the AWS accounts identified by account IDs permissions to +// read object data and its metadata: x-amz-grant-read: id="11112222333", +// id="444455556666" +// +// The following operations are related to CreateMultipartUpload: +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * AbortMultipartUpload +// +// * ListParts +// +// * ListMultipartUploads // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -466,8 +898,14 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request // DeleteBucket API operation for Amazon Simple Storage Service. // -// Deletes the bucket. All objects (including all object versions and Delete -// Markers) in the bucket must be deleted before the bucket itself can be deleted. +// Deletes the bucket. All objects (including all object versions and delete +// markers) in the bucket must be deleted before the bucket itself can be deleted. +// +// Related Resources +// +// * +// +// * // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -545,6 +983,23 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt // Deletes an analytics configuration for the bucket (specified by the analytics // configuration ID). // +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// The following operations are related to DeleteBucketAnalyticsConfiguration: +// +// * +// +// * +// +// * +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -618,7 +1073,20 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request // DeleteBucketCors API operation for Amazon Simple Storage Service. // -// Deletes the CORS configuration information set for the bucket. +// Deletes the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketCORS +// action. The bucket owner has this permission by default and can grant this +// permission to others. 
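The multipart-upload flow documented above for CreateMultipartUpload (initiate, upload parts, then complete or abort so the stored parts stop accruing charges) can be sketched roughly as follows; the bucket, key, and payload are placeholders and error handling is kept minimal:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	bucket, key := aws.String("example-bucket"), aws.String("large-object.bin") // placeholders

	// Step 1: initiate the upload and keep the upload ID.
	mpu, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{Bucket: bucket, Key: key})
	if err != nil {
		log.Fatal(err)
	}

	// Step 2: upload parts, recording each part's ETag and number for the final
	// request. A single tiny part keeps the sketch short; non-final parts of a
	// real upload must meet the S3 minimum part size.
	part, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     bucket,
		Key:        key,
		UploadId:   mpu.UploadId,
		PartNumber: aws.Int64(1),
		Body:       bytes.NewReader([]byte("part one payload")),
	})
	if err != nil {
		// Step 3b: abort so the uploaded parts stop being stored and charged.
		_, _ = svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{Bucket: bucket, Key: key, UploadId: mpu.UploadId})
		log.Fatal(err)
	}

	// Step 3a: complete the upload with the list of parts.
	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   bucket,
		Key:      key,
		UploadId: mpu.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("multipart upload completed, upload ID:", aws.StringValue(mpu.UploadId))
}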
+// +// For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources: +// +// * +// +// * RESTOPTIONSobject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -693,7 +1161,23 @@ func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) ( // DeleteBucketEncryption API operation for Amazon Simple Storage Service. // -// Deletes the server-side encryption configuration from the bucket. +// This implementation of the DELETE operation removes default encryption from +// the bucket. For information about the Amazon S3 default encryption feature, +// see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * PutBucketEncryption +// +// * GetBucketEncryption // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -771,6 +1255,23 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent // Deletes an inventory configuration (identified by the inventory ID) from // the bucket. // +// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). +// +// Operations related to DeleteBucketInventoryConfiguration include: +// +// * GetBucketInventoryConfiguration +// +// * PutBucketInventoryConfiguration +// +// * ListBucketInventoryConfigurations +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -844,7 +1345,27 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re // DeleteBucketLifecycle API operation for Amazon Simple Storage Service. // -// Deletes the lifecycle configuration from the bucket. 
+// Deletes the lifecycle configuration from the specified bucket. Amazon S3 +// removes all the lifecycle configuration rules in the lifecycle subresource +// associated with the bucket. Your objects never expire, and Amazon S3 no longer +// automatically deletes any objects on the basis of rules contained in the +// deleted lifecycle configuration. +// +// To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration +// action. By default, the bucket owner has this permission and the bucket owner +// can grant this permission to others. +// +// There is usually some time lag before lifecycle configuration deletion is +// fully propagated to all the Amazon S3 systems. +// +// For more information about the object expiration, see Elements to Describe +// Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions). +// +// Related actions include: +// +// * PutBucketLifecycleConfiguration +// +// * GetBucketLifecycleConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -919,8 +1440,28 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC // DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service. // -// Deletes a metrics configuration (specified by the metrics configuration ID) -// from the bucket. +// Deletes a metrics configuration for the Amazon CloudWatch request metrics +// (specified by the metrics configuration ID) from the bucket. Note that this +// doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to DeleteBucketMetricsConfiguration: +// +// * GetBucketMetricsConfiguration +// +// * PutBucketMetricsConfiguration +// +// * ListBucketMetricsConfigurations +// +// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -995,7 +1536,29 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req // DeleteBucketPolicy API operation for Amazon Simple Storage Service. // -// Deletes the policy from the bucket. +// This implementation of the DELETE operation uses the policy subresource to +// delete the policy of a specified bucket. 
If you are using an identity other +// than the root user of the AWS account that owns the bucket, the calling identity +// must have the DeleteBucketPolicy permissions on the specified bucket and +// belong to the bucket owner's account to use this operation. +// +// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 +// Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. +// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// UserPolicies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operations are related to DeleteBucketPolicy +// +// * CreateBucket +// +// * DeleteObject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1070,10 +1633,26 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) // DeleteBucketReplication API operation for Amazon Simple Storage Service. // -// Deletes the replication configuration from the bucket. For information about -// replication configuration, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// Deletes the replication configuration from the bucket. +// +// To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration +// action. The bucket owner has these permissions by default and can grant it +// to others. For more information about permissions, see Permissions Related +// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// It can take a while for the deletion of a replication configuration to fully +// propagate. +// +// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon S3 Developer Guide. // +// The following operations are related to DeleteBucketReplication: +// +// * PutBucketReplication +// +// * GetBucketReplication +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1149,6 +1728,16 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r // // Deletes the tags from the bucket. // +// To use this operation, you must have permission to perform the s3:PutBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// The following operations are related to DeleteBucketTagging: +// +// * GetBucketTagging +// +// * PutBucketTagging +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
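As an illustration of the DeleteBucketPolicy behaviour described above, where a missing permission yields 403 Access Denied and a caller outside the bucket owner's account yields 405 Method Not Allowed, a short sketch with a placeholder bucket name:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	_, err := svc.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		// awserr.RequestFailure exposes the HTTP status, which distinguishes the
		// 403 (missing DeleteBucketPolicy permission) and 405 (identity outside
		// the bucket owner's account) cases described above.
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			fmt.Printf("DeleteBucketPolicy failed: HTTP %d, code %s\n", reqErr.StatusCode(), reqErr.Code())
			return
		}
		log.Fatal(err)
	}
	fmt.Println("bucket policy deleted")
}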
@@ -1222,7 +1811,26 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r // DeleteBucketWebsite API operation for Amazon Simple Storage Service. // -// This operation removes the website configuration from the bucket. +// This operation removes the website configuration for a bucket. Amazon S3 +// returns a 200 OK response upon successfully deleting a website configuration +// on the specified bucket. You will get a 200 OK response if the website configuration +// you are trying to delete does not exist on the bucket. Amazon S3 returns +// a 404 response if the bucket specified in the request does not exist. +// +// This DELETE operation requires the S3:DeleteBucketWebsite permission. By +// default, only the bucket owner can delete the website configuration attached +// to a bucket. However, bucket owners can grant other users permission to delete +// the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite +// permission. +// +// For more information about hosting websites, see Hosting Websites on Amazon +// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// The following operations are related to DeleteBucketWebsite: +// +// * GetBucketWebsite +// +// * PutBucketWebsite // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1300,6 +1908,29 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request // marker, which becomes the latest version of the object. If there isn't a // null version, Amazon S3 does not remove any objects. // +// To remove a specific version, you must be the bucket owner and you must use +// the version Id subresource. Using this subresource permanently deletes the +// version. If the object deleted is a delete marker, Amazon S3 sets the response +// header, x-amz-delete-marker, to true. +// +// If the object you want to delete is in a bucket where the bucket versioning +// configuration is MFA Delete enabled, you must include the x-amz-mfa request +// header in the DELETE versionId request. Requests that include x-amz-mfa must +// use HTTPS. +// +// For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). +// To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). +// +// You can delete objects by explicitly calling the DELETE Object API or configure +// its lifecycle (PutBucketLifecycle) to enable Amazon S3 to remove them for +// you. If you want to block users or accounts from removing or deleting objects +// from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, +// and s3:PutLifeCycleConfiguration actions. +// +// The following operation is related to DeleteObject: +// +// * PutObject +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1372,7 +2003,21 @@ func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *r // DeleteObjectTagging API operation for Amazon Simple Storage Service. // -// Removes the tag-set from an existing object. +// Removes the entire tag set from the specified object. 
For more information +// about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// To use this operation, you must have permission to perform the s3:DeleteObjectTagging +// action. +// +// To delete tags of a specific object version, add the versionId query parameter +// in the request. You will need permission for the s3:DeleteObjectVersionTagging +// action. +// +// The following operations are related to DeleteBucketMetricsConfiguration: +// +// * PutObjectTagging +// +// * GetObjectTagging // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1441,13 +2086,57 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque output = &DeleteObjectsOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // DeleteObjects API operation for Amazon Simple Storage Service. // // This operation enables you to delete multiple objects from a bucket using -// a single HTTP request. You may specify up to 1000 keys. +// a single HTTP request. If you know the object keys that you want to delete, +// then this operation provides a suitable alternative to sending individual +// delete requests, reducing per-request overhead. +// +// The request contains a list of up to 1000 keys that you want to delete. In +// the XML, you provide the object key names, and optionally, version IDs if +// you want to delete a specific version of the object from a versioning-enabled +// bucket. For each key, Amazon S3 performs a delete operation and returns the +// result of that delete, success, or failure, in the response. Note that if +// the object specified in the request is not found, Amazon S3 returns the result +// as deleted. +// +// The operation supports two modes for the response: verbose and quiet. By +// default, the operation uses verbose mode in which the response includes the +// result of deletion of each key in your request. In quiet mode the response +// includes only keys where the delete operation encountered an error. For a +// successful deletion, the operation does not return any information about +// the delete in the response body. +// +// When performing this operation on an MFA Delete enabled bucket, that attempts +// to delete any versioned objects, you must include an MFA token. If you do +// not provide one, the entire request will fail, even if there are non-versioned +// objects you are trying to delete. If you provide an invalid token, whether +// there are versioned keys in the request or not, the entire Multi-Object Delete +// request will fail. For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete). +// +// Finally, the Content-MD5 header is required for all Multi-Object Delete requests. +// Amazon S3 uses the header value to ensure that your request body has not +// been altered in transit. +// +// The following operations are related to DeleteObjects: +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * ListParts +// +// * AbortMultipartUpload // // Returns awserr.Error for service API and SDK errors. 
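A rough sketch of the Multi-Object Delete request described above, using quiet mode; the bucket and keys are placeholders. The contentMd5Handler registered in DeleteObjectsRequest in this hunk supplies the mandatory Content-MD5 header, so the caller does not set it explicitly:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Up to 1000 keys per request; quiet mode reports only the keys that failed.
	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("logs/2020-06-01.gz")}, // placeholder keys
				{Key: aws.String("logs/2020-06-02.gz")},
			},
			Quiet: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Per-key failures come back in the response body rather than as a request error.
	for _, e := range out.Errors {
		fmt.Printf("could not delete %s: %s\n", aws.StringValue(e.Key), aws.StringValue(e.Code))
	}
}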
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1522,7 +2211,21 @@ func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) // DeletePublicAccessBlock API operation for Amazon Simple Storage Service. // -// Removes the PublicAccessBlock configuration from an Amazon S3 bucket. +// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:PutBucketPublicAccessBlock permission. +// For more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following operations are related to DeletePublicAccessBlock: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * GetPublicAccessBlock +// +// * PutPublicAccessBlock +// +// * GetBucketPolicyStatus // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1596,7 +2299,32 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. // -// Returns the accelerate configuration of a bucket. +// This implementation of the GET operation uses the accelerate subresource +// to return the Transfer Acceleration state of a bucket, which is either Enabled +// or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that +// enables you to perform faster data transfers to and from Amazon S3. +// +// To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You set the Transfer Acceleration state of an existing bucket to Enabled +// or Suspended by using the PutBucketAccelerateConfiguration operation. +// +// A GET accelerate request does not return a state value for a bucket that +// has no transfer acceleration state. A bucket has no Transfer Acceleration +// state if a state has never been set on the bucket. +// +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * PutBucketAccelerateConfiguration // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1670,7 +2398,15 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request // GetBucketAcl API operation for Amazon Simple Storage Service. // -// Gets the access control policy for the bucket. +// This implementation of the GET operation uses the acl subresource to return +// the access control list (ACL) of a bucket. To use GET to return the ACL of +// the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission +// is granted to the anonymous user, you can return the ACL of the bucket without +// using an authorization header. +// +// Related Resources +// +// * // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1744,8 +2480,27 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon // GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. // -// Gets an analytics configuration for the bucket (specified by the analytics -// configuration ID). +// This implementation of the GET operation returns an analytics configuration +// (identified by the analytics configuration ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// For information about Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * +// +// * +// +// * // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1819,7 +2574,20 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque // GetBucketCors API operation for Amazon Simple Storage Service. // -// Returns the CORS configuration for the bucket. +// Returns the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// For more information about cors, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). +// +// The following operations are related to GetBucketCors: +// +// * PutBucketCors +// +// * DeleteBucketCors // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1893,7 +2661,21 @@ func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *r // GetBucketEncryption API operation for Amazon Simple Storage Service. 
// -// Returns the server-side encryption configuration of a bucket. +// Returns the default encryption configuration for an Amazon S3 bucket. For +// information about the Amazon S3 default encryption feature, see Amazon S3 +// Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). +// +// To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following operations are related to GetBucketEncryption: +// +// * PutBucketEncryption +// +// * DeleteBucketEncryption // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1967,8 +2749,25 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon // GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service. // -// Returns an inventory configuration (identified by the inventory ID) from -// the bucket. +// Returns an inventory configuration (identified by the inventory configuration +// ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). +// +// The following operations are related to GetBucketInventoryConfiguration: +// +// * DeleteBucketInventoryConfiguration +// +// * ListBucketInventoryConfigurations +// +// * PutBucketInventoryConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2047,7 +2846,34 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req // GetBucketLifecycle API operation for Amazon Simple Storage Service. // -// No longer used, see the GetBucketLifecycleConfiguration operation. +// +// For an updated version of this API, see GetBucketLifecycleConfiguration. +// If you configured a bucket lifecycle using the filter element, you should +// see the updated version of this topic. This topic is provided for backward +// compatibility. +// +// Returns the lifecycle configuration information set on the bucket. For information +// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). 
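The default-encryption lookup documented above for GetBucketEncryption might look like this in client code; the bucket name is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		// A bucket with no default-encryption configuration returns a service
		// error rather than an empty configuration.
		log.Fatal(err)
	}
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		def := rule.ApplyServerSideEncryptionByDefault
		fmt.Printf("default SSE: %s (KMS key: %s)\n",
			aws.StringValue(def.SSEAlgorithm), aws.StringValue(def.KMSMasterKeyID))
	}
}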
+// +// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// GetBucketLifecycle has the following special error: +// +// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle +// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault +// Code Prefix: Client +// +// The following operations are related to GetBucketLifecycle: +// +// * GetBucketLifecycleConfiguration +// +// * PutBucketLifecycle +// +// * DeleteBucketLifecycle // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2125,7 +2951,37 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. // -// Returns the lifecycle configuration information set on the bucket. +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, or a combination of both. +// Accordingly, this section describes the latest API. The response describes +// the new filter element that you can use to specify a filter to select a subset +// of objects to which the rule applies. If you are still using previous version +// of the lifecycle configuration, it works. For the earlier API description, +// see GetBucketLifecycle. +// +// Returns the lifecycle configuration information set on the bucket. For information +// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). +// +// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration +// action. The bucket owner has this permission, by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// GetBucketLifecycleConfiguration has the following special error: +// +// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle +// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault +// Code Prefix: Client +// +// The following operations are related to GetBucketLifecycleConfiguration: +// +// * GetBucketLifecycle +// +// * PutBucketLifecycle +// +// * DeleteBucketLifecycle // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2199,7 +3055,17 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque // GetBucketLocation API operation for Amazon Simple Storage Service. 
// -// Returns the region the bucket resides in. +// Returns the Region the bucket resides in. You set the bucket's Region using +// the LocationConstraint request parameter in a CreateBucket request. For more +// information, see CreateBucket. +// +// To use this implementation of the operation, you must be the bucket owner. +// +// The following operations are related to GetBucketLocation: +// +// * GetObject +// +// * CreateBucket // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2276,6 +3142,12 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request // Returns the logging status of a bucket and the permissions users have to // view and modify that status. To use GET, you must be the bucket owner. // +// The following operations are related to GetBucketLogging: +// +// * CreateBucket +// +// * PutBucketLogging +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2349,7 +3221,26 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu // GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service. // // Gets a metrics configuration (specified by the metrics configuration ID) -// from the bucket. +// from the bucket. Note that this doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to GetBucketMetricsConfiguration: +// +// * PutBucketMetricsConfiguration +// +// * DeleteBucketMetricsConfiguration +// +// * ListBucketMetricsConfigurations +// +// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2428,7 +3319,7 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat // GetBucketNotification API operation for Amazon Simple Storage Service. // -// No longer used, see the GetBucketNotificationConfiguration operation. +// No longer used, see GetBucketNotificationConfiguration. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2508,6 +3399,22 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // // Returns the notification configuration of a bucket. 
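To make the GetBucketLocation description above concrete, a small sketch; the bucket name is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// Buckets created in the original US East (N. Virginia) Region report an
	// empty LocationConstraint, so map that back to us-east-1.
	region := aws.StringValue(out.LocationConstraint)
	if region == "" {
		region = "us-east-1"
	}
	fmt.Println("bucket region:", region)
}

The s3/s3manager package also ships a GetBucketRegion helper that hides this mapping.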
// +// If notifications are not enabled on the bucket, the operation returns an +// empty NotificationConfiguration element. +// +// By default, you must be the bucket owner to read the notification configuration +// of a bucket. However, the bucket owner can use a bucket policy to grant permission +// to other users to read this configuration with the s3:GetBucketNotification +// permission. +// +// For more information about setting and reading the notification configuration +// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operation is related to GetBucketNotification: +// +// * PutBucketNotification +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2580,7 +3487,26 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R // GetBucketPolicy API operation for Amazon Simple Storage Service. // -// Returns the policy of a specified bucket. +// Returns the policy of a specified bucket. If you are using an identity other +// than the root user of the AWS account that owns the bucket, the calling identity +// must have the GetBucketPolicy permissions on the specified bucket and belong +// to the bucket owner's account in order to use this operation. +// +// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. +// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operation is related to GetBucketPolicy: +// +// * GetObject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2655,7 +3581,22 @@ func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (re // GetBucketPolicyStatus API operation for Amazon Simple Storage Service. // // Retrieves the policy status for an Amazon S3 bucket, indicating whether the -// bucket is public. +// bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For more information about when Amazon S3 considers a bucket public, see +// The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). 
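A brief sketch of GetBucketPolicy as documented above; the bucket name is a placeholder, and NoSuchBucketPolicy is the service error code returned when the bucket has no policy attached:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NoSuchBucketPolicy" {
			// The bucket exists but has no policy attached.
			fmt.Println("no bucket policy set")
			return
		}
		// The 403 / 405 responses from the permission rules described above also land here.
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Policy))
}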
+// +// The following operations are related to GetBucketPolicyStatus: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * GetPublicAccessBlock +// +// * PutPublicAccessBlock +// +// * DeletePublicAccessBlock // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2735,6 +3676,25 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req // to all Amazon S3 systems. Therefore, a get request soon after put or delete // can return a wrong result. // +// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// This operation requires permissions for the s3:GetReplicationConfiguration +// action. For more information about permissions, see Using Bucket Policies +// and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// If you include the Filter element in a replication configuration, you must +// also include the DeleteMarkerReplication and Priority elements. The response +// also returns those elements. +// +// For information about GetBucketReplication errors, see ReplicationErrorCodeList +// +// The following operations are related to GetBucketReplication: +// +// * PutBucketReplication +// +// * DeleteBucketReplication +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2807,7 +3767,13 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) // GetBucketRequestPayment API operation for Amazon Simple Storage Service. // -// Returns the request payment configuration of a bucket. +// Returns the request payment configuration of a bucket. To use this version +// of the operation, you must be the bucket owner. For more information, see +// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to GetBucketRequestPayment: +// +// * ListObjects // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2883,6 +3849,21 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request // // Returns the tag set associated with the bucket. // +// To use this operation, you must have permission to perform the s3:GetBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// GetBucketTagging has the following special error: +// +// * Error code: NoSuchTagSetError Description: There is no tag set associated +// with the bucket. +// +// The following operations are related to GetBucketTagging: +// +// * PutBucketTagging +// +// * DeleteBucketTagging +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2957,8 +3938,22 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r // // Returns the versioning state of a bucket. // -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about +// To retrieve the versioning state of a bucket, you must be the bucket owner. +// +// This implementation also returns the MFA Delete status of the versioning +// state. If the MFA Delete status is enabled, the bucket owner must use an +// authentication device to change the versioning state of the bucket. +// +// The following operations are related to GetBucketVersioning: +// +// * GetObject +// +// * PutObject +// +// * DeleteObject +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's @@ -3029,7 +4024,21 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request // GetBucketWebsite API operation for Amazon Simple Storage Service. // -// Returns the website configuration for a bucket. +// Returns the website configuration for a bucket. To host website on Amazon +// S3, you can configure a bucket as website by adding a website configuration. +// For more information about hosting websites, see Hosting Websites on Amazon +// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// This GET operation requires the S3:GetBucketWebsite permission. By default, +// only the bucket owner can read the bucket website configuration. However, +// bucket owners can allow other users to read the website configuration by +// writing a bucket policy granting them the S3:GetBucketWebsite permission. +// +// The following operations are related to DeleteBucketWebsite: +// +// * DeleteBucketWebsite +// +// * PutBucketWebsite // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3103,7 +4112,130 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // GetObject API operation for Amazon Simple Storage Service. // -// Retrieves objects from Amazon S3. +// Retrieves objects from Amazon S3. To use GET, you must have READ access to +// the object. If you grant READ access to the anonymous user, you can return +// the object without using an authorization header. +// +// An Amazon S3 bucket has no directory hierarchy such as you would find in +// a typical computer file system. You can, however, create a logical hierarchy +// by using object key names that imply a folder structure. For example, instead +// of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg. +// +// To get an object from such a logical hierarchy, specify the full key name +// for the object in the GET operation. For a virtual hosted-style request example, +// if you have the object photos/2006/February/sample.jpg, specify the resource +// as /photos/2006/February/sample.jpg. For a path-style request example, if +// you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, +// specify the resource as /examplebucket/photos/2006/February/sample.jpg. For +// more information about request types, see HTTP Host Header Bucket Specification +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket). +// +// To distribute large files to many people, you can save bandwidth costs by +// using BitTorrent. 
For more information, see Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). +// For more information about returning the ACL of an object, see GetObjectAcl. +// +// If the object you are retrieving is stored in the GLACIER or DEEP_ARCHIVE +// storage classes, before you can retrieve the object you must first restore +// a copy using . Otherwise, this operation returns an InvalidObjectStateError +// error. For information about restoring archived objects, see Restoring Archived +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for GET requests if your object uses server-side encryption with +// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed +// encryption keys (SSE-S3). If your object does use these types of keys, you’ll +// get an HTTP 400 BadRequest error. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when +// you GET the object, you must use the following headers: +// +// * x-amz-server-side​-encryption​-customer-algorithm +// +// * x-amz-server-side​-encryption​-customer-key +// +// * x-amz-server-side​-encryption​-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). +// +// Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging +// action), the response also returns the x-amz-tagging-count header that provides +// the count of number of tags associated with the object. You can use GetObjectTagging +// to retrieve the tag set associated with an object. +// +// Permissions +// +// You need the s3:GetObject permission for this operation. For more information, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// If the object you request does not exist, the error Amazon S3 returns depends +// on whether you also have the s3:ListBucket permission. +// +// * If you have the s3:ListBucket permission on the bucket, Amazon S3 will +// return an HTTP status code 404 ("no such key") error. +// +// * If you don’t have the s3:ListBucket permission, Amazon S3 will return +// an HTTP status code 403 ("access denied") error. +// +// Versioning +// +// By default, the GET operation returns the current version of an object. To +// return a different version, use the versionId subresource. +// +// If the current version of the object is a delete marker, Amazon S3 behaves +// as if the object was deleted and includes x-amz-delete-marker: true in the +// response. +// +// For more information about versioning, see PutBucketVersioning. +// +// Overriding Response Header Values +// +// There are times when you want to override certain response header values +// in a GET response. For example, you might override the Content-Disposition +// response header value in your GET request. +// +// You can override values for a set of response headers using the following +// query parameters. These response header values are sent only on a successful +// request, that is, when status code 200 OK is returned. 
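Pulling together the GetObject points above (key naming, the NoSuchKey and permissions behaviour, and reading the response body), a compact sketch with placeholder bucket and key:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// The key uses the "logical hierarchy" naming discussed earlier.
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("examplebucket"),                   // placeholder
		Key:    aws.String("photos/2006/February/sample.jpg"), // key layout from the text above
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
			// Returned as a 404 when the caller also has s3:ListBucket; without
			// that permission the service answers 403, as described above.
			log.Fatal("object not found")
		}
		log.Fatal(err)
	}
	defer out.Body.Close()

	body, err := ioutil.ReadAll(out.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes, Content-Type %s\n", len(body), aws.StringValue(out.ContentType))
}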
The set of headers +// you can override using these parameters is a subset of the headers that Amazon +// S3 accepts when you create an object. The response headers that you can override +// for the GET response are Content-Type, Content-Language, Expires, Cache-Control, +// Content-Disposition, and Content-Encoding. To override these header values +// in the GET response, you use the following request parameters. +// +// You must sign the request, either using an Authorization header or a presigned +// URL, when using these parameters. They cannot be used with an unsigned (anonymous) +// request. +// +// * response-content-type +// +// * response-content-language +// +// * response-expires +// +// * response-cache-control +// +// * response-content-disposition +// +// * response-content-encoding +// +// Additional Considerations about Request Headers +// +// If both of the If-Match and If-Unmodified-Since headers are present in the +// request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since +// condition evaluates to false; then, S3 returns 200 OK and the data requested. +// +// If both of the If-None-Match and If-Modified-Since headers are present in +// the request as follows:If-None-Match condition evaluates to false, and; If-Modified-Since +// condition evaluates to true; then, S3 returns 304 Not Modified response code. +// +// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// The following operations are related to GetObject: +// +// * ListBuckets +// +// * GetObjectAcl // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3182,7 +4314,21 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request // GetObjectAcl API operation for Amazon Simple Storage Service. // -// Returns the access control list (ACL) of an object. +// Returns the access control list (ACL) of an object. To use this operation, +// you must have READ_ACP access to the object. +// +// Versioning +// +// By default, GET returns ACL information about the current version of an object. +// To return ACL information about a different version, use the versionId subresource. +// +// The following operations are related to GetObjectAcl: +// +// * GetObject +// +// * DeleteObject +// +// * PutObject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3261,7 +4407,8 @@ func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *req // GetObjectLegalHold API operation for Amazon Simple Storage Service. // -// Gets an object's current Legal Hold status. +// Gets an object's current Legal Hold status. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3337,7 +4484,8 @@ func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfiguration // // Gets the Object Lock configuration for a bucket. The rule specified in the // Object Lock configuration will be applied by default to every new object -// placed in the specified bucket. +// placed in the specified bucket. 
For more information, see Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3411,7 +4559,8 @@ func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *req // GetObjectRetention API operation for Amazon Simple Storage Service. // -// Retrieves an object's retention settings. +// Retrieves an object's retention settings. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3485,7 +4634,25 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // GetObjectTagging API operation for Amazon Simple Storage Service. // -// Returns the tag-set of an object. +// Returns the tag-set of an object. You send the GET request against the tagging +// subresource associated with the object. +// +// To use this operation, you must have permission to perform the s3:GetObjectTagging +// action. By default, the GET operation returns information about current version +// of an object. For a versioned bucket, you can have multiple versions of an +// object in your bucket. To retrieve tags of any other version, use the versionId +// query parameter. You also need permission for the s3:GetObjectVersionTagging +// action. +// +// By default, the bucket owner has this permission and can grant this permission +// to others. +// +// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// The following operation is related to GetObjectTagging: +// +// * PutObjectTagging // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3559,7 +4726,19 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request // GetObjectTorrent API operation for Amazon Simple Storage Service. // -// Return torrent files from a bucket. +// Return torrent files from a bucket. BitTorrent can save you bandwidth when +// you're distributing large files. For more information about BitTorrent, see +// Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). +// +// You can get torrent only for objects that are less than 5 GB in size and +// that are not encrypted using server-side encryption with customer-provided +// encryption key. +// +// To use GET, you must have READ access to the object. +// +// The following operation is related to GetObjectTorrent: +// +// * GetObject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3633,7 +4812,30 @@ func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req // GetPublicAccessBlock API operation for Amazon Simple Storage Service. // -// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To +// use this operation, you must have the s3:GetBucketPublicAccessBlock permission. 
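A minimal sketch of the GetPublicAccessBlock call just described, assuming svc is an *s3.S3 client as in the earlier GetObject sketch; the helper name and bucket value are illustrative only.

func showPublicAccessBlock(svc *s3.S3, bucket string) error {
	out, err := svc.GetPublicAccessBlock(&s3.GetPublicAccessBlockInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	// The effective setting also depends on the account-level configuration,
	// as the documentation above explains.
	cfg := out.PublicAccessBlockConfiguration
	fmt.Println("BlockPublicAcls:", aws.BoolValue(cfg.BlockPublicAcls))
	fmt.Println("BlockPublicPolicy:", aws.BoolValue(cfg.BlockPublicPolicy))
	fmt.Println("IgnorePublicAcls:", aws.BoolValue(cfg.IgnorePublicAcls))
	fmt.Println("RestrictPublicBuckets:", aws.BoolValue(cfg.RestrictPublicBuckets))
	return nil
}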
+// For more information about Amazon S3 permissions, see Specifying Permissions +// in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock settings are different between the bucket and the +// account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// The following operations are related to GetPublicAccessBlock: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * PutPublicAccessBlock +// +// * GetPublicAccessBlock +// +// * DeletePublicAccessBlock // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3709,7 +4911,15 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou // HeadBucket API operation for Amazon Simple Storage Service. // // This operation is useful to determine if a bucket exists and you have permission -// to access it. +// to access it. The operation returns a 200 OK if the bucket exists and you +// have permission to access it. Otherwise, the operation might return responses +// such as 404 Not Found and 403 Forbidden. +// +// To use this operation, you must have permissions to perform the s3:ListBucket +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3792,6 +5002,63 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // object itself. This operation is useful if you're only interested in an object's // metadata. To use HEAD, you must have READ access to the object. // +// A HEAD request has the same options as a GET operation on an object. The +// response is identical to the GET response except that there is no response +// body. 
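To make the "metadata only, no body" behavior of HeadObject concrete, a short sketch (helper name, bucket, and key are placeholders; svc is an *s3.S3 client as above):

func headObject(svc *s3.S3, bucket, key string) error {
	// HEAD returns only headers and metadata; there is no body to read or close.
	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	fmt.Println("Content-Length:", aws.Int64Value(out.ContentLength))
	fmt.Println("Content-Type:", aws.StringValue(out.ContentType))
	fmt.Println("Last-Modified:", aws.TimeValue(out.LastModified))
	return nil
}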
+// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when +// you retrieve the metadata from the object, you must use the following headers: +// +// * x-amz-server-side​-encryption​-customer-algorithm +// +// * x-amz-server-side​-encryption​-customer-key +// +// * x-amz-server-side​-encryption​-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for GET requests if your object uses server-side encryption with +// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed +// encryption keys (SSE-S3). If your object does use these types of keys, you’ll +// get an HTTP 400 BadRequest error. +// +// Request headers are limited to 8 KB in size. For more information, see Common +// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). +// +// Consider the following when using request headers: +// +// * Consideration 1 – If both of the If-Match and If-Unmodified-Since +// headers are present in the request as follows: If-Match condition evaluates +// to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon +// S3 returns 200 OK and the data requested. +// +// * Consideration 2 – If both of the If-None-Match and If-Modified-Since +// headers are present in the request as follows: If-None-Match condition +// evaluates to false, and; If-Modified-Since condition evaluates to true; +// Then Amazon S3 returns the 304 Not Modified response code. +// +// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// Permissions +// +// You need the s3:GetObject permission for this operation. For more information, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// If the object you request does not exist, the error Amazon S3 returns depends +// on whether you also have the s3:ListBucket permission. +// +// * If you have the s3:ListBucket permission on the bucket, Amazon S3 returns +// an HTTP status code 404 ("no such key") error. +// +// * If you don’t have the s3:ListBucket permission, Amazon S3 returns +// an HTTP status code 403 ("access denied") error. +// +// The following operation is related to HeadObject: +// +// * GetObject +// // See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses // for more information on returned errors. // @@ -3867,7 +5134,33 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics // ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. // -// Lists the analytics configurations for the bucket. +// Lists the analytics configurations for the bucket. You can have up to 1,000 +// analytics configurations per bucket. +// +// This operation supports list pagination and does not return more than 100 +// configurations at a time. You should always check the IsTruncated element +// in the response. If there are no more configurations to list, IsTruncated +// is set to false. If there are more configurations to list, IsTruncated is +// set to true, and there will be a value in NextContinuationToken. 
You use +// the NextContinuationToken value to continue the pagination of the list by +// passing the value in continuation-token in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// The following operations are related to ListBucketAnalyticsConfigurations: +// +// * GetBucketAnalyticsConfiguration +// +// * DeleteBucketAnalyticsConfiguration +// +// * PutBucketAnalyticsConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3941,7 +5234,33 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory // ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service. // -// Returns a list of inventory configurations for the bucket. +// Returns a list of inventory configurations for the bucket. You can have up +// to 1,000 analytics configurations per bucket. +// +// This operation supports list pagination and does not return more than 100 +// configurations at a time. Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. +// If there are more configurations to list, IsTruncated is set to true, and +// there is a value in NextContinuationToken. You use the NextContinuationToken +// value to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// +// The following operations are related to ListBucketInventoryConfigurations: +// +// * GetBucketInventoryConfiguration +// +// * DeleteBucketInventoryConfiguration +// +// * PutBucketInventoryConfiguration // // Returns awserr.Error for service API and SDK errors. 
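The IsTruncated / NextContinuationToken pagination described for the inventory-configuration listing can be driven by a simple loop; a sketch under the same client assumptions (the helper name is hypothetical):

func listInventoryConfigs(svc *s3.S3, bucket string) error {
	var token *string
	for {
		out, err := svc.ListBucketInventoryConfigurations(&s3.ListBucketInventoryConfigurationsInput{
			Bucket:            aws.String(bucket),
			ContinuationToken: token,
		})
		if err != nil {
			return err
		}
		for _, cfg := range out.InventoryConfigurationList {
			fmt.Println(aws.StringValue(cfg.Id))
		}
		if !aws.BoolValue(out.IsTruncated) {
			return nil
		}
		// Request the next page with the continuation token from this response.
		token = out.NextContinuationToken
	}
}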
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4015,7 +5334,34 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf // ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. // -// Lists the metrics configurations for the bucket. +// Lists the metrics configurations for the bucket. The metrics configurations +// are only for the request metrics of the bucket and do not provide information +// on daily storage metrics. You can have up to 1,000 configurations per bucket. +// +// This operation supports list pagination and does not return more than 100 +// configurations at a time. Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. +// If there are more configurations to list, IsTruncated is set to true, and +// there is a value in NextContinuationToken. You use the NextContinuationToken +// value to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For more information about metrics configurations and CloudWatch request +// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to ListBucketMetricsConfigurations: +// +// * PutBucketMetricsConfiguration +// +// * GetBucketMetricsConfiguration +// +// * DeleteBucketMetricsConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4169,7 +5515,40 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // ListMultipartUploads API operation for Amazon Simple Storage Service. // -// This operation lists in-progress multipart uploads. +// This operation lists in-progress multipart uploads. An in-progress multipart +// upload is a multipart upload that has been initiated using the Initiate Multipart +// Upload request, but has not yet been completed or aborted. +// +// This operation returns at most 1,000 multipart uploads in the response. 1,000 +// multipart uploads is the maximum number of uploads a response can include, +// which is also the default value. You can further limit the number of uploads +// in a response by specifying the max-uploads parameter in the response. If +// additional multipart uploads satisfy the list criteria, the response will +// contain an IsTruncated element with the value true. To list the additional +// multipart uploads, use the key-marker and upload-id-marker request parameters. +// +// In the response, the uploads are sorted by key. If your application has initiated +// more than one multipart upload using the same object key, then uploads in +// the response are first sorted by key. 
Additionally, uploads are sorted in +// ascending order within each key by the upload initiation time. +// +// For more information on multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information on permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// The following operations are related to ListMultipartUploads: +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * ListParts +// +// * AbortMultipartUpload // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4210,7 +5589,7 @@ func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipa // // Example iterating over at most 3 pages of a ListMultipartUploads operation. // pageNum := 0 // err := client.ListMultipartUploadsPages(params, -// func(page *ListMultipartUploadsOutput, lastPage bool) bool { +// func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4242,10 +5621,12 @@ func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4299,7 +5680,24 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req // ListObjectVersions API operation for Amazon Simple Storage Service. // -// Returns metadata about all of the versions of objects in a bucket. +// Returns metadata about all of the versions of objects in a bucket. You can +// also use request parameters as selection criteria to return metadata about +// a subset of all the object versions. +// +// A 200 OK response can contain valid or invalid XML. Make sure to design your +// application to parse the contents of the response and handle it appropriately. +// +// To use this operation, you must have READ access to the bucket. +// +// The following operations are related to ListObjectVersions: +// +// * ListObjectsV2 +// +// * GetObject +// +// * PutObject +// +// * DeleteObject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4340,7 +5738,7 @@ func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVer // // Example iterating over at most 3 pages of a ListObjectVersions operation. 
// pageNum := 0 // err := client.ListObjectVersionsPages(params, -// func(page *ListObjectVersionsOutput, lastPage bool) bool { +// func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4372,10 +5770,12 @@ func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObje }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4429,9 +5829,27 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, // ListObjects API operation for Amazon Simple Storage Service. // -// Returns some or all (up to 1000) of the objects in a bucket. You can use +// Returns some or all (up to 1,000) of the objects in a bucket. You can use // the request parameters as selection criteria to return a subset of the objects -// in a bucket. +// in a bucket. A 200 OK response can contain valid or invalid XML. Be sure +// to design your application to parse the contents of the response and handle +// it appropriately. +// +// This API has been revised. We recommend that you use the newer version, ListObjectsV2, +// when developing applications. For backward compatibility, Amazon S3 continues +// to support ListObjects. +// +// The following operations are related to ListObjects: +// +// * ListObjectsV2 +// +// * GetObject +// +// * PutObject +// +// * CreateBucket +// +// * ListBuckets // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4477,7 +5895,7 @@ func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, op // // Example iterating over at most 3 pages of a ListObjects operation. // pageNum := 0 // err := client.ListObjectsPages(params, -// func(page *ListObjectsOutput, lastPage bool) bool { +// func(page *s3.ListObjectsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4509,10 +5927,12 @@ func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4566,10 +5986,34 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque // ListObjectsV2 API operation for Amazon Simple Storage Service. // -// Returns some or all (up to 1000) of the objects in a bucket. You can use +// Returns some or all (up to 1,000) of the objects in a bucket. You can use // the request parameters as selection criteria to return a subset of the objects -// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend -// you use this revised API for new application development. +// in a bucket. A 200 OK response can contain valid or invalid XML. Make sure +// to design your application to parse the contents of the response and handle +// it appropriately. +// +// To use this operation, you must have READ access to the bucket. +// +// To use this operation in an AWS Identity and Access Management (IAM) policy, +// you must have permissions to perform the s3:ListBucket action. The bucket +// owner has this permission by default and can grant this permission to others. 
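For the ListObjectsV2 operation, the SDK's Pages helper hides the continuation-token handling entirely; a minimal sketch (prefix, bucket, and helper name are placeholders):

func listObjects(svc *s3.S3, bucket, prefix string) error {
	return svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
		Bucket: aws.String(bucket),
		Prefix: aws.String(prefix),
	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, obj := range page.Contents {
			fmt.Println(aws.StringValue(obj.Key), aws.Int64Value(obj.Size))
		}
		return true // keep paging; returning false stops early
	})
}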
+// For more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// This section describes the latest revision of the API. We recommend that +// you use this revised API for application development. For backward compatibility, +// Amazon S3 continues to support the prior version of this API, ListObjects. +// +// To get a list of your buckets, see ListBuckets. +// +// The following operations are related to ListObjectsV2: +// +// * GetObject +// +// * PutObject +// +// * CreateBucket // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4615,7 +6059,7 @@ func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input // // Example iterating over at most 3 pages of a ListObjectsV2 operation. // pageNum := 0 // err := client.ListObjectsV2Pages(params, -// func(page *ListObjectsV2Output, lastPage bool) bool { +// func(page *s3.ListObjectsV2Output, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4647,10 +6091,12 @@ func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2 }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4705,6 +6151,33 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp // ListParts API operation for Amazon Simple Storage Service. // // Lists the parts that have been uploaded for a specific multipart upload. +// This operation must include the upload ID, which you obtain by sending the +// initiate multipart upload request (see CreateMultipartUpload). This request +// returns a maximum of 1,000 uploaded parts. The default number of parts returned +// is 1,000 parts. You can restrict the number of parts returned by specifying +// the max-parts request parameter. If your multipart upload consists of more +// than 1,000 parts, the response returns an IsTruncated field with the value +// of true, and a NextPartNumberMarker element. In subsequent ListParts requests +// you can include the part-number-marker query string parameter and set its +// value to the NextPartNumberMarker field value from the previous response. +// +// For more information on multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information on permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// The following operations are related to ListParts: +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * AbortMultipartUpload +// +// * ListMultipartUploads // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4745,7 +6218,7 @@ func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts . 
// // Example iterating over at most 3 pages of a ListParts operation. // pageNum := 0 // err := client.ListPartsPages(params, -// func(page *ListPartsOutput, lastPage bool) bool { +// func(page *s3.ListPartsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 @@ -4777,10 +6250,12 @@ func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, f }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4829,7 +6304,41 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC // PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. // -// Sets the accelerate configuration of an existing bucket. +// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer +// Acceleration is a bucket-level feature that enables you to perform faster +// data transfers to Amazon S3. +// +// To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The Transfer Acceleration state of a bucket can be set to one of the following +// two values: +// +// * Enabled – Enables accelerated data transfers to the bucket. +// +// * Suspended – Disables accelerated data transfers to the bucket. +// +// The GetBucketAccelerateConfiguration operation returns the transfer acceleration +// state of a bucket. +// +// After setting the Transfer Acceleration state of a bucket to Enabled, it +// might take up to thirty minutes before the data transfer rates to the bucket +// increase. +// +// The name of the bucket used for Transfer Acceleration must be DNS-compliant +// and must not contain periods ("."). +// +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// +// The following operations are related to PutBucketAccelerateConfiguration: +// +// * GetBucketAccelerateConfiguration +// +// * CreateBucket // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4899,12 +6408,101 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request output = &PutBucketAclOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketAcl API operation for Amazon Simple Storage Service. // -// Sets the permissions on a bucket using access control lists (ACL). +// Sets the permissions on an existing bucket using access control lists (ACL). +// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). 
+// To set the ACL of a bucket, you must have WRITE_ACP permission. +// +// You can use one of the following two ways to set a bucket's permissions: +// +// * Specify the ACL in the request body +// +// * Specify permissions using request headers +// +// You cannot specify access permission using both the body and the request +// headers. +// +// Depending on your application needs, you may choose to set the ACL on a bucket +// using either the request body or the headers. For example, if you have an +// existing application that updates a bucket ACL using the request body, then +// you can continue to use that approach. +// +// Access Permissions +// +// You can set access permissions using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. Specify the canned ACL name +// as the value of x-amz-acl. If you use this header, you cannot use other +// access control-specific headers in your request. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using +// these headers, you specify explicit access permissions and grantees (AWS +// accounts or Amazon S3 groups) who will receive the permission. If you +// use these ACL-specific headers, you cannot use the x-amz-acl header to +// set a canned ACL. These parameters map to the set of permissions that +// Amazon S3 supports in an ACL. For more information, see Access Control +// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// You specify each grantee as a type=value pair, where the type is one of +// the following: id – if the value specified is the canonical user ID +// of an AWS account uri – if you are granting permissions to a predefined +// group emailAddress – if the value specified is the email address of +// an AWS account Using email addresses to specify a grantee is only supported +// in the following AWS Regions: US East (N. Virginia) US West (N. California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-write +// header grants create, overwrite, and delete objects permission to LogDelivery +// group predefined by Amazon S3 and two AWS accounts identified by their +// email addresses. x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", +// id="111122223333", id="555566667777" +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. 
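The simpler of the two approaches above, a canned ACL sent via the x-amz-acl header, looks roughly like this in aws-sdk-go (helper name and bucket are illustrative; do not combine ACL with the explicit x-amz-grant-* headers):

func setBucketACL(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
		Bucket: aws.String(bucket),
		ACL:    aws.String(s3.BucketCannedACLPrivate), // one of the predefined canned ACLs
	})
	return err
}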
+// +// Grantee Values +// +// You can specify the person (grantee) to whom you're assigning access rights +// (using request elements) in the following ways: +// +// * By the person's ID: <>ID<><>GranteesEmail<> +// DisplayName is optional and ignored in the request +// +// * By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// * By Email address: <>Grantees@email.com<>lt;/Grantee> +// The grantee is resolved to the CanonicalUser and, in a response to a GET +// Object acl request, appears as the CanonicalUser. Using email addresses +// to specify a grantee is only supported in the following AWS Regions: US +// East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific +// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) +// South America (São Paulo) For a list of all the Amazon S3 supported Regions +// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the AWS General Reference. +// +// Related Resources +// +// * CreateBucket +// +// * DeleteBucket +// +// * GetObjectAcl // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4980,7 +6578,50 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon // PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. // // Sets an analytics configuration for the bucket (specified by the analytics -// configuration ID). +// configuration ID). You can have up to 1,000 analytics configurations per +// bucket. +// +// You can choose to have storage class analysis export analysis reports sent +// to a comma-separated values (CSV) flat file. See the DataExport request element. +// Reports are updated daily and are based on the object filters that you configure. +// When selecting data export, you specify a destination bucket and an optional +// destination prefix where the file is written. You can export the data to +// a destination bucket in a different account. However, the destination bucket +// must be in the same Region as the bucket that you are making the PUT analytics +// configuration to. For more information, see Amazon S3 Analytics – Storage +// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// You must create a bucket policy on the destination bucket where the exported +// file is written to grant permissions to Amazon S3 to write objects to the +// bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory +// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// Special Errors +// +// * HTTP Error: HTTP 400 Bad Request Code: InvalidArgument Cause: Invalid +// argument. 
+// +// * HTTP Error: HTTP 400 Bad Request Code: TooManyConfigurations Cause: +// You are attempting to create a new configuration but have already reached +// the 1,000-configuration limit. +// +// * HTTP Error: HTTP 403 Forbidden Code: AccessDenied Cause: You are not +// the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration +// bucket permission to set the configuration on the bucket. +// +// Related Resources +// +// * +// +// * +// +// * // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5050,12 +6691,58 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque output = &PutBucketCorsOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketCors API operation for Amazon Simple Storage Service. // -// Sets the CORS configuration for a bucket. +// Sets the cors configuration for your bucket. If the configuration exists, +// Amazon S3 replaces it. +// +// To use this operation, you must be allowed to perform the s3:PutBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// You set this configuration on a bucket so that the bucket can service cross-origin +// requests. For example, you might want to enable a request whose origin is +// http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com +// by using the browser's XMLHttpRequest capability. +// +// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors +// subresource to the bucket. The cors subresource is an XML document in which +// you configure rules that identify origins and the HTTP methods that can be +// executed on your bucket. The document is limited to 64 KB in size. +// +// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) +// against a bucket, it evaluates the cors configuration on the bucket and uses +// the first CORSRule rule that matches the incoming browser request to enable +// a cross-origin request. For a rule to match, the following conditions must +// be met: +// +// * The request's Origin header must match AllowedOrigin elements. +// +// * The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method +// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod +// elements. +// +// * Every header specified in the Access-Control-Request-Headers request +// header of a pre-flight request must match an AllowedHeader element. +// +// For more information about CORS, go to Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// Simple Storage Service Developer Guide. +// +// Related Resources +// +// * GetBucketCors +// +// * DeleteBucketCors +// +// * RESTOPTIONSobject // // Returns awserr.Error for service API and SDK errors. 
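A sketch of the PutBucketCors request described above, with a single CORSRule matching the browser-origin example in the documentation; the origin, methods, and helper name are assumptions:

func enableCORS(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
		Bucket: aws.String(bucket),
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{{
				AllowedOrigins: []*string{aws.String("http://www.example.com")},
				AllowedMethods: []*string{aws.String("GET"), aws.String("PUT")},
				AllowedHeaders: []*string{aws.String("*")},
				MaxAgeSeconds:  aws.Int64(3000),
			}},
		},
	})
	return err
}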
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5125,13 +6812,38 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r output = &PutBucketEncryptionOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketEncryption API operation for Amazon Simple Storage Service. // -// Creates a new server-side encryption configuration (or replaces an existing -// one, if present). +// This implementation of the PUT operation uses the encryption subresource +// to set the default encryption state of an existing bucket. +// +// This implementation of the PUT operation sets default encryption for a bucket +// using server-side encryption with Amazon S3-managed keys SSE-S3 or AWS KMS +// customer master keys (CMKs) (SSE-KMS). For information about the Amazon S3 +// default encryption feature, see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). +// +// This operation requires AWS Signature Version 4. For more information, see +// Authenticating Requests (AWS Signature Version 4) (sig-v4-authenticating-requests.html). +// +// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * GetBucketEncryption +// +// * DeleteBucketEncryption // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5206,8 +6918,54 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. // -// Adds an inventory configuration (identified by the inventory ID) from the -// bucket. +// This implementation of the PUT operation adds an inventory configuration +// (identified by the inventory ID) to the bucket. You can have up to 1,000 +// inventory configurations per bucket. +// +// Amazon S3 inventory generates inventories of the objects in the bucket on +// a daily or weekly basis, and the results are published to a flat file. The +// bucket that is inventoried is called the source bucket, and the bucket where +// the inventory flat file is stored is called the destination bucket. The destination +// bucket must be in the same AWS Region as the source bucket. +// +// When you configure an inventory for a source bucket, you specify the destination +// bucket where you want the inventory to be stored, and whether to generate +// the inventory daily or weekly. You can also configure what object metadata +// to include and whether to inventory all object versions or only current versions. 
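Returning to the default-encryption PUT described just above, a minimal sketch that enables SSE-S3 (AES256) as the bucket default; helper name and bucket are placeholders, and SSE-KMS would instead set SSEAlgorithm to aws:kms plus a KMSMasterKeyID:

func setDefaultEncryption(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String(bucket),
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
				},
			}},
		},
	})
	return err
}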
+// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You must create a bucket policy on the destination bucket to grant permissions +// to Amazon S3 to write objects to the bucket in the defined location. For +// an example policy, see Granting Permissions for Amazon S3 Inventory and Storage +// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// +// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Special Errors +// +// * HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument +// +// * HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are +// attempting to create a new configuration but have already reached the +// 1,000-configuration limit. +// +// * HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner +// of the specified bucket, or you do not have the s3:PutInventoryConfiguration +// bucket permission to set the configuration on the bucket. +// +// Related Resources +// +// * GetBucketInventoryConfiguration +// +// * DeleteBucketInventoryConfiguration +// +// * ListBucketInventoryConfigurations // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5282,12 +7040,64 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req output = &PutBucketLifecycleOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketLifecycle API operation for Amazon Simple Storage Service. // -// No longer used, see the PutBucketLifecycleConfiguration operation. +// +// For an updated version of this API, see PutBucketLifecycleConfiguration. +// This version has been deprecated. Existing lifecycle configurations will +// work. For new lifecycle configurations, use the updated API. +// +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. For information about lifecycle configuration, see +// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// By default, all Amazon S3 resources, including buckets, objects, and related +// subresources (for example, lifecycle configuration and website configuration) +// are private. Only the resource owner, the AWS account that created the resource, +// can access it. The resource owner can optionally grant access permissions +// to others by writing an access policy. 
For this operation, users must get +// the s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. Explicit denial also supersedes +// any other permissions. If you want to prevent users or accounts from removing +// or deleting objects from your bucket, you must deny them permissions for +// the following actions: +// +// * s3:DeleteObject +// +// * s3:DeleteObjectVersion +// +// * s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to +// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// For more examples of transitioning objects to storage classes such as STANDARD_IA +// or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples). +// +// Related Resources +// +// * GetBucketLifecycle(Deprecated) +// +// * GetBucketLifecycleConfiguration +// +// * +// +// * By default, a resource owner—in this case, a bucket owner, which is +// the AWS account that created the bucket—can perform any of the operations. +// A resource owner can also grant others permission to perform the operation. +// For more information, see the following topics in the Amazon Simple Storage +// Service Developer Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5361,13 +7171,78 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon output = &PutBucketLifecycleConfigurationOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. // -// Sets lifecycle configuration for your bucket. If a lifecycle configuration -// exists, it replaces it. +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. For information about lifecycle configuration, see +// Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, or a combination of both. +// Accordingly, this section describes the latest API. The previous version +// of the API supported filtering based only on an object key name prefix, which +// is supported for backward compatibility. For the related API description, +// see PutBucketLifecycle. +// +// Rules +// +// You specify the lifecycle configuration in your request body. The lifecycle +// configuration is specified as XML consisting of one or more rules. Each rule +// consists of the following: +// +// * Filter identifying a subset of objects to which the rule applies. 
The +// filter can be based on a key name prefix, object tags, or a combination +// of both. +// +// * Status whether the rule is in effect. +// +// * One or more lifecycle transition and expiration actions that you want +// Amazon S3 to perform on the objects identified by the filter. If the state +// of your bucket is versioning-enabled or versioning-suspended, you can +// have many versions of the same object (one current version and zero or +// more noncurrent versions). Amazon S3 provides predefined actions that +// you can specify for current and noncurrent object versions. +// +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html). +// +// Permissions +// +// By default, all Amazon S3 resources are private, including buckets, objects, +// and related subresources (for example, lifecycle configuration and website +// configuration). Only the resource owner (that is, the AWS account that created +// it) can access the resource. The resource owner can optionally grant access +// permissions to others by writing an access policy. For this operation, a +// user must get the s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. Explicit deny also supersedes any +// other permissions. If you want to block users or accounts from removing or +// deleting objects from your bucket, you must deny them permissions for the +// following actions: +// +// * s3:DeleteObject +// +// * s3:DeleteObjectVersion +// +// * s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to +// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following are related to PutBucketLifecycleConfiguration: +// +// * Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) +// +// * GetBucketLifecycleConfiguration +// +// * DeleteBucketLifecycle // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5437,21 +7312,68 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request output = &PutBucketLoggingOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketLogging API operation for Amazon Simple Storage Service. // // Set the logging parameters for a bucket and to specify permissions for who -// can view and modify the logging parameters. To set the logging status of +// can view and modify the logging parameters. All logs are saved to buckets +// in the same AWS Region as the source bucket. To set the logging status of // a bucket, you must be the bucket owner. // -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. +// The bucket owner is automatically granted FULL_CONTROL to all logs. You use +// the Grantee request element to grant access to other people. 
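As a concrete instance of the lifecycle configuration rules described above (filter, status, and an expiration action), a sketch that expires objects under a prefix after 30 days; the rule ID, prefix, and helper name are illustrative only:

func setLifecycle(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:         aws.String("expire-logs"),
				Status:     aws.String(s3.ExpirationStatusEnabled),
				Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
			}},
		},
	})
	return err
}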
The Permissions +// request element specifies the kind of access the grantee has to the logs. // -// See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutBucketLogging for usage and error information. +// Grantee Values +// +// You can specify the person (grantee) to whom you're assigning access rights +// (using request elements) in the following ways: +// +// * By the person's ID: <>ID<><>GranteesEmail<> +// DisplayName is optional and ignored in the request. +// +// * By Email address: <>Grantees@email.com<> +// The grantee is resolved to the CanonicalUser and, in a response to a GET +// Object acl request, appears as the CanonicalUser. +// +// * By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// To enable logging, you use LoggingEnabled and its children request elements. +// To disable logging, you use an empty BucketLoggingStatus request element: +// +// +// +// For more information about server access logging, see Server Access Logging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html). +// +// For more information about creating a bucket, see CreateBucket. For more +// information about returning the logging status of a bucket, see GetBucketLogging. +// +// The following operations are related to PutBucketLogging: +// +// * PutObject +// +// * DeleteBucket +// +// * CreateBucket +// +// * GetBucketLogging +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLogging for usage and error information. // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { req, out := c.PutBucketLoggingRequest(input) @@ -5520,7 +7442,33 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu // PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service. // // Sets a metrics configuration (specified by the metrics configuration ID) -// for the bucket. +// for the bucket. You can have up to 1,000 metrics configurations per bucket. +// If you're updating an existing metrics configuration, note that this is a +// full replacement of the existing metrics configuration. If you don't include +// the elements you want to keep, they are erased. +// +// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). 
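For reference, a minimal sketch of how a caller might drive this operation through the vendored SDK; the bucket name and configuration ID are placeholders, and error handling is reduced to a log call. Because the request is a full replacement, the MetricsConfiguration sent here overwrites whatever is already stored under the same ID.

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    svc := s3.New(session.Must(session.NewSession()))

    // The Id field of the input and of the MetricsConfiguration must match.
    // Sending this request replaces any configuration stored under "EntireBucket".
    _, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
        Bucket: aws.String("example-bucket"), // hypothetical bucket
        Id:     aws.String("EntireBucket"),
        MetricsConfiguration: &s3.MetricsConfiguration{
            Id: aws.String("EntireBucket"),
            // No Filter: request metrics are collected for every object in the bucket.
        },
    })
    if err != nil {
        log.Fatalf("PutBucketMetricsConfiguration: %v", err)
    }
}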
+// +// The following operations are related to PutBucketMetricsConfiguration: +// +// * DeleteBucketMetricsConfiguration +// +// * PutBucketMetricsConfiguration +// +// * ListBucketMetricsConfigurations +// +// GetBucketLifecycle has the following special error: +// +// * Error code: TooManyConfigurations Description: You are attempting to +// create a new configuration but have already reached the 1,000-configuration +// limit. HTTP Status Code: HTTP 400 Bad Request // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5595,6 +7543,10 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re output = &PutBucketNotificationOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -5679,7 +7631,55 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service. // -// Enables notifications of specified events for a bucket. +// Enables notifications of specified events for a bucket. For more information +// about event notifications, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// +// Using this API, you can replace an existing notification configuration. The +// configuration is an XML file that defines the event types that you want Amazon +// S3 to publish and the destination where you want Amazon S3 to publish an +// event notification when it detects an event of the specified type. +// +// By default, your bucket has no event notifications configured. That is, the +// notification configuration will be an empty NotificationConfiguration. +// +// +// +// +// +// This operation replaces the existing notification configuration with the +// configuration you include in the request body. +// +// After Amazon S3 receives this request, it first verifies that any Amazon +// Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon +// SQS) destination exists, and that the bucket owner has permission to publish +// to it by sending a test notification. In the case of AWS Lambda destinations, +// Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission +// to invoke the function from the Amazon S3 bucket. For more information, see +// Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// +// You can disable notifications by adding the empty NotificationConfiguration +// element. +// +// By default, only the bucket owner can configure notifications on a bucket. +// However, bucket owners can use a bucket policy to grant permission to other +// users to set this configuration with s3:PutBucketNotification permission. +// +// The PUT notification is an atomic operation. For example, suppose your notification +// configuration includes SNS topic, SQS queue, and Lambda function configurations. +// When you send a PUT request with this configuration, Amazon S3 sends test +// messages to your SNS topic. If the message fails, the entire PUT operation +// will fail, and Amazon S3 will not add the configuration to your bucket. 
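As an illustration of the request shape described above, the following sketch publishes object-created events to an SNS topic; the bucket name and topic ARN are placeholders, not values from this change. Sending an empty NotificationConfiguration instead disables notifications, as the text notes.

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    svc := s3.New(session.Must(session.NewSession()))

    // Replaces the bucket's entire notification configuration. Amazon S3 first
    // sends a test message to the topic; if that fails, nothing is stored.
    _, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
        Bucket: aws.String("example-bucket"), // hypothetical
        NotificationConfiguration: &s3.NotificationConfiguration{
            TopicConfigurations: []*s3.TopicConfiguration{{
                TopicArn: aws.String("arn:aws:sns:us-east-1:111122223333:example-topic"), // hypothetical
                Events:   []*string{aws.String("s3:ObjectCreated:*")},
            }},
        },
    })
    if err != nil {
        log.Fatalf("PutBucketNotificationConfiguration: %v", err)
    }

    // To disable notifications, send an empty configuration instead:
    // &s3.NotificationConfiguration{}
}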
+// +// Responses +// +// If the configuration in the request body includes only one TopicConfiguration +// specifying only the s3:ReducedRedundancyLostObject event type, the response +// will also include the x-amz-sns-test-message-id header containing the message +// ID of the test notification sent to the topic. +// +// The following operation is related to PutBucketNotificationConfiguration: +// +// * GetBucketNotificationConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5749,13 +7749,37 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R output = &PutBucketPolicyOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketPolicy API operation for Amazon Simple Storage Service. // -// Replaces a policy on a bucket. If the bucket already has a policy, the one -// in this request completely replaces it. +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using +// an identity other than the root user of the AWS account that owns the bucket, +// the calling identity must have the PutBucketPolicy permissions on the specified +// bucket and belong to the bucket owner's account in order to use this operation. +// +// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. +// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operations are related to PutBucketPolicy: +// +// * CreateBucket +// +// * DeleteBucket // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5825,15 +7849,66 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req output = &PutBucketReplicationOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketReplication API operation for Amazon Simple Storage Service. // // Creates a replication configuration or replaces an existing one. For more -// information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon S3 Developer Guide. // +// To perform this operation, the user or role performing the operation must +// have the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// permission. 
+// +// Specify the replication configuration in the request body. In the replication +// configuration, you provide the name of the destination bucket where you want +// Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to +// replicate objects on your behalf, and other relevant information. +// +// A replication configuration must include at least one rule, and can contain +// a maximum of 1,000. Each rule identifies a subset of objects to replicate +// by filtering the objects in the source bucket. To choose additional subsets +// of objects to replicate, add a rule for each subset. All rules must specify +// the same destination bucket. +// +// To specify a subset of the objects in the source bucket to apply a replication +// rule to, add the Filter element as a child of the Rule element. You can filter +// objects based on an object key prefix, one or more object tags, or both. +// When you add the Filter element in the configuration, you must also add the +// following elements: DeleteMarkerReplication, Status, and Priority. +// +// For information about enabling versioning on a bucket, see Using Versioning +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). +// +// By default, a resource owner, in this case the AWS account that created the +// bucket, can perform this operation. The resource owner can also grant others +// permissions to perform the operation. For more information about permissions, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// Handling Replication of Encrypted Objects +// +// By default, Amazon S3 doesn't replicate objects that are stored at rest using +// server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted +// objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, +// Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about +// replication configuration, see Replicating Objects Created with SSE Using +// CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). +// +// For information on PutBucketReplication errors, see ReplicationErrorCodeList +// +// The following operations are related to PutBucketReplication: +// +// * GetBucketReplication +// +// * DeleteBucketReplication +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5902,6 +7977,10 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) output = &PutBucketRequestPaymentOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -5910,8 +7989,14 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) // Sets the request payment configuration for a bucket. By default, the bucket // owner pays for downloads from the bucket. This configuration parameter enables // the bucket owner (only) to specify that the person requesting the download -// will be charged for the download. 
Documentation on requester pays buckets -// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +// will be charged for the download. For more information, see Requester Pays +// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to PutBucketRequestPayment: +// +// * CreateBucket +// +// * GetBucketRequestPayment // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5981,6 +8066,10 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request output = &PutBucketTaggingOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -5988,6 +8077,47 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // // Sets the tags for a bucket. // +// Use tags to organize your AWS bill to reflect your own cost structure. To +// do this, sign up to get your AWS account bill with tag key values included. +// Then, to see the cost of combined resources, organize your billing information +// according to resources with the same tag key values. For example, you can +// tag several resources with a specific application name, and then organize +// your billing information to see the total cost of that application across +// several services. For more information, see Cost Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html). +// +// Within a bucket, if you add a tag that has the same key as an existing tag, +// the new value overwrites the old value. For more information, see Using Cost +// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). +// +// To use this operation, you must have permissions to perform the s3:PutBucketTagging +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// PutBucketTagging has the following special errors: +// +// * Error code: InvalidTagError Description: The tag provided was not a +// valid tag. This error can occur if the tag did not pass input validation. +// For information about tag restrictions, see User-Defined Tag Restrictions +// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) +// and AWS-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html). +// +// * Error code: MalformedXMLError Description: The XML provided does not +// match the schema. +// +// * Error code: OperationAbortedError Description: A conflicting conditional +// operation is currently in progress against this resource. Please try again. +// +// * Error code: InternalError Description: The service was unable to apply +// the provided tag to the bucket. 
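The tagging behavior and the special error codes listed above can be exercised with a short sketch like the one below; the bucket name and tag values are placeholders. The listed error codes surface as awserr.Error values, so the example also shows the runtime type assertion that the generated documentation keeps referring to.

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    svc := s3.New(session.Must(session.NewSession()))

    // PUT replaces the bucket's existing tag set with the one supplied here.
    _, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
        Bucket: aws.String("example-bucket"), // hypothetical
        Tagging: &s3.Tagging{
            TagSet: []*s3.Tag{
                {Key: aws.String("project"), Value: aws.String("goreplay")},
                {Key: aws.String("env"), Value: aws.String("staging")},
            },
        },
    })
    if err != nil {
        // InvalidTagError, MalformedXMLError, OperationAbortedError, and
        // InternalError (listed above) arrive as awserr.Error values.
        if aerr, ok := err.(awserr.Error); ok {
            log.Fatalf("PutBucketTagging failed: %s: %s", aerr.Code(), aerr.Message())
        }
        log.Fatalf("PutBucketTagging failed: %v", err)
    }
}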
+// +// The following operations are related to PutBucketTagging: +// +// * GetBucketTagging +// +// * DeleteBucketTagging +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6056,6 +8186,10 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r output = &PutBucketVersioningOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -6064,6 +8198,38 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r // Sets the versioning state of an existing bucket. To set the versioning state, // you must be the bucket owner. // +// You can set the versioning state with one of the following values: +// +// Enabled—Enables versioning for the objects in the bucket. All objects added +// to the bucket receive a unique version ID. +// +// Suspended—Disables versioning for the objects in the bucket. All objects +// added to the bucket receive the version ID null. +// +// If the versioning state has never been set on a bucket, it has no versioning +// state; a GetBucketVersioning request does not return a versioning state value. +// +// If the bucket owner enables MFA Delete in the bucket versioning configuration, +// the bucket owner must include the x-amz-mfa request header and the Status +// and the MfaDelete request elements in a request to set the versioning state +// of the bucket. +// +// If you have an object expiration lifecycle policy in your non-versioned bucket +// and you want to maintain the same permanent delete behavior when you enable +// versioning, you must add a noncurrent expiration policy. The noncurrent expiration +// lifecycle policy will manage the deletes of the noncurrent object versions +// in the version-enabled bucket. (A version-enabled bucket maintains one current +// and zero or more noncurrent object versions.) For more information, see Lifecycle +// and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config). +// +// Related Resources +// +// * CreateBucket +// +// * DeleteBucket +// +// * GetBucketVersioning +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6132,12 +8298,81 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request output = &PutBucketWebsiteOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketWebsite API operation for Amazon Simple Storage Service. // -// Set the website configuration for a bucket. +// Sets the configuration of the website that is specified in the website subresource. +// To configure a bucket as a website, you can add this subresource on the bucket +// with website configuration information such as the file name of the index +// document and any redirect rules. 
For more information, see Hosting Websites +// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// This PUT operation requires the S3:PutBucketWebsite permission. By default, +// only the bucket owner can configure the website attached to a bucket; however, +// bucket owners can allow other users to set the website configuration by writing +// a bucket policy that grants them the S3:PutBucketWebsite permission. +// +// To redirect all website requests sent to the bucket's website endpoint, you +// add a website configuration with the following elements. Because all requests +// are sent to another website, you don't need to provide index document name +// for the bucket. +// +// * WebsiteConfiguration +// +// * RedirectAllRequestsTo +// +// * HostName +// +// * Protocol +// +// If you want granular control over redirects, you can use the following elements +// to add routing rules that describe conditions for redirecting requests and +// information about the redirect destination. In this case, the website configuration +// must provide an index document for the bucket, because some requests might +// not be redirected. +// +// * WebsiteConfiguration +// +// * IndexDocument +// +// * Suffix +// +// * ErrorDocument +// +// * Key +// +// * RoutingRules +// +// * RoutingRule +// +// * Condition +// +// * HttpErrorCodeReturnedEquals +// +// * KeyPrefixEquals +// +// * Redirect +// +// * Protocol +// +// * HostName +// +// * ReplaceKeyPrefixWith +// +// * ReplaceKeyWith +// +// * HttpRedirectCode +// +// Amazon S3 has a limitation of 50 routing rules per website configuration. +// If you require more than 50 routing rules, you can use object redirect. For +// more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) +// in the Amazon Simple Storage Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6211,7 +8446,70 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // PutObject API operation for Amazon Simple Storage Service. // -// Adds an object to a bucket. +// Adds an object to a bucket. You must have WRITE permissions on a bucket to +// add an object to it. +// +// Amazon S3 never adds partial objects; if you receive a success response, +// Amazon S3 added the entire object to the bucket. +// +// Amazon S3 is a distributed system. If it receives multiple write requests +// for the same object simultaneously, it overwrites all but the last object +// written. Amazon S3 does not provide object locking; if you need this, make +// sure to build it into your application layer or use versioning instead. +// +// To ensure that data is not corrupted traversing the network, use the Content-MD5 +// header. When you use this header, Amazon S3 checks the object against the +// provided MD5 value and, if they do not match, returns an error. Additionally, +// you can calculate the MD5 while putting an object to Amazon S3 and compare +// the returned ETag to the calculated MD5 value. +// +// The Content-MD5 header is required for any request to upload an object with +// a retention period configured using Amazon S3 Object Lock. 
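A sketch of the integrity check just described, using a hypothetical bucket and key: the caller computes the base64-encoded MD5 of the body and passes it as ContentMD5, so Amazon S3 rejects the upload if the payload is corrupted in transit; the same header satisfies the Object Lock requirement mentioned in the previous sentence.

package main

import (
    "bytes"
    "crypto/md5"
    "encoding/base64"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    svc := s3.New(session.Must(session.NewSession()))

    body := []byte("example payload\n")
    sum := md5.Sum(body)

    // ContentMD5 carries the base64-encoded 128-bit MD5 digest of the body;
    // Amazon S3 recomputes the digest on receipt and fails the request on a mismatch.
    _, err := svc.PutObject(&s3.PutObjectInput{
        Bucket:     aws.String("example-bucket"),       // hypothetical
        Key:        aws.String("examples/payload.txt"), // hypothetical
        Body:       bytes.NewReader(body),
        ContentMD5: aws.String(base64.StdEncoding.EncodeToString(sum[:])),
    })
    if err != nil {
        log.Fatalf("PutObject: %v", err)
    }
}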
For more information +// about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Server-side Encryption +// +// You can optionally request server-side encryption. With server-side encryption, +// Amazon S3 encrypts your data as it writes it to disks in its data centers +// and decrypts the data when you access it. You have the option to provide +// your own encryption key or use AWS managed encryption keys. For more information, +// see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). +// +// Access Control List (ACL)-Specific Request Headers +// +// You can use headers to grant ACL- based permissions. By default, all objects +// are private. Only the owner has full access control. When adding a new object, +// you can grant permissions to individual AWS accounts or to predefined groups +// defined by Amazon S3. These permissions are then added to the ACL on the +// object. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). +// +// Storage Class Options +// +// By default, Amazon S3 uses the STANDARD storage class to store newly created +// objects. The STANDARD storage class provides high durability and high availability. +// Depending on performance needs, you can specify a different storage class. +// For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) +// in the Amazon S3 Service Developer Guide. +// +// Versioning +// +// If you enable versioning for a bucket, Amazon S3 automatically generates +// a unique version ID for the object being stored. Amazon S3 returns this ID +// in the response. When you enable versioning for a bucket, if Amazon S3 receives +// multiple write requests for the same object simultaneously, it stores all +// of the objects. +// +// For more information about versioning, see Adding Objects to Versioning Enabled +// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html). +// For information about returning the versioning state of a bucket, see GetBucketVersioning. +// +// Related Resources +// +// * CopyObject +// +// * DeleteObject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6280,13 +8578,97 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request output = &PutObjectAclOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutObjectAcl API operation for Amazon Simple Storage Service. // -// uses the acl subresource to set the access control list (ACL) permissions -// for an object that already exists in a bucket +// Uses the acl subresource to set the access control list (ACL) permissions +// for an object that already exists in a bucket. You must have WRITE_ACP permission +// to set the ACL of an object. +// +// Depending on your application needs, you can choose to set the ACL on an +// object using either the request body or the headers. 
For example, if you +// have an existing application that updates a bucket ACL using the request +// body, you can continue to use that approach. For more information, see Access +// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// in the Amazon S3 Developer Guide. +// +// Access Permissions +// +// You can set access permissions using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. Specify the canned ACL name +// as the value of x-amz-acl. If you use this header, you cannot use other +// access control-specific headers in your request. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using +// these headers, you specify explicit access permissions and grantees (AWS +// accounts or Amazon S3 groups) who will receive the permission. If you +// use these ACL-specific headers, you cannot use x-amz-acl header to set +// a canned ACL. These parameters map to the set of permissions that Amazon +// S3 supports in an ACL. For more information, see Access Control List (ACL) +// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// You specify each grantee as a type=value pair, where the type is one of +// the following: id – if the value specified is the canonical user ID +// of an AWS account uri – if you are granting permissions to a predefined +// group emailAddress – if the value specified is the email address of +// an AWS account Using email addresses to specify a grantee is only supported +// in the following AWS Regions: US East (N. Virginia) US West (N. California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-read +// header grants list objects permission to the two AWS accounts identified +// by their email addresses. x-amz-grant-read: emailAddress="xyz@amazon.com", +// emailAddress="abc@amazon.com" +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// Grantee Values +// +// You can specify the person (grantee) to whom you're assigning access rights +// (using request elements) in the following ways: +// +// * By the person's ID: <>ID<><>GranteesEmail<> +// DisplayName is optional and ignored in the request. +// +// * By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// * By Email address: <>Grantees@email.com<>lt;/Grantee> +// The grantee is resolved to the CanonicalUser and, in a response to a GET +// Object acl request, appears as the CanonicalUser. Using email addresses +// to specify a grantee is only supported in the following AWS Regions: US +// East (N. Virginia) US West (N. 
California) US West (Oregon) Asia Pacific +// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) +// South America (São Paulo) For a list of all the Amazon S3 supported Regions +// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the AWS General Reference. +// +// Versioning +// +// The ACL of an object is set at the object version level. By default, PUT +// sets the ACL of the current version of an object. To set the ACL of a different +// version, use the versionId subresource. +// +// Related Resources +// +// * CopyObject +// +// * GetObject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6360,6 +8742,10 @@ func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *req output = &PutObjectLegalHoldOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -6367,6 +8753,10 @@ func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *req // // Applies a Legal Hold configuration to the specified object. // +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6434,6 +8824,10 @@ func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfiguration output = &PutObjectLockConfigurationOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -6443,6 +8837,13 @@ func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfiguration // in the Object Lock configuration will be applied by default to every new // object placed in the specified bucket. // +// DefaultRetention requires either Days or Years. You can't specify both at +// the same time. +// +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6510,6 +8911,10 @@ func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *req output = &PutObjectRetentionOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -6517,6 +8922,10 @@ func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *req // // Places an Object Retention configuration on an object. // +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
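To make the Object Lock operations above concrete, here is a minimal sketch that places a 30-day GOVERNANCE retention period on one object and a legal hold on another. The bucket, keys, and retention window are hypothetical, and the bucket is assumed to have been created with Object Lock enabled.

package main

import (
    "log"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    svc := s3.New(session.Must(session.NewSession()))

    // Retention: the object version cannot be deleted or overwritten before
    // RetainUntilDate, subject to the GOVERNANCE or COMPLIANCE mode rules.
    _, err := svc.PutObjectRetention(&s3.PutObjectRetentionInput{
        Bucket: aws.String("example-bucket"), // hypothetical
        Key:    aws.String("reports/2020-06.csv"),
        Retention: &s3.ObjectLockRetention{
            Mode:            aws.String("GOVERNANCE"),
            RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
        },
    })
    if err != nil {
        log.Fatalf("PutObjectRetention: %v", err)
    }

    // Legal hold: an ON/OFF flag that is independent of any retention period.
    _, err = svc.PutObjectLegalHold(&s3.PutObjectLegalHoldInput{
        Bucket:    aws.String("example-bucket"),
        Key:       aws.String("reports/2020-07.csv"),
        LegalHold: &s3.ObjectLockLegalHold{Status: aws.String("ON")},
    })
    if err != nil {
        log.Fatalf("PutObjectLegalHold: %v", err)
    }
}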
@@ -6584,12 +8993,53 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request output = &PutObjectTaggingOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutObjectTagging API operation for Amazon Simple Storage Service. // -// Sets the supplied tag-set to an object that already exists in a bucket +// Sets the supplied tag-set to an object that already exists in a bucket. +// +// A tag is a key-value pair. You can associate tags with an object by sending +// a PUT request against the tagging subresource that is associated with the +// object. You can retrieve tags by sending a GET request. For more information, +// see GetObjectTagging. +// +// For tagging-related restrictions related to characters and encodings, see +// Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html). +// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. +// +// To use this operation, you must have permission to perform the s3:PutObjectTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// To put tags of any other version, use the versionId query parameter. You +// also need permission for the s3:PutObjectVersionTagging action. +// +// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// Special Errors +// +// * Code: InvalidTagError Cause: The tag provided was not a valid tag. This +// error can occur if the tag did not pass input validation. For more information, +// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// * Code: MalformedXMLError Cause: The XML provided does not match the schema. +// +// * Code: OperationAbortedError Cause: A conflicting conditional operation +// is currently in progress against this resource. Please try again. +// +// * Code: InternalError Cause: The service was unable to apply the provided +// tag to the object. +// +// Related Resources +// +// * GetObjectTagging // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6659,13 +9109,39 @@ func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req output = &PutPublicAccessBlockOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutPublicAccessBlock API operation for Amazon Simple Storage Service. // // Creates or modifies the PublicAccessBlock configuration for an Amazon S3 -// bucket. +// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. 
+// If the PublicAccessBlock configurations are different between the bucket +// and the account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// Related Resources +// +// * GetPublicAccessBlock +// +// * DeletePublicAccessBlock +// +// * GetBucketPolicyStatus +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6741,6 +9217,190 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // Restores an archived copy of an object back into Amazon S3 // +// This operation performs the following types of requests: +// +// * select - Perform a select query on an archived object +// +// * restore an archive - Restore an archived object +// +// To use this operation, you must have permissions to perform the s3:RestoreObject +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Querying Archives with Select Requests +// +// You use a select type of request to perform SQL queries on archived objects. +// The archived objects that are being queried by the select request must be +// formatted as uncompressed comma-separated values (CSV) files. You can run +// queries and custom analytics on your archived data without having to restore +// your data to a hotter Amazon S3 tier. For an overview about select requests, +// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// When making a select request, do the following: +// +// * Define an output location for the select query's output. This must be +// an Amazon S3 bucket in the same AWS Region as the bucket that contains +// the archive object that is being queried. The AWS account that initiates +// the job must have permissions to write to the S3 bucket. You can specify +// the storage class and encryption for the output objects stored in the +// bucket. For more information about output, see Querying Archived Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon Simple Storage Service Developer Guide. 
For more information +// about the S3 structure in the request body, see the following: PutObject +// Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) +// in the Amazon Simple Storage Service Developer Guide Protecting Data Using +// Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon Simple Storage Service Developer Guide +// +// * Define the SQL expression for the SELECT type of restoration for your +// query in the request body's SelectParameters structure. You can use expressions +// like the following examples. The following expression returns all records +// from the specified object. SELECT * FROM Object Assuming that you are +// not using any headers for data stored in the object, you can specify columns +// with positional headers. SELECT s._1, s._2 FROM Object s WHERE s._3 > +// 100 If you have headers and you set the fileHeaderInfo in the CSV structure +// in the request body to USE, you can specify headers in the query. (If +// you set the fileHeaderInfo field to IGNORE, the first row is skipped for +// the query.) You cannot mix ordinal positions with header column names. +// SELECT s.Id, s.FirstName, s.SSN FROM S3Object s +// +// For more information about using SQL with S3 Glacier Select restore, see +// SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// When making a select request, you can also do the following: +// +// * To expedite your queries, specify the Expedited tier. For more information +// about tiers, see "Restoring Archives," later in this topic. +// +// * Specify details about the data serialization format of both the input +// object that is being queried and the serialization of the CSV-encoded +// query results. +// +// The following are additional important facts about the select feature: +// +// * The output results are new Amazon S3 objects. Unlike archive retrievals, +// they are stored until explicitly deleted-manually or through a lifecycle +// policy. +// +// * You can issue more than one select request on the same Amazon S3 object. +// Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests. +// +// * Amazon S3 accepts a select request even if the object has already been +// restored. A select request doesn’t return error response 409. +// +// Restoring Archives +// +// Objects in the GLACIER and DEEP_ARCHIVE storage classes are archived. To +// access an archived object, you must first initiate a restore request. This +// restores a temporary copy of the archived object. In a restore request, you +// specify the number of days that you want the restored copy to exist. After +// the specified period, Amazon S3 deletes the temporary copy but the object +// remains archived in the GLACIER or DEEP_ARCHIVE storage class that object +// was restored from. +// +// To restore a specific object version, you can provide a version ID. If you +// don't provide a version ID, Amazon S3 restores the current version. +// +// The time it takes restore jobs to finish depends on which storage class the +// object is being restored from and which data access tier you specify. 
+// +// When restoring an archived object (or using a select request), you can specify +// one of the following data access tier options in the Tier element of the +// request body: +// +// * Expedited - Expedited retrievals allow you to quickly access your data +// stored in the GLACIER storage class when occasional urgent requests for +// a subset of archives are required. For all but the largest archived objects +// (250 MB+), data accessed using Expedited retrievals are typically made +// available within 1–5 minutes. Provisioned capacity ensures that retrieval +// capacity for Expedited retrievals is available when you need it. Expedited +// retrievals and provisioned capacity are not available for the DEEP_ARCHIVE +// storage class. +// +// * Standard - S3 Standard retrievals allow you to access any of your archived +// objects within several hours. This is the default option for the GLACIER +// and DEEP_ARCHIVE retrieval requests that do not specify the retrieval +// option. S3 Standard retrievals typically complete within 3-5 hours from +// the GLACIER storage class and typically complete within 12 hours from +// the DEEP_ARCHIVE storage class. +// +// * Bulk - Bulk retrievals are Amazon S3 Glacier’s lowest-cost retrieval +// option, enabling you to retrieve large amounts, even petabytes, of data +// inexpensively in a day. Bulk retrievals typically complete within 5-12 +// hours from the GLACIER storage class and typically complete within 48 +// hours from the DEEP_ARCHIVE storage class. +// +// For more information about archive retrieval options and provisioned capacity +// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You can use Amazon S3 restore speed upgrade to change the restore speed to +// a faster speed while it is in progress. You upgrade the speed of an in-progress +// restoration by issuing another restore request to the same object, setting +// a new Tier request element. When issuing a request to upgrade the restore +// tier, you must choose a tier that is faster than the tier that the in-progress +// restore is using. You must not change any other parameters, such as the Days +// request element. For more information, see Upgrading the Speed of an In-Progress +// Restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// To get the status of object restoration, you can send a HEAD request. Operations +// return the x-amz-restore header, which provides information about the restoration +// status, in the response. You can use Amazon S3 event notifications to notify +// you when a restore is initiated or completed. For more information, see Configuring +// Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// After restoring an archived object, you can update the restoration period +// by reissuing the request with a new period. Amazon S3 updates the restoration +// period relative to the current time and charges only for the request-there +// are no data transfer charges. You cannot update the restoration period when +// Amazon S3 is actively processing your current restore request for the object. 
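A sketch of a basic restore request under the Standard tier, with a hypothetical bucket and key: the temporary copy is kept for ten days, and the x-amz-restore header mentioned above is read back through HeadObject.

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    svc := s3.New(session.Must(session.NewSession()))

    bucket, key := "example-bucket", "archives/2019-logs.tar.gz" // hypothetical

    // Ask for a temporary copy of the archived object, kept for 10 days, using
    // the Standard retrieval tier. Re-issuing the request with a faster tier
    // (for example Expedited) upgrades an in-progress restore, as described above.
    _, err := svc.RestoreObject(&s3.RestoreObjectInput{
        Bucket: aws.String(bucket),
        Key:    aws.String(key),
        RestoreRequest: &s3.RestoreRequest{
            Days: aws.Int64(10),
            GlacierJobParameters: &s3.GlacierJobParameters{
                Tier: aws.String("Standard"),
            },
        },
    })
    if err != nil {
        log.Fatalf("RestoreObject: %v", err)
    }

    // The x-amz-restore header surfaces as HeadObjectOutput.Restore,
    // e.g. `ongoing-request="true"` while the restore is still in progress.
    head, err := svc.HeadObject(&s3.HeadObjectInput{
        Bucket: aws.String(bucket),
        Key:    aws.String(key),
    })
    if err != nil {
        log.Fatalf("HeadObject: %v", err)
    }
    log.Println("restore status:", aws.StringValue(head.Restore))
}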
+// +// If your bucket has a lifecycle configuration with a rule that includes an +// expiration action, the object expiration overrides the life span that you +// specify in a restore request. For example, if you restore an object copy +// for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes +// the object in 3 days. For more information about lifecycle configuration, +// see PutBucketLifecycleConfiguration and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in Amazon Simple Storage Service Developer Guide. +// +// Responses +// +// A successful operation returns either the 200 OK or 202 Accepted status code. +// +// * If the object copy is not previously restored, then Amazon S3 returns +// 202 Accepted in the response. +// +// * If the object copy is previously restored, Amazon S3 returns 200 OK +// in the response. +// +// Special Errors +// +// * Code: RestoreAlreadyInProgress Cause: Object restore is already in progress. +// (This error does not apply to SELECT type requests.) HTTP Status Code: +// 409 Conflict SOAP Fault Code Prefix: Client +// +// * Code: GlacierExpeditedRetrievalNotAvailable Cause: S3 Glacier expedited +// retrievals are currently not available. Try again later. (Returned if +// there is insufficient capacity to process the Expedited request. This +// error applies only to Expedited retrievals and not to S3 Standard or Bulk +// retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A +// +// Related Resources +// +// * PutBucketLifecycleConfiguration +// +// * GetBucketNotificationConfiguration +// +// * SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon Simple Storage Service Developer Guide +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6750,7 +9410,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // Returned Error Codes: // * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" -// This operation is not allowed against this storage tier +// This operation is not allowed against this storage tier. // // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { @@ -6813,20 +9473,103 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r output = &SelectObjectContentOutput{} req = c.newRequest(op, input, output) + + es := newSelectObjectContentEventStream() + req.Handlers.Unmarshal.PushBack(es.setStreamCloser) + output.EventStream = es + req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler) - req.Handlers.Unmarshal.PushBack(output.runEventStreamLoop) + req.Handlers.Unmarshal.PushBack(es.runOutputStream) + req.Handlers.Unmarshal.PushBack(es.runOnStreamPartClose) return } // SelectObjectContent API operation for Amazon Simple Storage Service. // // This operation filters the contents of an Amazon S3 object based on a simple -// Structured Query Language (SQL) statement. In the request, along with the -// SQL expression, you must also specify a data serialization format (JSON or -// CSV) of the object. 
Amazon S3 uses this to parse object data into records, -// and returns only records that match the specified SQL expression. You must -// also specify the data serialization format for the response. +// structured query language (SQL) statement. In the request, along with the +// SQL expression, you must also specify a data serialization format (JSON, +// CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse +// object data into records, and returns only records that match the specified +// SQL expression. You must also specify the data serialization format for the +// response. +// +// For more information about Amazon S3 Select, see Selecting Content from Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// For more information about using SQL with Amazon S3 Select, see SQL Reference +// for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Permissions +// +// You must have s3:GetObject permission for this operation. Amazon S3 Select +// does not support anonymous access. For more information about permissions, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Object Data Formats +// +// You can use Amazon S3 Select to query objects that have the following format +// properties: +// +// * CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. +// +// * UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. +// +// * GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. +// GZIP and BZIP2 are the only compression formats that Amazon S3 Select +// supports for CSV and JSON files. Amazon S3 Select supports columnar compression +// for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object +// compression for Parquet objects. +// +// * Server-side encryption - Amazon S3 Select supports querying objects +// that are protected with server-side encryption. For objects that are encrypted +// with customer-provided encryption keys (SSE-C), you must use HTTPS, and +// you must use the headers that are documented in the GetObject. For more +// information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon Simple Storage Service Developer Guide. For objects that +// are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer +// master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side +// encryption is handled transparently, so you don't need to specify anything. +// For more information about server-side encryption, including SSE-S3 and +// SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Working with the Response Body +// +// Given the response size is unknown, Amazon S3 Select streams the response +// as a series of messages and includes a Transfer-Encoding header with chunked +// as its value in the response. For more information, see RESTSelectObjectAppendix . 
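Because the response arrives as a stream of messages, the SDK exposes it through the SelectObjectContentEventStream type introduced further down in this change. The sketch below, with a hypothetical bucket, key, and query, drains the Events() channel and writes out RecordsEvent payloads, then checks Err() and closes the stream as documented on that type.

package main

import (
    "log"
    "os"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    svc := s3.New(session.Must(session.NewSession()))

    out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
        Bucket:         aws.String("example-bucket"), // hypothetical
        Key:            aws.String("data/users.csv"), // hypothetical
        Expression:     aws.String("SELECT s.id, s.name FROM S3Object s"),
        ExpressionType: aws.String("SQL"),
        InputSerialization: &s3.InputSerialization{
            CSV: &s3.CSVInput{FileHeaderInfo: aws.String("USE")},
        },
        OutputSerialization: &s3.OutputSerialization{
            CSV: &s3.CSVOutput{},
        },
    })
    if err != nil {
        log.Fatalf("SelectObjectContent: %v", err)
    }
    defer out.EventStream.Close()

    // The query results arrive as a series of messages on the Events channel.
    for event := range out.EventStream.Events() {
        switch e := event.(type) {
        case *s3.RecordsEvent:
            os.Stdout.Write(e.Payload) // CSV-encoded result rows
        case *s3.EndEvent:
            // The service has finished sending results.
        }
    }
    if err := out.EventStream.Err(); err != nil {
        log.Fatalf("event stream: %v", err)
    }
}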
+// +// GetObject Support +// +// The SelectObjectContent operation does not support the following GetObject +// functionality. For more information, see GetObject. +// +// * Range: Although you can specify a scan range for an Amazon S3 Select +// request (see SelectObjectContentRequest$ScanRange in the request parameters), +// you cannot specify the range of bytes of an object to return. +// +// * GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot +// specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. +// For more information, about storage classes see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro) +// in the Amazon Simple Storage Service Developer Guide. +// +// Special Errors +// +// For a list of special errors for this operation, see SelectObjectContentErrorCodeList +// +// Related Resources +// +// * GetObject +// +// * GetBucketLifecycleConfiguration +// +// * PutBucketLifecycleConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6856,35 +9599,176 @@ func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObject return out, req.Send() } -const opUploadPart = "UploadPart" +// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent. +type SelectObjectContentEventStream struct { -// UploadPartRequest generates a "aws/request.Request" representing the -// client's request for the UploadPart operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UploadPart for more information on using the UploadPart -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UploadPartRequest method. -// req, resp := client.UploadPartRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart -func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { - op := &request.Operation{ - Name: opUploadPart, + // Reader is the EventStream reader for the SelectObjectContentEventStream + // events. This value is automatically set by the SDK when the API call is made + // Use this member when unit testing your code with the SDK to mock out the + // EventStream Reader. + // + // Must not be nil. + Reader SelectObjectContentEventStreamReader + + outputReader io.ReadCloser + + // StreamCloser is the io.Closer for the EventStream connection. For HTTP + // EventStream this is the response Body. The stream will be closed when + // the Close method of the EventStream is called. 
+ StreamCloser io.Closer + + done chan struct{} + closeOnce sync.Once + err *eventstreamapi.OnceError +} + +func newSelectObjectContentEventStream() *SelectObjectContentEventStream { + return &SelectObjectContentEventStream{ + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), + } +} + +func (es *SelectObjectContentEventStream) setStreamCloser(r *request.Request) { + es.StreamCloser = r.HTTPResponse.Body +} + +func (es *SelectObjectContentEventStream) runOnStreamPartClose(r *request.Request) { + if es.done == nil { + return + } + go es.waitStreamPartClose() + +} + +func (es *SelectObjectContentEventStream) waitStreamPartClose() { + var outputErrCh <-chan struct{} + if v, ok := es.Reader.(interface{ ErrorSet() <-chan struct{} }); ok { + outputErrCh = v.ErrorSet() + } + var outputClosedCh <-chan struct{} + if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok { + outputClosedCh = v.Closed() + } + + select { + case <-es.done: + case <-outputErrCh: + es.err.SetError(es.Reader.Err()) + es.Close() + case <-outputClosedCh: + if err := es.Reader.Err(); err != nil { + es.err.SetError(es.Reader.Err()) + } + es.Close() + } +} + +// Events returns a channel to read events from. +// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return es.Reader.Events() +} + +func (es *SelectObjectContentEventStream) runOutputStream(r *request.Request) { + var opts []func(*eventstream.Decoder) + if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) { + opts = append(opts, eventstream.DecodeWithLogger(r.Config.Logger)) + } + + unmarshalerForEvent := unmarshalerForSelectObjectContentEventStreamEvent{ + metadata: protocol.ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + }, + }.UnmarshalerForEventName + + decoder := eventstream.NewDecoder(r.HTTPResponse.Body, opts...) + eventReader := eventstreamapi.NewEventReader(decoder, + protocol.HandlerPayloadUnmarshal{ + Unmarshalers: r.Handlers.UnmarshalStream, + }, + unmarshalerForEvent, + ) + + es.outputReader = r.HTTPResponse.Body + es.Reader = newReadSelectObjectContentEventStream(eventReader) +} + +// Close closes the stream. This will also cause the stream to be closed. +// Close must be called when done using the stream API. Not calling Close +// may result in resource leaks. +// +// You can use the closing of the Reader's Events channel to terminate your +// application's read from the API's stream. +// +func (es *SelectObjectContentEventStream) Close() (err error) { + es.closeOnce.Do(es.safeClose) + return es.Err() +} + +func (es *SelectObjectContentEventStream) safeClose() { + if es.done != nil { + close(es.done) + } + + es.Reader.Close() + if es.outputReader != nil { + es.outputReader.Close() + } + + es.StreamCloser.Close() +} + +// Err returns any error that occurred while reading or writing EventStream +// Events from the service API's response. Returns nil if there were no errors. +func (es *SelectObjectContentEventStream) Err() error { + if err := es.err.Err(); err != nil { + return err + } + if err := es.Reader.Err(); err != nil { + return err + } + + return nil +} + +const opUploadPart = "UploadPart" + +// UploadPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadPart operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadPart for more information on using the UploadPart +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadPartRequest method. +// req, resp := client.UploadPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { + op := &request.Operation{ + Name: opUploadPart, HTTPMethod: "PUT", HTTPPath: "/{Bucket}/{Key+}", } @@ -6902,12 +9786,87 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // // Uploads a part in a multipart upload. // +// In this operation, you provide part data in your request. However, you have +// an option to specify your existing Amazon S3 object as a data source for +// the part you are uploading. To upload a part from an existing object, you +// use the UploadPartCopy operation. +// +// You must initiate a multipart upload (see CreateMultipartUpload) before you +// can upload any part. In response to your initiate request, Amazon S3 returns +// an upload ID, a unique identifier, that you must include in your upload part +// request. +// +// Part numbers can be any number from 1 to 10,000, inclusive. A part number +// uniquely identifies a part and also defines its position within the object +// being created. If you upload a new part using the same part number that was +// used with a previous part, the previously uploaded part is overwritten. Each +// part must be at least 5 MB in size, except the last part. There is no size +// limit on the last part of your multipart upload. +// +// To ensure that data is not corrupted when traversing the network, specify +// the Content-MD5 header in the upload part request. Amazon S3 checks the part +// data against the provided MD5 value. If they do not match, Amazon S3 returns +// an error. +// // Note: After you initiate multipart upload and upload one or more parts, you // must either complete or abort multipart upload in order to stop getting charged // for storage of the uploaded parts. Only after you either complete or abort // multipart upload, Amazon S3 frees up the parts storage and stops charging // you for the parts storage. // +// For more information on multipart uploads, go to Multipart Upload Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the +// Amazon Simple Storage Service Developer Guide . +// +// For information on the permissions required to use the multipart upload API, +// go to Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You can optionally request server-side encryption where Amazon S3 encrypts +// your data as it writes it to disks in its data centers and decrypts it for +// you when you access it. 
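// Editorial sketch (not part of this change or of the generated SDK
// documentation): uploading a single part within an existing multipart
// upload, as described above. It assumes an *s3.S3 client named svc, an
// uploadID string returned by CreateMultipartUpload, a partData []byte of at
// least 5 MB (unless it is the last part), and the "bytes" import; the bucket
// and key are placeholders.
//
//    partResp, err := svc.UploadPart(&s3.UploadPartInput{
//        Bucket:     aws.String("example-bucket"), // placeholder
//        Key:        aws.String("big-object"),     // placeholder
//        UploadId:   aws.String(uploadID),
//        PartNumber: aws.Int64(1), // 1-10,000; reusing a number overwrites that part
//        Body:       bytes.NewReader(partData),
//    })
//    if err != nil {
//        // handle error
//    }
//
//    // Keep the returned ETag; CompleteMultipartUpload needs it later.
//    part1 := &s3.CompletedPart{ETag: partResp.ETag, PartNumber: aws.Int64(1)}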
You have the option of providing your own encryption +// key, or you can use the AWS managed encryption keys. If you choose to provide +// your own encryption key, the request headers you provide in the request must +// match the headers you used in the request to initiate the upload by using +// CreateMultipartUpload. For more information, go to Using Server-Side Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Server-side encryption is supported by the S3 Multipart Upload actions. Unless +// you are using a customer-provided encryption key, you don't need to specify +// the encryption parameters in each UploadPart request. Instead, you only need +// to specify the server-side encryption parameters in the initial Initiate +// Multipart request. For more information, see CreateMultipartUpload. +// +// If you requested server-side encryption using a customer-provided encryption +// key in your initiate multipart upload request, you must provide identical +// encryption information in each part upload using the following headers. +// +// * x-amz-server-side​-encryption​-customer-algorithm +// +// * x-amz-server-side​-encryption​-customer-key +// +// * x-amz-server-side​-encryption​-customer-key-MD5 +// +// Special Errors +// +// * Code: NoSuchUpload Cause: The specified multipart upload does not exist. +// The upload ID might be invalid, or the multipart upload might have been +// aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code +// Prefix: Client +// +// Related Resources +// +// * CreateMultipartUpload +// +// * CompleteMultipartUpload +// +// * AbortMultipartUpload +// +// * ListParts +// +// * ListMultipartUploads +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6980,7 +9939,94 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // UploadPartCopy API operation for Amazon Simple Storage Service. // -// Uploads a part by copying data from an existing object as data source. +// Uploads a part by copying data from an existing object as data source. You +// specify the data source by adding the request header x-amz-copy-source in +// your request and a byte range by adding the request header x-amz-copy-source-range +// in your request. +// +// The minimum allowable part size for a multipart upload is 5 MB. For more +// information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Instead of using an existing object as part data, you might use the UploadPart +// operation and provide data in your request. +// +// You must initiate a multipart upload before you can upload any part. In response +// to your initiate request. Amazon S3 returns a unique identifier, the upload +// ID, that you must include in your upload part request. +// +// For more information about using the UploadPartCopy operation, see the following: +// +// * For conceptual information about multipart uploads, see Uploading Objects +// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon Simple Storage Service Developer Guide. 
+// +// * For information about permissions required to use the multipart upload +// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// * For information about copying objects using a single atomic operation +// vs. the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// * For information about using server-side encryption with customer-provided +// encryption keys with the UploadPartCopy operation, see CopyObject and +// UploadPart. +// +// Note the following additional considerations about the request headers x-amz-copy-source-if-match, +// x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and +// x-amz-copy-source-if-modified-since: +// +// * Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since +// headers are present in the request as follows: x-amz-copy-source-if-match +// condition evaluates to true, and; x-amz-copy-source-if-unmodified-since +// condition evaluates to false; Amazon S3 returns 200 OK and copies the +// data. +// +// * Consideration 2 - If both of the x-amz-copy-source-if-none-match and +// x-amz-copy-source-if-modified-since headers are present in the request +// as follows: x-amz-copy-source-if-none-match condition evaluates to false, +// and; x-amz-copy-source-if-modified-since condition evaluates to true; +// Amazon S3 returns 412 Precondition Failed response code. +// +// Versioning +// +// If your bucket has versioning enabled, you could have multiple versions of +// the same object. By default, x-amz-copy-source identifies the current version +// of the object to copy. If the current version is a delete marker and you +// don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 +// error, because the object does not exist. If you specify versionId in the +// x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns +// an HTTP 400 error, because you are not allowed to specify a delete marker +// as a version for the x-amz-copy-source. +// +// You can optionally specify a specific version of the source object to copy +// by adding the versionId subresource as shown in the following example: +// +// x-amz-copy-source: /bucket/object?versionId=version id +// +// Special Errors +// +// * Code: NoSuchUpload Cause: The specified multipart upload does not exist. +// The upload ID might be invalid, or the multipart upload might have been +// aborted or completed. HTTP Status Code: 404 Not Found +// +// * Code: InvalidRequest Cause: The specified copy source is not supported +// as a byte-range copy source. HTTP Status Code: 400 Bad Request +// +// Related Resources +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * AbortMultipartUpload +// +// * ListParts +// +// * ListMultipartUploads // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7010,13 +10056,16 @@ func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInp return out, req.Send() } -// Specifies the days since the initiation of an Incomplete Multipart Upload -// that Lifecycle will wait before permanently removing all parts of the upload. 
+// Specifies the days since the initiation of an incomplete multipart upload +// that Amazon S3 will wait before permanently removing all parts of the upload. +// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) +// in the Amazon Simple Storage Service Developer Guide. type AbortIncompleteMultipartUpload struct { _ struct{} `type:"structure"` - // Indicates the number of days that must pass since initiation for Lifecycle - // to abort an Incomplete Multipart Upload. + // Specifies the number of days after which Amazon S3 aborts an incomplete multipart + // upload. DaysAfterInitiation *int64 `type:"integer"` } @@ -7037,20 +10086,34 @@ func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortI } type AbortMultipartUploadInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"` + // The bucket name to which the upload was taking place. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Key of the object for which the multipart upload was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // Upload ID that identifies the multipart upload. 
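// Editorial sketch (not part of this change or of the generated SDK
// documentation): aborting a multipart upload by its upload ID so that the
// already-uploaded parts stop accruing storage charges. An *s3.S3 client
// named svc and an uploadID string are assumed; the bucket and key are
// placeholders.
//
//    _, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
//        Bucket:   aws.String("example-bucket"), // placeholder
//        Key:      aws.String("big-object"),     // placeholder
//        UploadId: aws.String(uploadID),
//    })
//    if err != nil {
//        // handle error
//    }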
+ // // UploadId is a required field UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } @@ -7121,6 +10184,20 @@ func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadI return s } +func (s *AbortMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *AbortMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type AbortMultipartUploadOutput struct { _ struct{} `type:"structure"` @@ -7145,10 +10222,13 @@ func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipart return s } +// Configures the transfer acceleration state for an Amazon S3 bucket. For more +// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon Simple Storage Service Developer Guide. type AccelerateConfiguration struct { _ struct{} `type:"structure"` - // The accelerate configuration of the bucket. + // Specifies the transfer acceleration status of the bucket. Status *string `type:"string" enum:"BucketAccelerateStatus"` } @@ -7168,12 +10248,14 @@ func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { return s } +// Contains the elements that set the ACL permissions for an object per grantee. type AccessControlPolicy struct { _ struct{} `type:"structure"` // A list of grants. Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + // Container for the bucket owner's display name and ID. Owner *Owner `type:"structure"` } @@ -7223,7 +10305,9 @@ func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { type AccessControlTranslation struct { _ struct{} `type:"structure"` - // The override value for the owner of the replica object. + // Specifies the replica ownership. For default and valid values, see PUT bucket + // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // in the Amazon Simple Storage Service API Reference. // // Owner is a required field Owner *string `type:"string" required:"true" enum:"OwnerOverride"` @@ -7258,10 +10342,14 @@ func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation return s } +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates in any combination, +// and an object must match all of the predicates for the filter to apply. type AnalyticsAndOperator struct { _ struct{} `type:"structure"` - // The prefix to use when evaluating an AND predicate. + // The prefix to use when evaluating an AND predicate: The prefix that an object + // must have to be included in the metrics results. Prefix *string `type:"string"` // The list of tags to use when evaluating an AND predicate. @@ -7310,6 +10398,8 @@ func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { return s } +// Specifies the configuration and any analyses for the analytics filter of +// an Amazon S3 bucket. type AnalyticsConfiguration struct { _ struct{} `type:"structure"` @@ -7318,13 +10408,13 @@ type AnalyticsConfiguration struct { // If no filter is provided, all objects will be considered in any analysis. Filter *AnalyticsFilter `type:"structure"` - // The identifier used to represent an analytics configuration. 
+ // The ID that identifies the analytics configuration. // // Id is a required field Id *string `type:"string" required:"true"` - // If present, it indicates that data related to access patterns will be collected - // and made available to analyze the tradeoffs between different storage classes. + // Contains data related to access patterns to be collected and made available + // to analyze the tradeoffs between different storage classes. // // StorageClassAnalysis is a required field StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` @@ -7384,6 +10474,7 @@ func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis return s } +// Where to publish the analytics results. type AnalyticsExportDestination struct { _ struct{} `type:"structure"` @@ -7427,6 +10518,9 @@ func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3Bucket return s } +// The filter used to describe a set of objects for analyses. A filter must +// have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). +// If no filter is provided, all objects will be considered in any analysis. type AnalyticsFilter struct { _ struct{} `type:"structure"` @@ -7489,25 +10583,28 @@ func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { return s } +// Contains information about where to publish the analytics results. type AnalyticsS3BucketDestination struct { _ struct{} `type:"structure"` - // The Amazon resource name (ARN) of the bucket to which data is exported. + // The Amazon Resource Name (ARN) of the bucket to which data is exported. // // Bucket is a required field Bucket *string `type:"string" required:"true"` - // The account ID that owns the destination bucket. If no account ID is provided, - // the owner will not be validated prior to exporting data. + // The account ID that owns the destination S3 bucket. If no account ID is provided, + // the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to + // help prevent problems if the destination bucket ownership changes. BucketAccountId *string `type:"string"` - // The file format used when exporting data to Amazon S3. + // Specifies the file format used when exporting data to Amazon S3. // // Format is a required field Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` - // The prefix to use when exporting data. The exported data begins with this - // prefix. + // The prefix to use when exporting data. The prefix is prepended to all results. Prefix *string `type:"string"` } @@ -7568,6 +10665,8 @@ func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDes return s } +// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name +// is globally unique, and the namespace is shared by all AWS accounts. type Bucket struct { _ struct{} `type:"structure"` @@ -7600,9 +10699,14 @@ func (s *Bucket) SetName(v string) *Bucket { return s } +// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon Simple Storage Service Developer Guide. type BucketLifecycleConfiguration struct { _ struct{} `type:"structure"` + // A lifecycle rule for individual objects in an Amazon S3 bucket. 
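// Editorial sketch (not part of this change or of the generated SDK
// documentation): a bucket lifecycle configuration with a single rule that
// aborts incomplete multipart uploads after seven days, illustrating the
// BucketLifecycleConfiguration, LifecycleRule, and
// AbortIncompleteMultipartUpload types documented in this section. An *s3.S3
// client named svc is assumed; the bucket name, rule ID, and prefix are
// placeholders.
//
//    _, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
//        Bucket: aws.String("example-bucket"),
//        LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
//            Rules: []*s3.LifecycleRule{{
//                ID:     aws.String("abort-stale-uploads"),
//                Status: aws.String(s3.ExpirationStatusEnabled), // "Enabled"
//                Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("uploads/")},
//                AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
//                    DaysAfterInitiation: aws.Int64(7),
//                },
//            }},
//        },
//    })
//    if err != nil {
//        // handle error
//    }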
+ // // Rules is a required field Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } @@ -7646,12 +10750,14 @@ func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifec return s } +// Container for logging status information. type BucketLoggingStatus struct { _ struct{} `type:"structure"` - // Container for logging information. Presence of this element indicates that - // logging is enabled. Parameters TargetBucket and TargetPrefix are required - // in this case. + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon Simple Storage Service API Reference. LoggingEnabled *LoggingEnabled `type:"structure"` } @@ -7686,9 +10792,16 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin return s } +// Describes the cross-origin access configuration for objects in an Amazon +// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// Simple Storage Service Developer Guide. type CORSConfiguration struct { _ struct{} `type:"structure"` + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. + // // CORSRules is a required field CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` } @@ -7732,14 +10845,18 @@ func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { return s } +// Specifies a cross-origin access rule for an Amazon S3 bucket. type CORSRule struct { _ struct{} `type:"structure"` - // Specifies which headers are allowed in a pre-flight OPTIONS request. + // Headers that are specified in the Access-Control-Request-Headers header. + // These headers are allowed in a preflight OPTIONS request. In response to + // any preflight OPTIONS request, Amazon S3 returns any requested headers that + // are allowed. AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` - // Identifies HTTP methods that the domain/origin specified in the rule is allowed - // to execute. + // An HTTP method that you allow the origin to execute. Valid values are GET, + // PUT, HEAD, POST, and DELETE. // // AllowedMethods is a required field AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` @@ -7815,7 +10932,8 @@ func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { return s } -// Describes how a CSV-formatted input object is formatted. +// Describes how an uncompressed comma-separated values (CSV)-formatted input +// object is formatted. type CSVInput struct { _ struct{} `type:"structure"` @@ -7824,24 +10942,45 @@ type CSVInput struct { // to TRUE may lower performance. AllowQuotedRecordDelimiter *bool `type:"boolean"` - // The single character used to indicate a row should be ignored when present - // at the start of a row. + // A single character used to indicate that a row should be ignored when the + // character is present at the start of that row. You can specify any character + // to indicate a comment line. Comments *string `type:"string"` - // The value used to separate individual fields in a record. + // A single character used to separate individual fields in a record. 
You can + // specify an arbitrary delimiter. FieldDelimiter *string `type:"string"` - // Describes the first line of input. Valid values: None, Ignore, Use. + // Describes the first line of input. Valid values are: + // + // * NONE: First line is not a header. + // + // * IGNORE: First line is a header, but you can't use the header values + // to indicate the column in an expression. You can use column position (such + // as _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s). + // + // * Use: First line is a header, and you can use the header value to identify + // a column in an expression (SELECT "name" FROM OBJECT). FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"` - // Value used for escaping where the field delimiter is part of the value. + // A single character used for escaping when the field delimiter is part of + // the value. For example, if the value is a, b, Amazon S3 wraps this field + // value in quotation marks, as follows: " a , b ". + // + // Type: String + // + // Default: " + // + // Ancestors: CSV QuoteCharacter *string `type:"string"` - // The single character used for escaping the quote character inside an already - // escaped value. + // A single character used for escaping the quotation mark character inside + // an already escaped value. For example, the value """ a , b """ is parsed + // as " a , b ". QuoteEscapeCharacter *string `type:"string"` - // The value used to separate individual records. + // A single character used to separate individual records in the input. Instead + // of the default value, you can specify an arbitrary delimiter. RecordDelimiter *string `type:"string"` } @@ -7897,24 +11036,33 @@ func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput { return s } -// Describes how CSV-formatted results are formatted. +// Describes how uncompressed comma-separated values (CSV)-formatted results +// are formatted. type CSVOutput struct { _ struct{} `type:"structure"` - // The value used to separate individual fields in a record. + // The value used to separate individual fields in a record. You can specify + // an arbitrary delimiter. FieldDelimiter *string `type:"string"` - // The value used for escaping where the field delimiter is part of the value. + // A single character used for escaping when the field delimiter is part of + // the value. For example, if the value is a, b, Amazon S3 wraps this field + // value in quotation marks, as follows: " a , b ". QuoteCharacter *string `type:"string"` - // Th single character used for escaping the quote character inside an already + // The single character used for escaping the quote character inside an already // escaped value. QuoteEscapeCharacter *string `type:"string"` - // Indicates whether or not all output fields should be quoted. + // Indicates whether to use quotation marks around output fields. + // + // * ALWAYS: Always use quotation marks for output fields. + // + // * ASNEEDED: Use quotation marks for output fields when needed. QuoteFields *string `type:"string" enum:"QuoteFields"` - // The value used to separate individual records. + // A single character used to separate individual records in the output. Instead + // of the default value, you can specify an arbitrary delimiter. RecordDelimiter *string `type:"string"` } @@ -7958,9 +11106,12 @@ func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { return s } +// Container for specifying the AWS Lambda notification configuration. 
type CloudFunctionConfiguration struct { _ struct{} `type:"structure"` + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. CloudFunction *string `type:"string"` // The bucket event for which to send notifications. @@ -7968,12 +11119,14 @@ type CloudFunctionConfiguration struct { // Deprecated: Event has been deprecated Event *string `deprecated:"true" type:"string" enum:"Event"` + // Bucket events for which to send notifications. Events []*string `locationName:"Event" type:"list" flattened:"true"` // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. Id *string `type:"string"` + // The role supporting the invocation of the Lambda function InvocationRole *string `type:"string"` } @@ -8017,9 +11170,15 @@ func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionC return s } +// Container for all (if there are any) keys between Prefix and the next occurrence +// of the string specified by a delimiter. CommonPrefixes lists keys that act +// like subdirectories in the directory specified by Prefix. For example, if +// the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, +// the common prefix is notes/summer/. type CommonPrefix struct { _ struct{} `type:"structure"` + // Container for the specified common prefix. Prefix *string `type:"string"` } @@ -8040,22 +11199,30 @@ func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { } type CompleteMultipartUploadInput struct { - _ struct{} `type:"structure" payload:"MultipartUpload"` + _ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"` + // Name of the bucket to which the multipart upload was initiated. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which the multipart upload was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // The container for the multipart upload request information. MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // ID for the initiated multipart upload. 
+ // // UploadId is a required field UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } @@ -8132,35 +11299,61 @@ func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartU return s } +func (s *CompleteMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CompleteMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type CompleteMultipartUploadOutput struct { _ struct{} `type:"structure"` + // The name of the bucket that contains the newly created object. Bucket *string `type:"string"` - // Entity tag of the object. + // Entity tag that identifies the newly created object's data. Objects with + // different object data will have different entity tags. The entity tag is + // an opaque string. The entity tag may or may not be an MD5 digest of the object + // data. If the entity tag is not an MD5 digest of the object data, it will + // contain one or more nonhexadecimal characters and/or will consist of less + // than 32 or more than 32 hexadecimal digits. ETag *string `type:"string"` // If the object expiration is configured, this will contain the expiration // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + // The object key of the newly created object. Key *string `min:"1" type:"string"` + // The URI that identifies the newly created object. Location *string `type:"string"` // If present, indicates that the requester was successfully charged for the // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // If you specified server-side encryption either with an Amazon S3-managed + // encryption key or an AWS KMS customer master key (CMK) in your initiate multipart + // upload request, the response includes this header. It confirms the encryption + // algorithm that Amazon S3 used to encrypt the object. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - // Version of the object. + // Version ID of the newly created object, in case the bucket has versioning + // turned on. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -8235,9 +11428,11 @@ func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipar return s } +// The container for the completed multipart upload details. type CompletedMultipartUpload struct { _ struct{} `type:"structure"` + // Array of CompletedPart data types. 
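// Editorial sketch (not part of this change or of the generated SDK
// documentation): completing a multipart upload by sending the accumulated
// parts list. An *s3.S3 client named svc, an uploadID string, and a
// completedParts slice ([]*s3.CompletedPart collected from the UploadPart
// responses, ordered by part number) are assumed; the bucket and key are
// placeholders.
//
//    _, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
//        Bucket:   aws.String("example-bucket"), // placeholder
//        Key:      aws.String("big-object"),     // placeholder
//        UploadId: aws.String(uploadID),
//        MultipartUpload: &s3.CompletedMultipartUpload{
//            Parts: completedParts,
//        },
//    })
//    if err != nil {
//        // handle error
//    }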
Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` } @@ -8257,6 +11452,7 @@ func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultip return s } +// Details of the parts that were uploaded. type CompletedPart struct { _ struct{} `type:"structure"` @@ -8290,6 +11486,10 @@ func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart { return s } +// A container for describing a condition that must be met for the specified +// redirect to apply. For example, 1. If request is for pages in the /docs folder, +// redirect to the /documents folder. 2. If request results in HTTP error 4xx, +// redirect request to another host where you might process the error. type Condition struct { _ struct{} `type:"structure"` @@ -8358,12 +11558,19 @@ func (s *ContinuationEvent) UnmarshalEvent( return nil } +func (s *ContinuationEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + return msg, err +} + type CopyObjectInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"CopyObjectRequest" type:"structure"` // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + // The name of the destination bucket. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -8403,17 +11610,18 @@ type CopyObjectInput struct { // Copies the object if it hasn't been modified since the specified time. CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` - // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256). CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. - CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` // The date and time at which the object is no longer cacheable. @@ -8431,6 +11639,8 @@ type CopyObjectInput struct { // Allows grantee to write the ACL for the applicable object. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + // The key of the destination object. 
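// Editorial sketch (not part of this change or of the generated SDK
// documentation): a minimal CopyObject call. An *s3.S3 client named svc is
// assumed; the bucket names and keys are placeholders. CopySource is the
// URL-encoded source bucket and key, optionally followed by ?versionId=<id>.
//
//    _, err := svc.CopyObject(&s3.CopyObjectInput{
//        Bucket:     aws.String("destination-bucket"),       // placeholder
//        Key:        aws.String("copied-object"),            // placeholder
//        CopySource: aws.String("source-bucket/source-key"), // placeholder
//    })
//    if err != nil {
//        // handle error
//    }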
+ // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -8450,35 +11660,44 @@ type CopyObjectInput struct { // The date and time when you want the copied object's Object Lock to expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // Specifies the AWS KMS Encryption Context to use for object encryption. The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + // or using SigV4. 
For information about configuring using any of the officially + // supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request + // Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // in the Amazon S3 Developer Guide. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // The type of storage to use for the object. Defaults to 'STANDARD'. @@ -8486,7 +11705,7 @@ type CopyObjectInput struct { // The tag-set for the object destination object this value must be used in // conjunction with the TaggingDirective. The tag-set must be encoded as URL - // Query parameters + // Query parameters. Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` // Specifies whether the object tag-set are copied from the source object or @@ -8735,6 +11954,12 @@ func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectInput) SetSSEKMSEncryptionContext(v string) *CopyObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { s.SSEKMSKeyId = &v @@ -8771,11 +11996,27 @@ func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput return s } +func (s *CopyObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CopyObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type CopyObjectOutput struct { _ struct{} `type:"structure" payload:"CopyObjectResult"` + // Container for all response elements. CopyObjectResult *CopyObjectResult `type:"structure"` + // Version of the copied object in the destination bucket. CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` // If the object expiration is configured, the response includes this header. @@ -8791,16 +12032,22 @@ type CopyObjectOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. 
+ SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version ID of the newly created copy. @@ -8853,6 +12100,12 @@ func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectOutput) SetSSEKMSEncryptionContext(v string) *CopyObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { s.SSEKMSKeyId = &v @@ -8871,11 +12124,16 @@ func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { return s } +// Container for all response elements. type CopyObjectResult struct { _ struct{} `type:"structure"` + // Returns the ETag of the new object. The ETag reflects only changes to the + // contents of an object, not its metadata. The source and destination ETag + // is identical for a successfully copied object. ETag *string `type:"string"` + // Returns the date that the object was last modified. LastModified *time.Time `type:"timestamp"` } @@ -8901,6 +12159,7 @@ func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { return s } +// Container for all response elements. type CopyPartResult struct { _ struct{} `type:"structure"` @@ -8933,11 +12192,12 @@ func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { return s } +// The configuration information for the bucket. type CreateBucketConfiguration struct { _ struct{} `type:"structure"` - // Specifies the region where the bucket will be created. If you don't specify - // a region, the bucket is created in US East (N. Virginia) Region (us-east-1). + // Specifies the Region where the bucket will be created. If you don't specify + // a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1). LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } @@ -8958,14 +12218,17 @@ func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucke } type CreateBucketInput struct { - _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` + _ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"` // The canned ACL to apply to the bucket. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + // The name of the bucket to create. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The configuration information for the bucket. 
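// Editorial sketch (not part of this change or of the generated SDK
// documentation): creating a bucket outside us-east-1, which requires a
// CreateBucketConfiguration with a LocationConstraint as described for this
// type. An *s3.S3 client named svc is assumed, and the bucket name is a
// placeholder (bucket names are globally unique).
//
//    _, err := svc.CreateBucket(&s3.CreateBucketInput{
//        Bucket: aws.String("example-bucket"),
//        CreateBucketConfiguration: &s3.CreateBucketConfiguration{
//            LocationConstraint: aws.String("eu-west-1"), // omit for us-east-1
//        },
//    })
//    if err != nil {
//        // handle error
//    }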
CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Allows grantee the read, write, read ACP, and write ACP permissions on the @@ -9078,6 +12341,9 @@ func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketI type CreateBucketOutput struct { _ struct{} `type:"structure"` + // Specifies the Region where the bucket will be created. If you are creating + // a bucket on the US East (N. Virginia) Region (us-east-1), you do not need + // to specify the location. Location *string `location:"header" locationName:"Location" type:"string"` } @@ -9098,11 +12364,13 @@ func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { } type CreateMultipartUploadInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"` // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + // The name of the bucket to which to initiate the upload + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -9138,6 +12406,8 @@ type CreateMultipartUploadInput struct { // Allows grantee to write the ACL for the applicable object. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + // Object key for which the multipart upload is to be initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -9153,41 +12423,50 @@ type CreateMultipartUploadInput struct { // Specifies the date and time when you want the Object Lock to expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. 
The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + // Specifies the AWS KMS Encryption Context to use for object encryption. The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // Specifies the ID of the symmetric customer managed AWS KMS CMK to use for + // object encryption. All GET and PUT requests for an object protected by AWS + // KMS will fail if not made via SSL or using SigV4. For information about configuring + // using any of the officially supported AWS SDKs and AWS CLI, see Specifying + // the Signature Version in Request Authentication (https://docs.aws.amazon.com/http:/docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // in the Amazon S3 Developer Guide. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // The type of storage to use for the object. Defaults to 'STANDARD'. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - // The tag-set for the object. The tag-set must be encoded as URL Query parameters + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` // If the bucket is configured as a website, redirects requests for this object @@ -9368,6 +12647,12 @@ func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMulti return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadInput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
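// Editorial sketch (not part of this change or of the generated SDK
// documentation): initiating a multipart upload with SSE-KMS. As the UploadPart
// documentation earlier in this file notes, the encryption parameters only
// need to be supplied on this initial request unless a customer-provided key
// is used. An *s3.S3 client named svc is assumed; the bucket, key, and KMS
// key ID are placeholders.
//
//    createResp, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
//        Bucket:               aws.String("example-bucket"),
//        Key:                  aws.String("big-object"),
//        ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms), // "aws:kms"
//        SSEKMSKeyId:          aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder key ID
//    })
//    if err != nil {
//        // handle error
//    }
//    uploadID := aws.StringValue(createResp.UploadId)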
func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { s.SSEKMSKeyId = &v @@ -9398,17 +12683,47 @@ func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *Creat return s } -type CreateMultipartUploadOutput struct { - _ struct{} `type:"structure"` +func (s *CreateMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CreateMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +type CreateMultipartUploadOutput struct { + _ struct{} `type:"structure"` - // Date when multipart upload will become eligible for abort operation by lifecycle. + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object + // name in the request, the response includes this header. The header indicates + // when the initiated multipart upload becomes eligible for an abort operation. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // + // The response also includes the x-amz-abort-rule-id header that provides the + // ID of the lifecycle configuration rule that defines this action. AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` - // Id of the lifecycle rule that makes a multipart upload eligible for abort - // operation. + // This header is returned along with the x-amz-abort-date header. It identifies + // the applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` // Name of the bucket to which the multipart upload was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. Bucket *string `locationName:"Bucket" type:"string"` // Object key for which the multipart upload was initiated. @@ -9424,16 +12739,22 @@ type CreateMultipartUploadOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. 
+ // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // ID for the initiated multipart upload. @@ -9499,6 +12820,12 @@ func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMult return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadOutput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { s.SSEKMSKeyId = &v @@ -9561,9 +12888,12 @@ func (s *DefaultRetention) SetYears(v int64) *DefaultRetention { return s } +// Container for the objects to delete. type Delete struct { _ struct{} `type:"structure"` + // The objects to delete. + // // Objects is a required field Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` @@ -9618,14 +12948,14 @@ func (s *Delete) SetQuiet(v bool) *Delete { } type DeleteBucketAnalyticsConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketAnalyticsConfigurationRequest" type:"structure"` // The name of the bucket from which an analytics configuration is deleted. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The identifier used to represent an analytics configuration. + // The ID that identifies the analytics configuration. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -9679,6 +13009,20 @@ func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketA return s } +func (s *DeleteBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -9694,8 +13038,10 @@ func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { } type DeleteBucketCorsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"` + // Specifies the bucket whose cors configuration is being deleted. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -9739,6 +13085,20 @@ func (s *DeleteBucketCorsInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -9754,7 +13114,7 @@ func (s DeleteBucketCorsOutput) GoString() string { } type DeleteBucketEncryptionInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketEncryptionRequest" type:"structure"` // The name of the bucket containing the server-side encryption configuration // to delete. @@ -9802,6 +13162,20 @@ func (s *DeleteBucketEncryptionInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketEncryptionOutput struct { _ struct{} `type:"structure"` } @@ -9817,8 +13191,10 @@ func (s DeleteBucketEncryptionOutput) GoString() string { } type DeleteBucketInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketRequest" type:"structure"` + // Specifies the bucket being deleted. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -9862,8 +13238,22 @@ func (s *DeleteBucketInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` // The name of the bucket containing the inventory configuration to delete. // @@ -9924,6 +13314,20 @@ func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketI return s } +func (s *DeleteBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -9939,8 +13343,10 @@ func (s DeleteBucketInventoryConfigurationOutput) GoString() string { } type DeleteBucketLifecycleInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"` + // The bucket name of the lifecycle to delete. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -9984,6 +13390,20 @@ func (s *DeleteBucketLifecycleInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -9999,7 +13419,7 @@ func (s DeleteBucketLifecycleOutput) GoString() string { } type DeleteBucketMetricsConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketMetricsConfigurationRequest" type:"structure"` // The name of the bucket containing the metrics configuration to delete. // @@ -10060,6 +13480,20 @@ func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMet return s } +func (s *DeleteBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -10089,8 +13523,10 @@ func (s DeleteBucketOutput) GoString() string { } type DeleteBucketPolicyInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10134,6 +13570,20 @@ func (s *DeleteBucketPolicyInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -10149,13 +13599,10 @@ func (s DeleteBucketPolicyOutput) GoString() string { } type DeleteBucketReplicationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketReplicationRequest" type:"structure"` // The bucket name. // - // It can take a while to propagate the deletion of a replication configuration - // to all Amazon S3 systems. 
- // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10199,6 +13646,20 @@ func (s *DeleteBucketReplicationInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -10214,8 +13675,10 @@ func (s DeleteBucketReplicationOutput) GoString() string { } type DeleteBucketTaggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"` + // The bucket that has the tag set to be removed. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10259,6 +13722,20 @@ func (s *DeleteBucketTaggingInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -10274,8 +13751,10 @@ func (s DeleteBucketTaggingOutput) GoString() string { } type DeleteBucketWebsiteInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"` + // The bucket name for which you want to remove the website configuration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10319,6 +13798,20 @@ func (s *DeleteBucketWebsiteInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -10333,6 +13826,7 @@ func (s DeleteBucketWebsiteOutput) GoString() string { return s.String() } +// Information about the delete marker. type DeleteMarkerEntry struct { _ struct{} `type:"structure"` @@ -10346,6 +13840,7 @@ type DeleteMarkerEntry struct { // Date and time the object was last modified. LastModified *time.Time `type:"timestamp"` + // The account that created the delete marker.> Owner *Owner `type:"structure"` // Version ID of an object. @@ -10392,11 +13887,21 @@ func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { return s } -// Specifies whether Amazon S3 should replicate delete makers. +// Specifies whether Amazon S3 replicates the delete markers. If you specify +// a Filter, you must specify this element. However, in the latest version of +// replication configuration (when Filter is specified), Amazon S3 doesn't replicate +// delete markers. Therefore, the DeleteMarkerReplication element can contain +// only Disabled. 
For an example configuration, see Basic Rule +// Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). +// +// If you don't specify the Filter element, Amazon S3 assumes that the replication +// configuration is the earlier version, V1. In the earlier version, Amazon +// S3 handled replication of delete markers differently. For more information, +// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). type DeleteMarkerReplication struct { _ struct{} `type:"structure"` - // The status of the delete marker replication. + // Indicates whether to replicate delete markers. // // In the current implementation, Amazon S3 doesn't replicate the delete markers. // The status must be Disabled. @@ -10420,8 +13925,17 @@ func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication { } type DeleteObjectInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteObjectRequest" type:"structure"` + // The bucket name of the bucket containing the object. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -10429,17 +13943,22 @@ type DeleteObjectInput struct { // to process this operation. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + // Key name of the object to delete. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. + // and the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // delete enabled. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // VersionId used to reference a specific version of the object. 
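The DeleteObjectInput documentation above introduces two caller-visible behaviors: an access point ARN may be supplied in place of the bucket name, and the MFA header is required when permanently deleting a versioned object from a bucket that has MFA delete enabled. A minimal caller-side sketch of a versioned delete follows; it assumes the standard aws-sdk-go v1 import paths, and the region, bucket, key, version ID, and MFA serial/code are placeholder values, not taken from this diff.

// Sketch only: permanently delete one object version with the v1 S3 client.
// MFA is needed only when the bucket's versioning configuration has MFA
// delete enabled; the serial number and code below are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket:    aws.String("example-bucket"), // an access point ARN is also accepted here
		Key:       aws.String("reports/2020/06/data.csv"),
		VersionId: aws.String("3HL4kqCxf3vjVBH40Nrjfkd"),
		MFA:       aws.String("arn:aws:iam::123456789012:mfa/example-user 123456"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deleted version:", aws.StringValue(out.VersionId), "delete marker:", aws.BoolValue(out.DeleteMarker))
}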
@@ -10521,6 +14040,20 @@ func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { return s } +func (s *DeleteObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteObjectOutput struct { _ struct{} `type:"structure"` @@ -10566,11 +14099,22 @@ func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { } type DeleteObjectTaggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"` + // The bucket name containing the objects from which to remove the tags. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Name of the tag. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -10635,6 +14179,20 @@ func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingIn return s } +func (s *DeleteObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteObjectTaggingOutput struct { _ struct{} `type:"structure"` @@ -10659,8 +14217,17 @@ func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingO } type DeleteObjectsInput struct { - _ struct{} `type:"structure" payload:"Delete"` + _ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"` + // The bucket name containing the objects to delete. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -10669,17 +14236,22 @@ type DeleteObjectsInput struct { // operation. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + // Container for the request. 
+ // // Delete is a required field Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. + // and the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // delete enabled. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } @@ -10754,11 +14326,29 @@ func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { return s } +func (s *DeleteObjectsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteObjectsOutput struct { _ struct{} `type:"structure"` + // Container element for a successful delete. It identifies the object that + // was successfully deleted. Deleted []*DeletedObject `type:"list" flattened:"true"` + // Container for a failed delete operation that describes the object that Amazon + // S3 attempted to delete and the error it encountered. Errors []*Error `locationName:"Error" type:"list" flattened:"true"` // If present, indicates that the requester was successfully charged for the @@ -10795,7 +14385,7 @@ func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { } type DeletePublicAccessBlockInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"` // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. // @@ -10842,6 +14432,20 @@ func (s *DeletePublicAccessBlockInput) getBucket() (v string) { return *s.Bucket } +func (s *DeletePublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeletePublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeletePublicAccessBlockOutput struct { _ struct{} `type:"structure"` } @@ -10856,15 +14460,24 @@ func (s DeletePublicAccessBlockOutput) GoString() string { return s.String() } +// Information about the deleted object. type DeletedObject struct { _ struct{} `type:"structure"` + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. 
In a simple DELETE, this header indicates + // whether (true) or not (false) a delete marker was created. DeleteMarker *bool `type:"boolean"` + // The version ID of the delete marker created as a result of the DELETE operation. + // If you delete a specific object version, the value returned by this header + // is the version ID of the object version deleted. DeleteMarkerVersionId *string `type:"string"` + // The name of the deleted object. Key *string `min:"1" type:"string"` + // The version ID of the deleted object. VersionId *string `type:"string"` } @@ -10902,33 +14515,28 @@ func (s *DeletedObject) SetVersionId(v string) *DeletedObject { return s } -// A container for information about the replication destination. +// Specifies information about where to publish analysis or configuration results +// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC). type Destination struct { _ struct{} `type:"structure"` - // A container for information about access control for replicas. - // - // Use this element only in a cross-account scenario where source and destination - // bucket owners are not the same to change replica ownership to the AWS account - // that owns the destination bucket. If you don't add this element to the replication - // configuration, the replicas are owned by same AWS account that owns the source - // object. + // Specify this only in a cross-account scenario (where source and destination + // bucket owners are not the same), and you want to change replica ownership + // to the AWS account that owns the destination bucket. If this is not specified + // in the replication configuration, the replicas are owned by same AWS account + // that owns the source object. AccessControlTranslation *AccessControlTranslation `type:"structure"` - // The account ID of the destination bucket. Currently, Amazon S3 verifies this - // value only if Access Control Translation is enabled. - // - // In a cross-account scenario, if you change replica ownership to the AWS account - // that owns the destination bucket by adding the AccessControlTranslation element, - // this is the account ID of the owner of the destination bucket. + // Destination bucket owner account ID. In a cross-account scenario, if you + // direct Amazon S3 to change replica ownership to the AWS account that owns + // the destination bucket by specifying the AccessControlTranslation property, + // this is the account ID of the destination bucket owner. For more information, + // see Replication Additional Configuration: Changing the Replica Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) + // in the Amazon Simple Storage Service Developer Guide. Account *string `type:"string"` // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to - // store replicas of the object identified by the rule. - // - // If there are multiple rules in your replication configuration, all rules - // must specify the same bucket as the destination. A replication configuration - // can replicate objects to only one destination bucket. + // store the results. // // Bucket is a required field Bucket *string `type:"string" required:"true"` @@ -10937,8 +14545,23 @@ type Destination struct { // is specified, you must specify this element. EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - // The class of storage used to store the object. By default Amazon S3 uses - // storage class of the source object when creating a replica. 
+ // A container specifying replication metrics-related settings enabling metrics + // and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified + // together with a ReplicationTime block. + Metrics *Metrics `type:"structure"` + + // A container specifying S3 Replication Time Control (S3 RTC), including whether + // S3 RTC is enabled and the time when all objects and operations on objects + // must be replicated. Must be specified together with a Metrics block. + ReplicationTime *ReplicationTime `type:"structure"` + + // The storage class to use when replicating objects, such as S3 Standard or + // reduced redundancy. By default, Amazon S3 uses the storage class of the source + // object to create the object replica. + // + // For valid values, see the StorageClass element of the PUT Bucket replication + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // action in the Amazon Simple Storage Service API Reference. StorageClass *string `type:"string" enum:"StorageClass"` } @@ -10963,6 +14586,16 @@ func (s *Destination) Validate() error { invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams)) } } + if s.Metrics != nil { + if err := s.Metrics.Validate(); err != nil { + invalidParams.AddNested("Metrics", err.(request.ErrInvalidParams)) + } + } + if s.ReplicationTime != nil { + if err := s.ReplicationTime.Validate(); err != nil { + invalidParams.AddNested("ReplicationTime", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -11001,19 +14634,30 @@ func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *De return s } +// SetMetrics sets the Metrics field's value. +func (s *Destination) SetMetrics(v *Metrics) *Destination { + s.Metrics = v + return s +} + +// SetReplicationTime sets the ReplicationTime field's value. +func (s *Destination) SetReplicationTime(v *ReplicationTime) *Destination { + s.ReplicationTime = v + return s +} + // SetStorageClass sets the StorageClass field's value. func (s *Destination) SetStorageClass(v string) *Destination { s.StorageClass = &v return s } -// Describes the server-side encryption that will be applied to the restore -// results. +// Contains the type of server-side encryption used. type Encryption struct { _ struct{} `type:"structure"` // The server-side encryption algorithm used when storing job results in Amazon - // S3 (e.g., AES256, aws:kms). + // S3 (for example, AES256, aws:kms). // // EncryptionType is a required field EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"` @@ -11022,8 +14666,11 @@ type Encryption struct { // the encryption context for the restore results. KMSContext *string `type:"string"` - // If the encryption type is aws:kms, this optional value specifies the AWS - // KMS key ID to use for encryption of job results. + // If the encryption type is aws:kms, this optional value specifies the ID of + // the symmetric customer managed AWS KMS CMK to use for encryption of job results. + // Amazon S3 only supports symmetric CMKs. For more information, see Using Symmetric + // and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the AWS Key Management Service Developer Guide. KMSKeyId *string `type:"string" sensitive:"true"` } @@ -11068,13 +14715,17 @@ func (s *Encryption) SetKMSKeyId(v string) *Encryption { return s } -// A container for information about the encryption-based configuration for -// replicas. 
+// Specifies encryption-related information for an Amazon S3 bucket that is +// a destination for replicated objects. type EncryptionConfiguration struct { _ struct{} `type:"structure"` - // The ID of the AWS KMS key for the AWS Region where the destination bucket - // resides. Amazon S3 uses this key to encrypt the replica object. + // Specifies the ID (Key ARN or Alias ARN) of the customer managed customer + // master key (CMK) stored in AWS Key Management Service (KMS) for the destination + // bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only + // supports symmetric customer managed CMKs. For more information, see Using + // Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the AWS Key Management Service Developer Guide. ReplicaKmsKeyID *string `type:"string"` } @@ -11094,6 +14745,9 @@ func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfig return s } +// A message that indicates the request is complete and no more messages will +// be sent. You should not assume that the request is complete until the client +// receives an EndEvent. type EndEvent struct { _ struct{} `locationName:"EndEvent" type:"structure"` } @@ -11120,15 +14774,380 @@ func (s *EndEvent) UnmarshalEvent( return nil } +func (s *EndEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + return msg, err +} + +// Container for all error elements. type Error struct { _ struct{} `type:"structure"` + // The error code is a string that uniquely identifies an error condition. It + // is meant to be read and understood by programs that detect and handle errors + // by type. + // + // Amazon S3 error codes + // + // * Code: AccessDenied Description: Access Denied HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AccountProblem Description: There is a problem with your AWS account + // that prevents the operation from completing successfully. Contact AWS + // Support for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client + // + // * Code: AllAccessDisabled Description: All access to this Amazon S3 resource + // has been disabled. Contact AWS Support for further assistance. HTTP Status + // Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AmbiguousGrantByEmailAddress Description: The email address you + // provided is associated with more than one account. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: AuthorizationHeaderMalformed Description: The authorization header + // you provided is invalid. HTTP Status Code: 400 Bad Request HTTP Status + // Code: N/A + // + // * Code: BadDigest Description: The Content-MD5 you specified did not match + // what we received. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: BucketAlreadyExists Description: The requested bucket name is + // not available. The bucket namespace is shared by all users of the system. + // Please select a different name and try again. HTTP Status Code: 409 Conflict + // SOAP Fault Code Prefix: Client + // + // * Code: BucketAlreadyOwnedByYou Description: The bucket you tried to create + // already exists, and you own it. Amazon S3 returns this error in all AWS + // Regions except in the North Virginia Region. 
For legacy compatibility, + // if you re-create an existing bucket that you already own in the North + // Virginia Region, Amazon S3 returns 200 OK and resets the bucket access + // control lists (ACLs). Code: 409 Conflict (in all Regions except the North + // Virginia Region) SOAP Fault Code Prefix: Client + // + // * Code: BucketNotEmpty Description: The bucket you tried to delete is + // not empty. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: CredentialsNotSupported Description: This request does not support + // credentials. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: CrossLocationLoggingProhibited Description: Cross-location logging + // not allowed. Buckets in one geographic location cannot log information + // to a bucket in another location. HTTP Status Code: 403 Forbidden SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooSmall Description: Your proposed upload is smaller than + // the minimum allowed object size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooLarge Description: Your proposed upload exceeds the maximum + // allowed object size. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: ExpiredToken Description: The provided token has expired. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IllegalVersioningConfigurationException Description: Indicates + // that the versioning configuration specified in the request is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncompleteBody Description: You did not provide the number of + // bytes specified by the Content-Length HTTP header HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncorrectNumberOfFilesInPostRequest Description: POST requires + // exactly one file upload per request. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: InlineDataTooLarge Description: Inline data exceeds the maximum + // allowed size. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InternalError Description: We encountered an internal error. Please + // try again. HTTP Status Code: 500 Internal Server Error SOAP Fault Code + // Prefix: Server + // + // * Code: InvalidAccessKeyId Description: The AWS access key ID you provided + // does not exist in our records. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidAddressingHeader Description: You must specify the Anonymous + // role. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: InvalidArgument Description: Invalid Argument HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketName Description: The specified bucket is not valid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketState Description: The request is not valid with + // the current state of the bucket. HTTP Status Code: 409 Conflict SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidDigest Description: The Content-MD5 you specified is not + // valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidEncryptionAlgorithmError Description: The encryption request + // you specified is not valid. The valid value is AES256. 
HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidLocationConstraint Description: The specified location + // constraint is not valid. For more information about Regions, see How to + // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidObjectState Description: The operation is not valid for + // the current state of the object. HTTP Status Code: 403 Forbidden SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidPart Description: One or more of the specified parts could + // not be found. The part might not have been uploaded, or the specified + // entity tag might not have matched the part's entity tag. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPartOrder Description: The list of parts was not in ascending + // order. Parts list must be specified in order by part number. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPayer Description: All access to this object has been disabled. + // Please contact AWS Support for further assistance. HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: InvalidPolicyDocument Description: The content of the form does + // not meet the conditions specified in the policy document. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidRange Description: The requested range cannot be satisfied. + // HTTP Status Code: 416 Requested Range Not Satisfiable SOAP Fault Code + // Prefix: Client + // + // * Code: InvalidRequest Description: Please use AWS4-HMAC-SHA256. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: SOAP requests must be made over an + // HTTPS connection. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with non-DNS compliant names. HTTP Status Code: + // 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with periods (.) in their names. HTTP Status + // Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate endpoint + // only supports virtual style requests. HTTP Status Code: 400 Bad Request + // Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is not + // configured on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is disabled + // on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported on this bucket. Contact AWS Support for more information. + // HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration cannot + // be enabled on this bucket. Contact AWS Support for more information. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidSecurity Description: The provided security credentials + // are not valid. 
HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidSOAPRequest Description: The SOAP request body is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidStorageClass Description: The storage class you specified + // is not valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidTargetBucketForLogging Description: The target bucket for + // logging does not exist, is not owned by you, or does not have the appropriate + // grants for the log-delivery group. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidToken Description: The provided token is malformed or otherwise + // invalid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidURI Description: Couldn't parse the specified URI. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: KeyTooLongError Description: Your key is too long. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedACLError Description: The XML you provided was not well-formed + // or did not validate against our published schema. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedPOSTRequest Description: The body of your POST request + // is not well-formed multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: MalformedXML Description: This happens when the user sends malformed + // XML (XML that doesn't conform to the published XSD) for the configuration. + // The error message is, "The XML you provided was not well-formed or did + // not validate against our published schema." HTTP Status Code: 400 Bad + // Request SOAP Fault Code Prefix: Client + // + // * Code: MaxMessageLengthExceeded Description: Your request was too big. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MaxPostPreDataLengthExceededError Description: Your POST request + // fields preceding the upload file were too large. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MetadataTooLarge Description: Your metadata headers exceed the + // maximum allowed metadata size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: MethodNotAllowed Description: The specified method is not allowed + // against this resource. HTTP Status Code: 405 Method Not Allowed SOAP Fault + // Code Prefix: Client + // + // * Code: MissingAttachment Description: A SOAP attachment was expected, + // but none were found. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: MissingContentLength Description: You must provide the Content-Length + // HTTP header. HTTP Status Code: 411 Length Required SOAP Fault Code Prefix: + // Client + // + // * Code: MissingRequestBodyError Description: This happens when the user + // sends an empty XML document as a request. The error message is, "Request + // body is empty." HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: MissingSecurityElement Description: The SOAP 1.1 request is missing + // a security element. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: MissingSecurityHeader Description: Your request is missing a required + // header. 
HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: NoLoggingStatusForKey Description: There is no such thing as a + // logging status subresource for a key. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucket Description: The specified bucket does not exist. + // HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucketPolicy Description: The specified bucket does not + // have a bucket policy. HTTP Status Code: 404 Not Found SOAP Fault Code + // Prefix: Client + // + // * Code: NoSuchKey Description: The specified key does not exist. HTTP + // Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchLifecycleConfiguration Description: The lifecycle configuration + // does not exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: + // Client + // + // * Code: NoSuchUpload Description: The specified multipart upload does + // not exist. The upload ID might be invalid, or the multipart upload might + // have been aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault + // Code Prefix: Client + // + // * Code: NoSuchVersion Description: Indicates that the version ID specified + // in the request does not match an existing version. HTTP Status Code: 404 + // Not Found SOAP Fault Code Prefix: Client + // + // * Code: NotImplemented Description: A header you provided implies functionality + // that is not implemented. HTTP Status Code: 501 Not Implemented SOAP Fault + // Code Prefix: Server + // + // * Code: NotSignedUp Description: Your account is not signed up for the + // Amazon S3 service. You must sign up before you can use Amazon S3. You + // can sign up at the following URL: https://aws.amazon.com/s3 HTTP Status + // Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: OperationAborted Description: A conflicting conditional operation + // is currently in progress against this resource. Try again. HTTP Status + // Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: PermanentRedirect Description: The bucket you are attempting to + // access must be addressed using the specified endpoint. Send all future + // requests to this endpoint. HTTP Status Code: 301 Moved Permanently SOAP + // Fault Code Prefix: Client + // + // * Code: PreconditionFailed Description: At least one of the preconditions + // you specified did not hold. HTTP Status Code: 412 Precondition Failed + // SOAP Fault Code Prefix: Client + // + // * Code: Redirect Description: Temporary redirect. HTTP Status Code: 307 + // Moved Temporarily SOAP Fault Code Prefix: Client + // + // * Code: RestoreAlreadyInProgress Description: Object restore is already + // in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: RequestIsNotMultiPartContent Description: Bucket POST must be + // of the enclosure-type multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeout Description: Your socket connection to the server + // was not read from or written to within the timeout period. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeTooSkewed Description: The difference between the request + // time and the server's time is too large. 
HTTP Status Code: 403 Forbidden + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTorrentOfBucketError Description: Requesting the torrent + // file of a bucket is not permitted. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: SignatureDoesNotMatch Description: The request signature we calculated + // does not match the signature you provided. Check your AWS secret access + // key and signing method. For more information, see REST Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) + // for details. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: ServiceUnavailable Description: Reduce your request rate. HTTP + // Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server + // + // * Code: SlowDown Description: Reduce your request rate. HTTP Status Code: + // 503 Slow Down SOAP Fault Code Prefix: Server + // + // * Code: TemporaryRedirect Description: You are being redirected to the + // bucket while DNS updates. HTTP Status Code: 307 Moved Temporarily SOAP + // Fault Code Prefix: Client + // + // * Code: TokenRefreshRequired Description: The provided token must be refreshed. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: TooManyBuckets Description: You have attempted to create more + // buckets than allowed. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: UnexpectedContent Description: This request does not support content. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UnresolvableGrantByEmailAddress Description: The email address + // you provided does not match any account on record. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UserKeyMustBeSpecified Description: The bucket POST must contain + // the specified field name. If it is specified, check the order of the fields. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client Code *string `type:"string"` + // The error key. Key *string `min:"1" type:"string"` + // The error message contains a generic description of the error condition in + // English. It is intended for a human audience. Simple programs display the + // message directly to the end user if they encounter an error condition they + // don't know how or don't care to handle. Sophisticated programs with more + // exhaustive error handling and proper internationalization are more likely + // to ignore the error message. Message *string `type:"string"` + // The version ID of the error. VersionId *string `type:"string"` } @@ -11166,6 +15185,7 @@ func (s *Error) SetVersionId(v string) *Error { return s } +// The error information. type ErrorDocument struct { _ struct{} `type:"structure"` @@ -11207,18 +15227,58 @@ func (s *ErrorDocument) SetKey(v string) *ErrorDocument { return s } -// A container for a key value pair that defines the criteria for the filter -// rule. +// Optional configuration to replicate existing source bucket objects. For more +// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) +// in the Amazon S3 Developer Guide. 
+type ExistingObjectReplication struct { + _ struct{} `type:"structure"` + + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExistingObjectReplicationStatus"` +} + +// String returns the string representation +func (s ExistingObjectReplication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExistingObjectReplication) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExistingObjectReplication) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExistingObjectReplication"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ExistingObjectReplication) SetStatus(v string) *ExistingObjectReplication { + s.Status = &v + return s +} + +// Specifies the Amazon S3 object key name to filter on and whether to filter +// on the suffix or prefix of the key name. type FilterRule struct { _ struct{} `type:"structure"` // The object key name prefix or suffix identifying one or more objects to which - // the filtering rule applies. The maximum prefix length is 1,024 characters. - // Overlapping prefixes and suffixes are not supported. For more information, - // see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // the filtering rule applies. The maximum length is 1,024 characters. Overlapping + // prefixes and suffixes are not supported. For more information, see Configuring + // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Name *string `type:"string" enum:"FilterRuleName"` + // The value that the filter searches for in object key names. Value *string `type:"string"` } @@ -11245,7 +15305,7 @@ func (s *FilterRule) SetValue(v string) *FilterRule { } type GetBucketAccelerateConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketAccelerateConfigurationRequest" type:"structure"` // Name of the bucket for which the accelerate configuration is retrieved. // @@ -11292,6 +15352,20 @@ func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAccelerateConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` @@ -11316,8 +15390,10 @@ func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketA } type GetBucketAclInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketAclRequest" type:"structure"` + // Specifies the S3 bucket whose ACL is being requested. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11361,12 +15437,27 @@ func (s *GetBucketAclInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketAclOutput struct { _ struct{} `type:"structure"` // A list of grants. Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + // Container for the bucket owner's display name and ID. Owner *Owner `type:"structure"` } @@ -11393,14 +15484,14 @@ func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { } type GetBucketAnalyticsConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketAnalyticsConfigurationRequest" type:"structure"` // The name of the bucket from which an analytics configuration is retrieved. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The identifier used to represent an analytics configuration. + // The ID that identifies the analytics configuration. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -11454,6 +15545,20 @@ func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyti return s } +func (s *GetBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` @@ -11478,8 +15583,10 @@ func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *Ana } type GetBucketCorsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketCorsRequest" type:"structure"` + // The bucket name for which to get the cors configuration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11523,9 +15630,25 @@ func (s *GetBucketCorsInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketCorsOutput struct { _ struct{} `type:"structure"` + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` } @@ -11546,7 +15669,7 @@ func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { } type GetBucketEncryptionInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketEncryptionRequest" type:"structure"` // The name of the bucket from which the server-side encryption configuration // is retrieved. 
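Every input type in this part of the diff gains the same getEndpointARN/hasEndpointARN pair, so the client can detect when the Bucket member actually holds an access point ARN rather than a bucket name (parseEndpointARN itself stays unexported). A caller-side sketch of the same check using only the public aws/arn package; the ARN value is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/arn"
)

func main() {
	// Placeholder access point ARN; a plain bucket name would fail arn.IsARN.
	bucket := "arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"

	if !arn.IsARN(bucket) {
		fmt.Println("plain bucket name:", bucket)
		return
	}

	parsed, err := arn.Parse(bucket)
	if err != nil {
		fmt.Println("could not parse ARN:", err)
		return
	}
	fmt.Printf("service=%s region=%s account=%s resource=%s\n",
		parsed.Service, parsed.Region, parsed.AccountID, parsed.Resource)
}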
@@ -11594,11 +15717,24 @@ func (s *GetBucketEncryptionInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketEncryptionOutput struct { _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` - // Container for server-side encryption configuration rules. Currently S3 supports - // one rule only. + // Specifies the default server-side-encryption configuration. ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` } @@ -11619,7 +15755,7 @@ func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *Serv } type GetBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"` // The name of the bucket containing the inventory configuration to retrieve. // @@ -11680,6 +15816,20 @@ func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInvento return s } +func (s *GetBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure" payload:"InventoryConfiguration"` @@ -11704,8 +15854,10 @@ func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *Inv } type GetBucketLifecycleConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"` + // The name of the bucket for which to get the lifecycle information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11749,9 +15901,24 @@ func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` + // Container for a lifecycle rule. Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` } @@ -11772,8 +15939,10 @@ func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *Ge } type GetBucketLifecycleInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"` + // The name of the bucket for which to get the lifecycle information. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11817,9 +15986,24 @@ func (s *GetBucketLifecycleInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketLifecycleOutput struct { _ struct{} `type:"structure"` + // Container for a lifecycle rule. Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` } @@ -11840,8 +16024,10 @@ func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput } type GetBucketLocationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketLocationRequest" type:"structure"` + // The name of the bucket for which to get the location. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11885,9 +16071,26 @@ func (s *GetBucketLocationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketLocationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLocationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketLocationOutput struct { _ struct{} `type:"structure"` + // Specifies the Region where the bucket resides. For a list of all the Amazon + // S3 supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region). + // Buckets in Region us-east-1 have a LocationConstraint of null. LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } @@ -11908,8 +16111,10 @@ func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLoca } type GetBucketLoggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"` + // The bucket name for which to get the logging information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11953,12 +16158,27 @@ func (s *GetBucketLoggingInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketLoggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLoggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketLoggingOutput struct { _ struct{} `type:"structure"` - // Container for logging information. Presence of this element indicates that - // logging is enabled. Parameters TargetBucket and TargetPrefix are required - // in this case. + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon Simple Storage Service API Reference. 
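The new GetBucketLocationOutput comment notes that buckets in us-east-1 report a null LocationConstraint, which surfaces in Go as a nil pointer or empty string. A small sketch of handling that case (bucket name and region are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
		Bucket: aws.String("my-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// An empty LocationConstraint means the bucket lives in us-east-1.
	region := aws.StringValue(out.LocationConstraint)
	if region == "" {
		region = "us-east-1"
	}
	fmt.Println("bucket region:", region)
}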
LoggingEnabled *LoggingEnabled `type:"structure"` } @@ -11979,7 +16199,7 @@ func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucket } type GetBucketMetricsConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketMetricsConfigurationRequest" type:"structure"` // The name of the bucket containing the metrics configuration to retrieve. // @@ -12040,6 +16260,20 @@ func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsCo return s } +func (s *GetBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure" payload:"MetricsConfiguration"` @@ -12064,9 +16298,9 @@ func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *Metrics } type GetBucketNotificationConfigurationRequest struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"` - // Name of the bucket to get the notification configuration for. + // Name of the bucket for which to get the notification configuration. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12111,9 +16345,25 @@ func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketNotificationConfigurationRequest) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketNotificationConfigurationRequest) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketPolicyInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"` + // The bucket name for which to get the bucket policy. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12157,6 +16407,20 @@ func (s *GetBucketPolicyInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketPolicyOutput struct { _ struct{} `type:"structure" payload:"Policy"` @@ -12181,7 +16445,7 @@ func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { } type GetBucketPolicyStatusInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketPolicyStatusRequest" type:"structure"` // The name of the Amazon S3 bucket whose policy status you want to retrieve. 
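GetBucketPolicyInput follows the same pattern, and its output is just the policy document carried as a string payload. A minimal usage sketch (bucket name and region are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
		Bucket: aws.String("my-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err) // fails if the bucket has no policy attached
	}

	// Policy is the raw JSON policy document.
	fmt.Println(aws.StringValue(out.Policy))
}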
// @@ -12228,6 +16492,20 @@ func (s *GetBucketPolicyStatusInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketPolicyStatusInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyStatusInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketPolicyStatusOutput struct { _ struct{} `type:"structure" payload:"PolicyStatus"` @@ -12252,8 +16530,10 @@ func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucke } type GetBucketReplicationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"` + // The bucket name for which to get the replication information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12297,6 +16577,20 @@ func (s *GetBucketReplicationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketReplicationOutput struct { _ struct{} `type:"structure" payload:"ReplicationConfiguration"` @@ -12322,8 +16616,10 @@ func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationC } type GetBucketRequestPaymentInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"` + // The name of the bucket for which to get the payment request configuration + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12367,6 +16663,20 @@ func (s *GetBucketRequestPaymentInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketRequestPaymentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` @@ -12391,8 +16701,10 @@ func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaym } type GetBucketTaggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"` + // The name of the bucket for which to get the tagging information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12436,9 +16748,25 @@ func (s *GetBucketTaggingInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketTaggingOutput struct { _ struct{} `type:"structure"` + // Contains the tag set. 
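GetBucketTaggingOutput's TagSet, documented just below, is the container of the bucket's tags. A minimal sketch of reading it (bucket name and region are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.GetBucketTagging(&s3.GetBucketTaggingInput{
		Bucket: aws.String("my-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err) // fails if the bucket has no tag set
	}

	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
	}
}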
+ // // TagSet is a required field TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` } @@ -12460,8 +16788,10 @@ func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { } type GetBucketVersioningInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"` + // The name of the bucket for which to get the versioning information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12505,6 +16835,20 @@ func (s *GetBucketVersioningInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketVersioningInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketVersioningInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketVersioningOutput struct { _ struct{} `type:"structure"` @@ -12540,8 +16884,10 @@ func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutp } type GetBucketWebsiteInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"` + // The bucket name for which to get the website configuration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12585,15 +16931,34 @@ func (s *GetBucketWebsiteInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketWebsiteOutput struct { _ struct{} `type:"structure"` + // The object key name of the website error document to use for 4XX class errors. ErrorDocument *ErrorDocument `type:"structure"` + // The name of the index document for the website (for example index.html). IndexDocument *IndexDocument `type:"structure"` + // Specifies the redirect behavior of all requests to a website endpoint of + // an Amazon S3 bucket. RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + // Rules that define when a redirect is applied and the redirect behavior. RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` } @@ -12632,18 +16997,30 @@ func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWeb } type GetObjectAclInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectAclRequest" type:"structure"` + // The bucket name that contains the object for which to get the ACL information. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. 
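The expanded GetObjectAclInput comment says that when the request goes through an access point, the access point ARN is passed in place of the bucket name. A hypothetical sketch of that call (the ARN, key, and region are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := s3.New(sess)

	out, err := svc.GetObjectAcl(&s3.GetObjectAclInput{
		// Access point ARN used in place of the bucket name (placeholder values).
		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"),
		Key:    aws.String("photos/cat.jpg"),
	})
	if err != nil {
		log.Fatal(err)
	}

	if out.Owner != nil {
		fmt.Println("owner:", aws.StringValue(out.Owner.DisplayName))
	}
	for _, g := range out.Grants {
		fmt.Println("grant:", aws.StringValue(g.Permission))
	}
}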
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The key of the object for which to get the ACL information. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // VersionId used to reference a specific version of the object. @@ -12713,12 +17090,27 @@ func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { return s } +func (s *GetObjectAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectAclOutput struct { _ struct{} `type:"structure"` // A list of grants. Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + // Container for the bucket owner's display name and ID. Owner *Owner `type:"structure"` // If present, indicates that the requester was successfully charged for the @@ -12755,8 +17147,17 @@ func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { } type GetObjectInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectRequest" type:"structure"` + // The bucket name containing the object. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12776,6 +17177,8 @@ type GetObjectInput struct { // otherwise return a 412 (precondition failed). IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` + // Key of the object to get. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -12785,13 +17188,17 @@ type GetObjectInput struct { PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` // Downloads the specified range bytes of an object. 
For more information about - // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + // the HTTP Range header, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + // (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). + // + // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. Range *string `location:"header" locationName:"Range" type:"string"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Sets the Cache-Control header of the response. @@ -12812,19 +17219,20 @@ type GetObjectInput struct { // Sets the Expires header of the response. ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // VersionId used to reference a specific version of the object. 
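GetObjectInput's Range documentation now also notes that S3 does not serve multiple ranges in one GET. A short sketch requesting only the first KiB of an object (bucket, key, and region are placeholders):

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("my-bucket"),    // placeholder
		Key:    aws.String("logs/app.log"), // placeholder
		Range:  aws.String("bytes=0-1023"), // a single range only
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()

	data, err := ioutil.ReadAll(out.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("got %d bytes, Content-Range: %s\n", len(data), aws.StringValue(out.ContentRange))
}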
@@ -12991,10 +17399,32 @@ func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { return s } +func (s *GetObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectLegalHoldInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"` - // The bucket containing the object whose Legal Hold status you want to retrieve. + // The bucket name containing the object whose Legal Hold status you want to + // retrieve. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13004,10 +17434,11 @@ type GetObjectLegalHoldInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The version ID of the object whose Legal Hold status you want to retrieve. @@ -13077,6 +17508,20 @@ func (s *GetObjectLegalHoldInput) SetVersionId(v string) *GetObjectLegalHoldInpu return s } +func (s *GetObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectLegalHoldInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectLegalHoldOutput struct { _ struct{} `type:"structure" payload:"LegalHold"` @@ -13101,7 +17546,7 @@ func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObje } type GetObjectLockConfigurationInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"` // The bucket whose Object Lock configuration you want to retrieve. 
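GetObjectLockConfigurationInput gets the same endpoint-ARN helpers; its output carries the bucket's default Object Lock rule. A hedged sketch of reading it (bucket name and region are placeholders; buckets without Object Lock enabled return an error instead):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.GetObjectLockConfiguration(&s3.GetObjectLockConfigurationInput{
		Bucket: aws.String("my-locked-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	cfg := out.ObjectLockConfiguration
	if cfg == nil || cfg.Rule == nil || cfg.Rule.DefaultRetention == nil {
		fmt.Println("no default retention rule configured")
		return
	}
	dr := cfg.Rule.DefaultRetention
	fmt.Printf("mode=%s days=%d years=%d\n",
		aws.StringValue(dr.Mode), aws.Int64Value(dr.Days), aws.Int64Value(dr.Years))
}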
// @@ -13148,6 +17593,20 @@ func (s *GetObjectLockConfigurationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectLockConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectLockConfigurationOutput struct { _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` @@ -13174,6 +17633,7 @@ func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectL type GetObjectOutput struct { _ struct{} `type:"structure" payload:"Body"` + // Indicates that a range of bytes was specified. AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` // Object data. @@ -13207,11 +17667,11 @@ type GetObjectOutput struct { DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL + // of a resource found at a URL. ETag *string `location:"header" locationName:"ETag" type:"string"` // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key value pairs + // includes this header. It includes the expiry-date and rule-id key-value pairs // providing object expiration information. The value of the rule-id is URL // encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` @@ -13223,6 +17683,10 @@ type GetObjectOutput struct { LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. + // + // By default unmarshaled keys are written as a map keys in following canonicalized format: + // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase. + // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` // This is set to the number of metadata entries not returned in x-amz-meta @@ -13244,6 +17708,8 @@ type GetObjectOutput struct { // The count of parts this object has. PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + // Amazon S3 can return this if your request involves a bucket that is either + // a source or destination in a replication rule. ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` // If present, indicates that the requester was successfully charged for the @@ -13260,18 +17726,21 @@ type GetObjectOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. 
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The number of tags, if any, on the object. @@ -13483,9 +17952,17 @@ func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput } type GetObjectRetentionInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"` - // The bucket containing the object whose retention settings you want to retrieve. + // The bucket name containing the object whose retention settings you want to + // retrieve. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13495,10 +17972,11 @@ type GetObjectRetentionInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The version ID for the object whose retention settings you want to retrieve. 
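GetObjectRetention, documented just above, returns an ObjectLockRetention with the mode and retain-until date for one object version. A minimal sketch (bucket, key, and region are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.GetObjectRetention(&s3.GetObjectRetentionInput{
		Bucket: aws.String("my-locked-bucket"),   // placeholder
		Key:    aws.String("contracts/2020.pdf"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	if out.Retention == nil {
		fmt.Println("no retention settings on this object")
		return
	}
	fmt.Printf("mode=%s retain until=%s\n",
		aws.StringValue(out.Retention.Mode),
		aws.TimeValue(out.Retention.RetainUntilDate).Format("2006-01-02"))
}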
@@ -13568,6 +18046,20 @@ func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInpu return s } +func (s *GetObjectRetentionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectRetentionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectRetentionOutput struct { _ struct{} `type:"structure" payload:"Retention"` @@ -13592,14 +18084,26 @@ func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObje } type GetObjectTaggingInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"` + // The bucket name containing the object for which to get the tagging information. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which to get the tagging information. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // The versionId of the object for which to get the tagging information. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -13660,12 +18164,29 @@ func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { return s } +func (s *GetObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectTaggingOutput struct { _ struct{} `type:"structure"` + // Contains the tag set. + // // TagSet is a required field TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` + // The versionId of the object for which you got the tagging information. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -13692,18 +18213,24 @@ func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput } type GetObjectTorrentInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"` + // The name of the bucket containing the object for which to get the torrent + // files. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The object key for which to get the information. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. 
- // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } @@ -13764,9 +18291,24 @@ func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput return s } +func (s *GetObjectTorrentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectTorrentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectTorrentOutput struct { _ struct{} `type:"structure" payload:"Body"` + // A Bencoded dictionary as defined by the BitTorrent specification Body io.ReadCloser `type:"blob"` // If present, indicates that the requester was successfully charged for the @@ -13797,7 +18339,7 @@ func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOu } type GetPublicAccessBlockInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"` // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you // want to retrieve. @@ -13845,6 +18387,20 @@ func (s *GetPublicAccessBlockInput) getBucket() (v string) { return *s.Bucket } +func (s *GetPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetPublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetPublicAccessBlockOutput struct { _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` @@ -13869,10 +18425,11 @@ func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *Public return s } +// Container for S3 Glacier job parameters. type GlacierJobParameters struct { _ struct{} `type:"structure"` - // Glacier retrieval tier at which the restore will be processed. + // S3 Glacier retrieval tier at which the restore will be processed. // // Tier is a required field Tier *string `type:"string" required:"true" enum:"Tier"` @@ -13907,9 +18464,11 @@ func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { return s } +// Container for grant information. type Grant struct { _ struct{} `type:"structure"` + // The person being granted permissions. Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` // Specifies the permission given to the grantee. @@ -13953,6 +18512,7 @@ func (s *Grant) SetPermission(v string) *Grant { return s } +// Container for the person being granted permissions. type Grantee struct { _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` @@ -13960,6 +18520,29 @@ type Grantee struct { DisplayName *string `type:"string"` // Email address of the grantee. 
+ // + // Using email addresses to specify a grantee is only supported in the following + // AWS Regions: + // + // * US East (N. Virginia) + // + // * US West (N. California) + // + // * US West (Oregon) + // + // * Asia Pacific (Singapore) + // + // * Asia Pacific (Sydney) + // + // * Asia Pacific (Tokyo) + // + // * Europe (Ireland) + // + // * South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see Regions + // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the AWS General Reference. EmailAddress *string `type:"string"` // The canonical user ID of the grantee. @@ -14028,8 +18611,10 @@ func (s *Grantee) SetURI(v string) *Grantee { } type HeadBucketInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"HeadBucketRequest" type:"structure"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -14066,11 +18651,25 @@ func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput { return s } -func (s *HeadBucketInput) getBucket() (v string) { +func (s *HeadBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +func (s *HeadBucketInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *HeadBucketInput) hasEndpointARN() bool { if s.Bucket == nil { - return v + return false } - return *s.Bucket + return arn.IsARN(*s.Bucket) } type HeadBucketOutput struct { @@ -14088,8 +18687,10 @@ func (s HeadBucketOutput) GoString() string { } type HeadObjectInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"HeadObjectRequest" type:"structure"` + // The name of the bucket containing the object. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -14109,6 +18710,8 @@ type HeadObjectInput struct { // otherwise return a 412 (precondition failed). IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` + // The object key. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -14119,28 +18722,32 @@ type HeadObjectInput struct { PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + // the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + // + // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. Range *string `location:"header" locationName:"Range" type:"string"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // VersionId used to reference a specific version of the object. @@ -14271,9 +18878,24 @@ func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { return s } +func (s *HeadObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *HeadObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type HeadObjectOutput struct { _ struct{} `type:"structure"` + // Indicates that a range of bytes was specified. AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` // Specifies caching behavior along the request/reply chain. @@ -14301,11 +18923,11 @@ type HeadObjectOutput struct { DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL + // of a resource found at a URL. ETag *string `location:"header" locationName:"ETag" type:"string"` // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key value pairs + // includes this header. It includes the expiry-date and rule-id key-value pairs // providing object expiration information. The value of the rule-id is URL // encoded. 
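The new Metadata comment here (and on GetObjectOutput earlier in the diff) explains that unmarshaled x-amz-meta-* keys are canonicalized unless aws.Config.LowerCaseHeaderMaps is set to true. A sketch of a HeadObject call that opts into lowercase keys; bucket, key, and region are placeholders, and the LowerCaseHeaderMaps field is assumed to exist in the SDK version this diff targets:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Opt into lowercase x-amz-meta-* keys instead of the canonicalized form.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:              aws.String("us-east-1"),
		LowerCaseHeaderMaps: aws.Bool(true),
	}))
	svc := s3.New(sess)

	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("my-bucket"),      // placeholder
		Key:    aws.String("reports/q2.csv"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	for k, v := range out.Metadata {
		fmt.Printf("%s: %s\n", k, aws.StringValue(v))
	}
	fmt.Println("last modified:", aws.TimeValue(out.LastModified))
}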
Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` @@ -14317,6 +18939,10 @@ type HeadObjectOutput struct { LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. + // + // By default unmarshaled keys are written as a map keys in following canonicalized format: + // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase. + // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` // This is set to the number of metadata entries not returned in x-amz-meta @@ -14325,26 +18951,69 @@ type HeadObjectOutput struct { // you can create metadata whose values are not legal HTTP headers. MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` - // The Legal Hold status for the specified object. + // Specifies whether a legal hold is in effect for this object. This header + // is only returned if the requester has the s3:GetObjectLegalHold permission. + // This header is not returned if the specified version of this object has never + // had a legal hold applied. For more information about S3 Object Lock, see + // Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // The Object Lock mode currently in place for this object. + // The Object Lock mode, if any, that's in effect for this object. This header + // is only returned if the requester has the s3:GetObjectRetention permission. + // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - // The date and time when this object's Object Lock will expire. + // The date and time when the Object Lock retention period expires. This header + // is only returned if the requester has the s3:GetObjectRetention permission. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // The count of parts this object has. PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + // Amazon S3 can return this header if your request involves a bucket that is + // either a source or destination in a replication rule. + // + // In replication, you have a source bucket on which you configure replication + // and destination bucket where Amazon S3 stores object replicas. When you request + // an object (GetObject) or object metadata (HeadObject) from these buckets, + // Amazon S3 will return the x-amz-replication-status header in the response + // as follows: + // + // * If requesting an object from the source bucket — Amazon S3 will return + // the x-amz-replication-status header if the object in your request is eligible + // for replication. For example, suppose that in your replication configuration, + // you specify object prefix TaxDocs requesting Amazon S3 to replicate objects + // with key prefix TaxDocs. Any objects you upload with this key name prefix, + // for example TaxDocs/document1.pdf, are eligible for replication. 
For any + // object request with this key name prefix, Amazon S3 will return the x-amz-replication-status + // header with value PENDING, COMPLETED or FAILED indicating object replication + // status. + // + // * If requesting an object from the destination bucket — Amazon S3 will + // return the x-amz-replication-status header with value REPLICA if the object + // in your request is a replica that Amazon S3 created. + // + // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` // If present, indicates that the requester was successfully charged for the // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // Provides information about object restoration operation and expiration time - // of the restored object copy. + // If the object is an archived object (an object whose storage class is GLACIER), + // the response includes this header if either the archive restoration is in + // progress (see RestoreObject or an archive copy is already restored. + // + // If an archive copy is already restored, the header value indicates when Amazon + // S3 is scheduled to delete the object copy. For example: + // + // x-amz-restore: ongoing-request="false", expiry-date="Fri, 23 Dec 2012 00:00:00 + // GMT" + // + // If the object restoration is in progress, the header returns the value ongoing-request="true". + // + // For more information about archiving objects, see Transitioning Objects: + // General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations). Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, @@ -14353,18 +19022,25 @@ type HeadObjectOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // If the object is stored using server-side encryption either with an AWS KMS + // customer master key (CMK) or an Amazon S3-managed encryption key, the response + // includes this header with the value of the server-side encryption algorithm + // used when storing this object in Amazon S3 (for example, AES256, aws:kms). 
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // Version of the object. @@ -14554,13 +19230,15 @@ func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutpu return s } +// Container for the Suffix element. type IndexDocument struct { _ struct{} `type:"structure"` // A suffix that is appended to a request that is for a directory on the website - // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ - // the data that is returned will be for the object with the key name images/index.html) - // The suffix must not be empty and must not include a slash character. + // endpoint (for example,if the suffix is index.html and you make a request + // to samplebucket/images/ the data that is returned will be for the object + // with the key name images/index.html) The suffix must not be empty and must + // not include a slash character. // // Suffix is a required field Suffix *string `type:"string" required:"true"` @@ -14595,6 +19273,7 @@ func (s *IndexDocument) SetSuffix(v string) *IndexDocument { return s } +// Container element that identifies who initiated the multipart upload. type Initiator struct { _ struct{} `type:"structure"` @@ -14680,6 +19359,9 @@ func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization { return s } +// Specifies the inventory configuration for an Amazon S3 bucket. For more information, +// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) +// in the Amazon Simple Storage Service API Reference. type InventoryConfiguration struct { _ struct{} `type:"structure"` @@ -14697,12 +19379,16 @@ type InventoryConfiguration struct { // Id is a required field Id *string `type:"string" required:"true"` - // Specifies which object version(s) to included in the inventory results. + // Object versions to include in the inventory list. If set to All, the list + // includes all the object versions, which adds the version-related fields VersionId, + // IsLatest, and DeleteMarker to the list. If set to Current, the list does + // not contain these version-related fields. // // IncludedObjectVersions is a required field IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"` - // Specifies whether the inventory is enabled or disabled. + // Specifies whether the inventory is enabled or disabled. If set to True, an + // inventory list is generated. If set to False, no inventory list is generated. // // IsEnabled is a required field IsEnabled *bool `type:"boolean" required:"true"` @@ -14808,6 +19494,7 @@ func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryCon return s } +// Specifies the inventory configuration for an Amazon S3 bucket. 
type InventoryDestination struct { _ struct{} `type:"structure"` @@ -14857,10 +19544,10 @@ func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestin type InventoryEncryption struct { _ struct{} `type:"structure"` - // Specifies the use of SSE-KMS to encrypt delivered Inventory reports. + // Specifies the use of SSE-KMS to encrypt delivered inventory reports. SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"` - // Specifies the use of SSE-S3 to encrypt delivered Inventory reports. + // Specifies the use of SSE-S3 to encrypt delivered inventory reports. SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"` } @@ -14901,6 +19588,8 @@ func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption { return s } +// Specifies an inventory filter. The inventory only includes objects that meet +// the filter's criteria. type InventoryFilter struct { _ struct{} `type:"structure"` @@ -14939,13 +19628,19 @@ func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { return s } +// Contains the bucket name, file format, bucket owner (optional), and prefix +// (optional) where inventory results are published. type InventoryS3BucketDestination struct { _ struct{} `type:"structure"` - // The ID of the account that owns the destination bucket. + // The account ID that owns the destination S3 bucket. If no account ID is provided, + // the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to + // help prevent problems if the destination bucket ownership changes. AccountId *string `type:"string"` - // The Amazon resource name (ARN) of the bucket where inventory results will + // The Amazon Resource Name (ARN) of the bucket where inventory results will // be published. // // Bucket is a required field @@ -15032,6 +19727,7 @@ func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDes return s } +// Specifies the schedule for generating inventory results. type InventorySchedule struct { _ struct{} `type:"structure"` @@ -15070,6 +19766,7 @@ func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { return s } +// Specifies JSON as object's input serialization format. type JSONInput struct { _ struct{} `type:"structure"` @@ -15093,10 +19790,12 @@ func (s *JSONInput) SetType(v string) *JSONInput { return s } +// Specifies JSON as request's output serialization format. type JSONOutput struct { _ struct{} `type:"structure"` - // The value used to separate individual records in the output. + // The value used to separate individual records in the output. If no value + // is specified, Amazon S3 uses a newline character ('\n'). RecordDelimiter *string `type:"string"` } @@ -15120,7 +19819,7 @@ func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput { type KeyFilter struct { _ struct{} `type:"structure"` - // A list of containers for the key value pair that defines the criteria for + // A list of containers for the key-value pair that defines the criteria for // the filter rule. FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` } @@ -15145,11 +19844,15 @@ func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { type LambdaFunctionConfiguration struct { _ struct{} `type:"structure"` + // The Amazon S3 bucket event for which to invoke the AWS Lambda function. 
For + // more information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - // A container for object key name filtering rules. For information about key - // name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` @@ -15157,8 +19860,8 @@ type LambdaFunctionConfiguration struct { // If you don't provide one, Amazon S3 will assign an ID. Id *string `type:"string"` - // The Amazon Resource Name (ARN) of the Lambda cloud function that Amazon S3 - // can invoke when it detects events of the specified type. + // The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3 + // invokes when the specified event type occurs. // // LambdaFunctionArn is a required field LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` @@ -15214,9 +19917,12 @@ func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunc return s } +// Container for lifecycle rules. You can add as many as 1000 rules. type LifecycleConfiguration struct { _ struct{} `type:"structure"` + // Specifies lifecycle configuration rules for an Amazon S3 bucket. + // // Rules is a required field Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } @@ -15260,6 +19966,7 @@ func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { return s } +// Container for the expiration for the lifecycle of the object. type LifecycleExpiration struct { _ struct{} `type:"structure"` @@ -15306,13 +20013,19 @@ func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExp return s } +// A lifecycle rule for individual objects in an Amazon S3 bucket. type LifecycleRule struct { _ struct{} `type:"structure"` - // Specifies the days since the initiation of an Incomplete Multipart Upload - // that Lifecycle will wait before permanently removing all parts of the upload. + // Specifies the days since the initiation of an incomplete multipart upload + // that Amazon S3 will wait before permanently removing all parts of the upload. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon Simple Storage Service Developer Guide. AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + // Specifies the expiration for the lifecycle of the object in the form of date, + // days and, whether the object has a delete marker. Expiration *LifecycleExpiration `type:"structure"` // The Filter is used to identify objects that a Lifecycle Rule applies to. @@ -15329,6 +20042,11 @@ type LifecycleRule struct { // period in the object's lifetime. 
NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + // Specifies the transition rule for the lifecycle rule that describes when + // noncurrent objects transition to a specific storage class. If your bucket + // is versioning-enabled (or versioning is suspended), you can set this action + // to request that Amazon S3 transition noncurrent object versions to a specific + // storage class at a set period in the object's lifetime. NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` // Prefix identifying one or more objects to which the rule applies. This is @@ -15343,6 +20061,7 @@ type LifecycleRule struct { // Status is a required field Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + // Specifies when an Amazon S3 object transitions to a specified storage class. Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` } @@ -15434,6 +20153,7 @@ func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { type LifecycleRuleAndOperator struct { _ struct{} `type:"structure"` + // Prefix identifying one or more objects to which the rule applies. Prefix *string `type:"string"` // All of these tags must exist in the object's tag set in order for the rule @@ -15549,7 +20269,7 @@ func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { } type ListBucketAnalyticsConfigurationsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListBucketAnalyticsConfigurationsRequest" type:"structure"` // The name of the bucket from which analytics configurations are retrieved. // @@ -15606,13 +20326,28 @@ func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) return s } +func (s *ListBucketAnalyticsConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketAnalyticsConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListBucketAnalyticsConfigurationsOutput struct { _ struct{} `type:"structure"` // The list of analytics configurations for a bucket. AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` - // The ContinuationToken that represents where this request began. + // The marker that is used as a starting point for this analytics configuration + // list response. This value is present if it was sent in the request. ContinuationToken *string `type:"string"` // Indicates whether the returned list of analytics configurations is complete. @@ -15661,7 +20396,7 @@ func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v str } type ListBucketInventoryConfigurationsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListBucketInventoryConfigurationsRequest" type:"structure"` // The name of the bucket containing the inventory configurations to retrieve. 
// @@ -15720,6 +20455,20 @@ func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) return s } +func (s *ListBucketInventoryConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketInventoryConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListBucketInventoryConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -15730,8 +20479,9 @@ type ListBucketInventoryConfigurationsOutput struct { // The list of inventory configurations for a bucket. InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` - // Indicates whether the returned list of inventory configurations is truncated - // in this response. A value of true indicates that the list is truncated. + // Tells whether the returned list of inventory configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // is provided for a subsequent request. IsTruncated *bool `type:"boolean"` // The marker used to continue this inventory configuration listing. Use the @@ -15775,7 +20525,7 @@ func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v str } type ListBucketMetricsConfigurationsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListBucketMetricsConfigurationsRequest" type:"structure"` // The name of the bucket containing the metrics configurations to retrieve. // @@ -15834,6 +20584,20 @@ func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *L return s } +func (s *ListBucketMetricsConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketMetricsConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListBucketMetricsConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -15907,8 +20671,10 @@ func (s ListBucketsInput) GoString() string { type ListBucketsOutput struct { _ struct{} `type:"structure"` + // The list of buckets owned by the requestor. Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + // The owner of the buckets listed. Owner *Owner `type:"structure"` } @@ -15935,12 +20701,28 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { } type ListMultipartUploadsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` + // Name of the bucket to which the multipart upload was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Character you use to group keys. + // + // All keys that contain the same string between the prefix, if specified, and + // the first occurrence of the delimiter after the prefix are grouped under + // a single result element, CommonPrefixes. If you don't specify the prefix + // parameter, then the substring starts at the beginning of the key. The keys + // that are grouped under CommonPrefixes result element are not returned elsewhere + // in the response. Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` // Requests Amazon S3 to encode the object keys in the response and specifies @@ -15953,6 +20735,13 @@ type ListMultipartUploadsInput struct { // Together with upload-id-marker, this parameter specifies the multipart upload // after which listing should begin. + // + // If upload-id-marker is not specified, only the keys lexicographically greater + // than the specified key-marker will be included in the list. + // + // If upload-id-marker is specified, any multipart uploads for a key equal to + // the key-marker might also be included, provided those multipart uploads have + // upload IDs lexicographically greater than the specified upload-id-marker. KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` // Sets the maximum number of multipart uploads, from 1 to 1,000, to return @@ -15961,12 +20750,16 @@ type ListMultipartUploadsInput struct { MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` // Lists in-progress uploads only for those keys that begin with the specified - // prefix. + // prefix. You can use prefixes to separate a bucket into different grouping + // of keys. (You can think of using prefix to make groups in the same way you'd + // use a folder in a file system.) Prefix *string `location:"querystring" locationName:"prefix" type:"string"` // Together with key-marker, specifies the multipart upload after which listing // should begin. If key-marker is not specified, the upload-id-marker parameter - // is ignored. + // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker. UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` } @@ -16045,17 +20838,42 @@ func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUp return s } +func (s *ListMultipartUploadsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListMultipartUploadsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListMultipartUploadsOutput struct { _ struct{} `type:"structure"` // Name of the bucket to which the multipart upload was initiated. Bucket *string `type:"string"` + // If you specify a delimiter in the request, then the result returns each distinct + // key prefix containing the delimiter in a CommonPrefixes element. The distinct + // key prefixes are returned in the Prefix child element. CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + // Contains the delimiter you specified in the request. 
If you don't specify + // a delimiter in your request, this element is absent from the response. Delimiter *string `type:"string"` // Encoding type used by Amazon S3 to encode object keys in the response. + // + // If you specify encoding-type request parameter, Amazon S3 includes this element + // in the response, and returns encoded key name values in the following response + // elements: + // + // Delimiter, KeyMarker, Prefix, NextKeyMarker, Key. EncodingType *string `type:"string" enum:"EncodingType"` // Indicates whether the returned list of multipart uploads is truncated. A @@ -16086,6 +20904,8 @@ type ListMultipartUploadsOutput struct { // Upload ID after which listing began. UploadIdMarker *string `type:"string"` + // Container for elements related to a particular multipart upload. A response + // can contain zero or more Upload elements. Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` } @@ -16179,12 +20999,25 @@ func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMulti } type ListObjectVersionsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"` + // The bucket name that contains the objects. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // A delimiter is a character you use to group keys. + // A delimiter is a character that you specify to group keys. All keys that + // contain the same string between the prefix and the first occurrence of the + // delimiter are grouped under a single result element in CommonPrefixes. These + // groups are counted as one result against the max-keys limitation. These keys + // are not returned elsewhere in the response. Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` // Requests Amazon S3 to encode the object keys in the response and specifies @@ -16198,11 +21031,19 @@ type ListObjectVersionsInput struct { // Specifies the key to start with when listing objects in a bucket. KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. If additional keys satisfy the search criteria, + // but were not returned because max-keys was exceeded, the response contains + // true. To return the additional keys, see key-marker + // and version-id-marker. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` - // Limits the response to keys that begin with the specified prefix. + // Use this parameter to select only those keys that begin with the specified + // prefix. 
You can use prefixes to separate a bucket into different groupings + // of keys. (You can think of using prefix to make groups in the same way you'd + // use a folder in a file system.) You can use prefix with delimiter to roll + // up numerous objects into a single result under CommonPrefixes. Prefix *string `location:"querystring" locationName:"prefix" type:"string"` // Specifies the object version you want to start listing from. @@ -16284,42 +21125,81 @@ func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersio return s } +func (s *ListObjectVersionsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectVersionsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListObjectVersionsOutput struct { _ struct{} `type:"structure"` + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + // Container for an object that is a delete marker. DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + // The delimiter grouping the included keys. A delimiter is a character that + // you specify to group keys. All keys that contain the same string between + // the prefix and the first occurrence of the delimiter are grouped under a + // single result element in CommonPrefixes. These groups are counted as one + // result against the max-keys limitation. These keys are not returned elsewhere + // in the response. Delimiter *string `type:"string"` - // Encoding type used by Amazon S3 to encode object keys in the response. + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // + // If you specify encoding-type request parameter, Amazon S3 includes this element + // in the response, and returns encoded key name values in the following response + // elements: + // + // KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter. EncodingType *string `type:"string" enum:"EncodingType"` - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. If your results were truncated, you can - // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. If your results were truncated, you can make + // a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker // response parameters as a starting place in another request to return the // rest of the results. IsTruncated *bool `type:"boolean"` - // Marks the last Key returned in a truncated response. + // Marks the last key returned in a truncated response. KeyMarker *string `type:"string"` + // Specifies the maximum number of objects to return. MaxKeys *int64 `type:"integer"` + // Bucket name. Name *string `type:"string"` - // Use this value for the key marker request parameter in a subsequent request. + // When the number of responses exceeds the value of MaxKeys, NextKeyMarker + // specifies the first key not returned that satisfies the search criteria. + // Use this value for the key-marker request parameter in a subsequent request. 
NextKeyMarker *string `type:"string"` - // Use this value for the next version id marker parameter in a subsequent request. + // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker + // specifies the first object version not returned that satisfies the search + // criteria. Use this value for the version-id-marker request parameter in a + // subsequent request. NextVersionIdMarker *string `type:"string"` + // Selects objects that start with the value supplied by this parameter. Prefix *string `type:"string"` + // Marks the last version of the key returned in a truncated response. VersionIdMarker *string `type:"string"` + // Container for version information. Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` } @@ -16412,8 +21292,10 @@ func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVe } type ListObjectsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListObjectsRequest" type:"structure"` + // The name of the bucket containing the objects. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -16431,8 +21313,9 @@ type ListObjectsInput struct { // Specifies the key to start with when listing objects in a bucket. Marker *string `location:"querystring" locationName:"marker" type:"string"` - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` // Limits the response to keys that begin with the specified prefix. @@ -16519,26 +21402,65 @@ func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { return s } +func (s *ListObjectsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListObjectsOutput struct { _ struct{} `type:"structure"` + // All of the keys rolled up in a common prefix count as a single return when + // calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by the delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + // Metadata about each object returned. Contents []*Object `type:"list" flattened:"true"` + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere + // in the response. 
Each rolled-up result counts as only one return against + // the MaxKeys value. Delimiter *string `type:"string"` // Encoding type used by Amazon S3 to encode object keys in the response. EncodingType *string `type:"string" enum:"EncodingType"` - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. IsTruncated *bool `type:"boolean"` + // Indicates where in the bucket listing begins. Marker is included in the response + // if it was sent with the request. Marker *string `type:"string"` + // The maximum number of keys returned in the response body. MaxKeys *int64 `type:"integer"` + // Bucket name. Name *string `type:"string"` // When response is truncated (the IsTruncated element value in the response @@ -16550,6 +21472,7 @@ type ListObjectsOutput struct { // subsequent request to get the next set of object keys. NextMarker *string `type:"string"` + // Keys that begin with the indicated prefix. Prefix *string `type:"string"` } @@ -16624,16 +21547,23 @@ func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { } type ListObjectsV2Input struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListObjectsV2Request" type:"structure"` - // Name of the bucket to list. + // Bucket name to list. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // ContinuationToken indicates Amazon S3 that the list is being continued on // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key + // key. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` // A delimiter is a character you use to group keys. @@ -16644,11 +21574,12 @@ type ListObjectsV2Input struct { // The owner field is not present in listV2 by default, if you want to return // owner field with each key in the result then set the fetch owner field to - // true + // true. FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` // Limits the response to keys that begin with the specified prefix. @@ -16660,7 +21591,7 @@ type ListObjectsV2Input struct { RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. 
StartAfter can be any key in the bucket + // listing after this specified key. StartAfter can be any key in the bucket. StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` } @@ -16751,29 +21682,65 @@ func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { return s } +func (s *ListObjectsV2Input) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectsV2Input) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListObjectsV2Output struct { _ struct{} `type:"structure"` + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // // CommonPrefixes contains all (if there are any) keys between Prefix and the - // next occurrence of the string specified by delimiter + // next occurrence of the string specified by a delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` // Metadata about each object returned. Contents []*Object `type:"list" flattened:"true"` - // ContinuationToken indicates Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key + // If ContinuationToken was sent with the request, it is included in the response. ContinuationToken *string `type:"string"` - // A delimiter is a character you use to group keys. + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere + // in the response. Each rolled-up result counts as only one return against + // the MaxKeys value. Delimiter *string `type:"string"` - // Encoding type used by Amazon S3 to encode object keys in the response. + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: + // + // Delimiter, Prefix, Key, and StartAfter. EncodingType *string `type:"string" enum:"EncodingType"` - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. + // Set to false if all of the results were returned. Set to true if more keys + // are available to return. If the number of results exceeds that specified + // by MaxKeys, all of the results might not be returned. IsTruncated *bool `type:"boolean"` // KeyCount is the number of keys returned with this request. KeyCount will @@ -16781,24 +21748,31 @@ type ListObjectsV2Output struct { // result will include less than equals 50 keys KeyCount *int64 `type:"integer"` - // Sets the maximum number of keys returned in the response. 
The response might - // contain fewer keys but will never contain more. + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. MaxKeys *int64 `type:"integer"` - // Name of the bucket to list. + // Bucket name. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. Name *string `type:"string"` - // NextContinuationToken is sent when isTruncated is true which means there + // NextContinuationToken is sent when isTruncated is true, which means there // are more keys in the bucket that can be listed. The next list requests to // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken // is obfuscated and is not a real key NextContinuationToken *string `type:"string"` - // Limits the response to keys that begin with the specified prefix. + // Keys that begin with the indicated prefix. Prefix *string `type:"string"` - // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket + // If StartAfter was sent with the request, it is included in the response. StartAfter *string `type:"string"` } @@ -16885,11 +21859,22 @@ func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { } type ListPartsInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"ListPartsRequest" type:"structure"` + // Name of the bucket to which the parts are being uploaded. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which the multipart upload was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -16900,10 +21885,11 @@ type ListPartsInput struct { // part numbers will be listed. PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. 
+ // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Upload ID identifying the multipart upload whose parts are being listed. @@ -16990,23 +21976,51 @@ func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { return s } +func (s *ListPartsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListPartsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListPartsOutput struct { _ struct{} `type:"structure"` - // Date when multipart upload will become eligible for abort operation by lifecycle. + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object + // name in the request, then the response includes this header indicating when + // the initiated multipart upload will become eligible for abort operation. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // + // The response will also include the x-amz-abort-rule-id header that will provide + // the ID of the lifecycle configuration rule that defines this action. AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` - // Id of the lifecycle rule that makes a multipart upload eligible for abort - // operation. + // This header is returned along with the x-amz-abort-date header. It identifies + // applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` // Name of the bucket to which the multipart upload was initiated. Bucket *string `type:"string"` - // Identifies who initiated the multipart upload. + // Container element that identifies who initiated the multipart upload. If + // the initiator is an AWS account, this element provides the same information + // as the Owner element. If the initiator is an IAM User, this element provides + // the user ARN and display name. Initiator *Initiator `type:"structure"` - // Indicates whether the returned list of parts is truncated. + // Indicates whether the returned list of parts is truncated. A true value indicates + // that the list was truncated. A list can be truncated if the number of parts + // exceeds the limit returned in the MaxParts element. IsTruncated *bool `type:"boolean"` // Object key for which the multipart upload was initiated. @@ -17020,18 +22034,26 @@ type ListPartsOutput struct { // in a subsequent request. NextPartNumberMarker *int64 `type:"integer"` + // Container element that identifies the object owner, after the object is created. + // If multipart upload is initiated by an IAM user, this element provides the + // parent account ID and display name. Owner *Owner `type:"structure"` - // Part number after which listing begins. 
+ // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. PartNumberMarker *int64 `type:"integer"` + // Container for elements related to a particular part. A response can contain + // zero or more Part elements. Parts []*Part `locationName:"Part" type:"list" flattened:"true"` // If present, indicates that the requester was successfully charged for the // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // The class of storage used to store the object. + // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded + // object. StorageClass *string `type:"string" enum:"StorageClass"` // Upload ID identifying the multipart upload whose parts are being listed. @@ -17139,7 +22161,8 @@ func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { return s } -// Describes an S3 location that will receive the results of the restore request. +// Describes an Amazon S3 location that will receive the results of the restore +// request. type Location struct { _ struct{} `type:"structure"` @@ -17154,8 +22177,7 @@ type Location struct { // The canned ACL to apply to the restore results. CannedACL *string `type:"string" enum:"ObjectCannedACL"` - // Describes the server-side encryption that will be applied to the restore - // results. + // Contains the type of server-side encryption used. Encryption *Encryption `type:"structure"` // The prefix that is prepended to the restore results for this request. @@ -17267,26 +22289,29 @@ func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { return s } -// Container for logging information. Presence of this element indicates that -// logging is enabled. Parameters TargetBucket and TargetPrefix are required -// in this case. +// Describes where logs are stored and the prefix that Amazon S3 assigns to +// all log object keys for a bucket. For more information, see PUT Bucket logging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) +// in the Amazon Simple Storage Service API Reference. type LoggingEnabled struct { _ struct{} `type:"structure"` // Specifies the bucket where you want Amazon S3 to store server access logs. // You can have your logs delivered to any bucket that you own, including the // same bucket that is being logged. You can also configure multiple buckets - // to deliver their logs to the same target bucket. In this case you should + // to deliver their logs to the same target bucket. In this case, you should // choose a different TargetPrefix for each source bucket so that the delivered // log files can be distinguished by key. // // TargetBucket is a required field TargetBucket *string `type:"string" required:"true"` + // Container for granting information. TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` - // This element lets you specify a prefix for the keys that the log files will - // be stored under. + // A prefix for all log object keys. If you store log files from multiple Amazon + // S3 buckets in a single bucket, you can use a prefix to distinguish which + // log files came from which bucket. 
// // TargetPrefix is a required field TargetPrefix *string `type:"string" required:"true"` @@ -17350,8 +22375,10 @@ func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { type MetadataEntry struct { _ struct{} `type:"structure"` + // Name of the Object. Name *string `type:"string"` + // Value of the Object. Value *string `type:"string"` } @@ -17377,6 +22404,65 @@ func (s *MetadataEntry) SetValue(v string) *MetadataEntry { return s } +// A container specifying replication metrics-related settings enabling metrics +// and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified +// together with a ReplicationTime block. +type Metrics struct { + _ struct{} `type:"structure"` + + // A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold + // event. + // + // EventThreshold is a required field + EventThreshold *ReplicationTimeValue `type:"structure" required:"true"` + + // Specifies whether the replication metrics are enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"MetricsStatus"` +} + +// String returns the string representation +func (s Metrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Metrics) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Metrics) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Metrics"} + if s.EventThreshold == nil { + invalidParams.Add(request.NewErrParamRequired("EventThreshold")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventThreshold sets the EventThreshold field's value. +func (s *Metrics) SetEventThreshold(v *ReplicationTimeValue) *Metrics { + s.EventThreshold = v + return s +} + +// SetStatus sets the Status field's value. +func (s *Metrics) SetStatus(v string) *Metrics { + s.Status = &v + return s +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates, and an object +// must match all of the predicates in order for the filter to apply. type MetricsAndOperator struct { _ struct{} `type:"structure"` @@ -17429,6 +22515,13 @@ func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { return s } +// Specifies a metrics configuration for the CloudWatch request metrics (specified +// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating +// an existing metrics configuration, note that this is a full replacement of +// the existing metrics configuration. If you don't include the elements you +// want to keep, they are erased. For more information, see PUT Bucket metrics +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) +// in the Amazon Simple Storage Service API Reference. type MetricsConfiguration struct { _ struct{} `type:"structure"` @@ -17483,6 +22576,9 @@ func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { return s } +// Specifies a metrics configuration filter. The metrics configuration only +// includes objects that meet the filter's criteria. A filter must be a prefix, +// a tag, or a conjunction (MetricsAndOperator). 
type MetricsFilter struct { _ struct{} `type:"structure"` @@ -17546,6 +22642,7 @@ func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { return s } +// Container for the MultipartUpload for the Amazon S3 object. type MultipartUpload struct { _ struct{} `type:"structure"` @@ -17558,6 +22655,7 @@ type MultipartUpload struct { // Key of the object for which the multipart upload was initiated. Key *string `min:"1" type:"string"` + // Specifies the owner of the object that is part of the multipart upload. Owner *Owner `type:"structure"` // The class of storage used to store the object. @@ -17624,7 +22722,7 @@ type NoncurrentVersionExpiration struct { // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) // in the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` } @@ -17646,19 +22744,19 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers } // Container for the transition rule that describes when noncurrent objects -// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER or -// DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning +// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, +// or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning // is suspended), you can set this action to request that Amazon S3 transition // noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, -// GLACIER or DEEP_ARCHIVE storage class at a specific period in the object's +// GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's // lifetime. type NoncurrentVersionTransition struct { _ struct{} `type:"structure"` // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + // calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) // in the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` @@ -17693,10 +22791,16 @@ func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersi type NotificationConfiguration struct { _ struct{} `type:"structure"` + // Describes the AWS Lambda functions to invoke and the events for which to + // invoke them. LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + // The Amazon Simple Queue Service queues to publish messages to and the events + // for which to publish messages. QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + // The topic to which notifications are sent and the events for which notifications + // are generated. 
TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` } @@ -17771,10 +22875,17 @@ func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfigurati type NotificationConfigurationDeprecated struct { _ struct{} `type:"structure"` + // Container for specifying the AWS Lambda notification configuration. CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + // This data type is deprecated. This data type specifies the configuration + // for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue + // when Amazon S3 detects specified events. QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + // This data type is deprecated. A container for specifying the configuration + // for publication of messages to an Amazon Simple Notification Service (Amazon + // SNS) topic when Amazon S3 detects specified events. TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` } @@ -17806,8 +22917,8 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf return s } -// A container for object key name filtering rules. For information about key -// name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// Specifies object key name filtering rules. For information about key name +// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. type NotificationConfigurationFilter struct { _ struct{} `type:"structure"` @@ -17832,17 +22943,25 @@ func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConf return s } +// An object consists of data and its descriptive metadata. type Object struct { _ struct{} `type:"structure"` + // The entity tag is an MD5 hash of the object. ETag reflects only changes to + // the contents of an object, not its metadata. ETag *string `type:"string"` + // The name that you assign to an object. You use the object key to retrieve + // the object. Key *string `min:"1" type:"string"` + // The date the Object was Last Modified LastModified *time.Time `type:"timestamp"` + // The owner of the object Owner *Owner `type:"structure"` + // Size in bytes of the object Size *int64 `type:"integer"` // The class of storage used to store the object. @@ -17895,6 +23014,7 @@ func (s *Object) SetStorageClass(v string) *Object { return s } +// Object Identifier is unique value to identify objects. type ObjectIdentifier struct { _ struct{} `type:"structure"` @@ -18060,9 +23180,11 @@ func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRul return s } +// The version of an object. type ObjectVersion struct { _ struct{} `type:"structure"` + // The entity tag is an MD5 hash of that version of the object. ETag *string `type:"string"` // Specifies whether the object is (true) or is not (false) the latest version @@ -18075,6 +23197,7 @@ type ObjectVersion struct { // Date and time the object was last modified. LastModified *time.Time `type:"timestamp"` + // Specifies the owner of the object. Owner *Owner `type:"structure"` // Size in bytes of the object. @@ -18217,11 +23340,14 @@ func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization { return s } +// Container for the owner's display name and ID. type Owner struct { _ struct{} `type:"structure"` + // Container for the display name of the owner. 
DisplayName *string `type:"string"` + // Container for the ID of the owner. ID *string `type:"string"` } @@ -18247,6 +23373,7 @@ func (s *Owner) SetID(v string) *Owner { return s } +// Container for Parquet. type ParquetInput struct { _ struct{} `type:"structure"` } @@ -18261,6 +23388,7 @@ func (s ParquetInput) GoString() string { return s.String() } +// Container for elements related to a part. type Part struct { _ struct{} `type:"structure"` @@ -18337,6 +23465,7 @@ func (s *PolicyStatus) SetIsPublic(v bool) *PolicyStatus { return s } +// This data type contains information about progress of an operation. type Progress struct { _ struct{} `type:"structure"` @@ -18378,6 +23507,7 @@ func (s *Progress) SetBytesScanned(v int64) *Progress { return s } +// This data type contains information about the progress event of an operation. type ProgressEvent struct { _ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"` @@ -18418,6 +23548,21 @@ func (s *ProgressEvent) UnmarshalEvent( return nil } +func (s *ProgressEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + var buf bytes.Buffer + if err = pm.MarshalPayload(&buf, s); err != nil { + return eventstream.Message{}, err + } + msg.Payload = buf.Bytes() + return msg, err +} + +// The PublicAccessBlock configuration that you want to apply to this Amazon +// S3 bucket. You can enable the configuration options in any combination. For +// more information about when Amazon S3 considers a bucket or object public, +// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// in the Amazon Simple Storage Service Developer Guide. type PublicAccessBlockConfiguration struct { _ struct{} `type:"structure"` @@ -18430,6 +23575,8 @@ type PublicAccessBlockConfiguration struct { // // * PUT Object calls fail if the request includes a public ACL. // + // * PUT Bucket calls fail if the request includes a public ACL. + // // Enabling this setting doesn't affect existing policies or ACLs. BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"` @@ -18494,9 +23641,9 @@ func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *Publi } type PutBucketAccelerateConfigurationInput struct { - _ struct{} `type:"structure" payload:"AccelerateConfiguration"` + _ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"` - // Specifies the Accelerate Configuration you want to set for the bucket. + // Container for setting the transfer acceleration state. 
// // AccelerateConfiguration is a required field AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -18555,6 +23702,20 @@ func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { return *s.Bucket } +func (s *PutBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAccelerateConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -18570,13 +23731,16 @@ func (s PutBucketAccelerateConfigurationOutput) GoString() string { } type PutBucketAclInput struct { - _ struct{} `type:"structure" payload:"AccessControlPolicy"` + _ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"` // The canned ACL to apply to the bucket. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + // Contains the elements that set the ACL permissions for an object per grantee. AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // The bucket to which to apply the ACL. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -18683,6 +23847,20 @@ func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { return s } +func (s *PutBucketAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketAclOutput struct { _ struct{} `type:"structure"` } @@ -18698,7 +23876,7 @@ func (s PutBucketAclOutput) GoString() string { } type PutBucketAnalyticsConfigurationInput struct { - _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` + _ struct{} `locationName:"PutBucketAnalyticsConfigurationRequest" type:"structure" payload:"AnalyticsConfiguration"` // The configuration and any analyses for the analytics filter. // @@ -18710,7 +23888,7 @@ type PutBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The identifier used to represent an analytics configuration. + // The ID that identifies the analytics configuration. 
// // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -18778,6 +23956,20 @@ func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyti return s } +func (s *PutBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -18793,11 +23985,18 @@ func (s PutBucketAnalyticsConfigurationOutput) GoString() string { } type PutBucketCorsInput struct { - _ struct{} `type:"structure" payload:"CORSConfiguration"` + _ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"` + // Specifies the bucket impacted by the corsconfiguration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Describes the cross-origin access configuration for objects in an Amazon + // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon + // Simple Storage Service Developer Guide. + // // CORSConfiguration is a required field CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -18855,6 +24054,20 @@ func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBuck return s } +func (s *PutBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -18870,16 +24083,18 @@ func (s PutBucketCorsOutput) GoString() string { } type PutBucketEncryptionInput struct { - _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` - // The name of the bucket for which the server-side encryption configuration - // is set. + // Specifies default encryption for a bucket using server-side encryption with + // Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS + // (SSE-KMS). For information about the Amazon S3 default encryption feature, + // see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Container for server-side encryption configuration rules. Currently S3 supports - // one rule only. + // Specifies the default server-side-encryption configuration. 
// // ServerSideEncryptionConfiguration is a required field ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -18938,6 +24153,20 @@ func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *Serve return s } +func (s *PutBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketEncryptionOutput struct { _ struct{} `type:"structure"` } @@ -18953,7 +24182,7 @@ func (s PutBucketEncryptionOutput) GoString() string { } type PutBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure" payload:"InventoryConfiguration"` + _ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"` // The name of the bucket where the inventory configuration will be stored. // @@ -19033,6 +24262,20 @@ func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *Inve return s } +func (s *PutBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -19048,11 +24291,14 @@ func (s PutBucketInventoryConfigurationOutput) GoString() string { } type PutBucketLifecycleConfigurationInput struct { - _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + _ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"` + // The name of the bucket for which to set the configuration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for lifecycle rules. You can add as many as 1,000 rules. LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19106,6 +24352,20 @@ func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *Buck return s } +func (s *PutBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -19121,11 +24381,12 @@ func (s PutBucketLifecycleConfigurationOutput) GoString() string { } type PutBucketLifecycleInput struct { - _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + _ struct{} `locationName:"PutBucketLifecycleRequest" type:"structure" payload:"LifecycleConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for lifecycle rules. You can add as many as 1000 rules. 
LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19179,6 +24440,20 @@ func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfigur return s } +func (s *PutBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -19194,11 +24469,15 @@ func (s PutBucketLifecycleOutput) GoString() string { } type PutBucketLoggingInput struct { - _ struct{} `type:"structure" payload:"BucketLoggingStatus"` + _ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"` + // The name of the bucket for which to set the logging parameters. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for logging status information. + // // BucketLoggingStatus is a required field BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19256,6 +24535,20 @@ func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) * return s } +func (s *PutBucketLoggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLoggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketLoggingOutput struct { _ struct{} `type:"structure"` } @@ -19271,7 +24564,7 @@ func (s PutBucketLoggingOutput) GoString() string { } type PutBucketMetricsConfigurationInput struct { - _ struct{} `type:"structure" payload:"MetricsConfiguration"` + _ struct{} `locationName:"PutBucketMetricsConfigurationRequest" type:"structure" payload:"MetricsConfiguration"` // The name of the bucket for which the metrics configuration is set. // @@ -19351,6 +24644,20 @@ func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsC return s } +func (s *PutBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -19366,8 +24673,10 @@ func (s PutBucketMetricsConfigurationOutput) GoString() string { } type PutBucketNotificationConfigurationInput struct { - _ struct{} `type:"structure" payload:"NotificationConfiguration"` + _ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"` + // The name of the bucket. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19431,6 +24740,20 @@ func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v return s } +func (s *PutBucketNotificationConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketNotificationConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -19446,11 +24769,15 @@ func (s PutBucketNotificationConfigurationOutput) GoString() string { } type PutBucketNotificationInput struct { - _ struct{} `type:"structure" payload:"NotificationConfiguration"` + _ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"` + // The name of the bucket. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The container for the configuration. + // // NotificationConfiguration is a required field NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19503,6 +24830,20 @@ func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *Notificatio return s } +func (s *PutBucketNotificationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketNotificationOutput struct { _ struct{} `type:"structure"` } @@ -19518,8 +24859,10 @@ func (s PutBucketNotificationOutput) GoString() string { } type PutBucketPolicyInput struct { - _ struct{} `type:"structure" payload:"Policy"` + _ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"` + // The name of the bucket. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19587,6 +24930,20 @@ func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { return s } +func (s *PutBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -19602,8 +24959,10 @@ func (s PutBucketPolicyOutput) GoString() string { } type PutBucketReplicationInput struct { - _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + _ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"` + // The name of the bucket + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19612,6 +24971,8 @@ type PutBucketReplicationInput struct { // // ReplicationConfiguration is a required field ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } // String returns the string representation @@ -19667,6 +25028,26 @@ func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationCo return s } +// SetToken sets the Token field's value. +func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInput { + s.Token = &v + return s +} + +func (s *PutBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -19682,11 +25063,15 @@ func (s PutBucketReplicationOutput) GoString() string { } type PutBucketRequestPaymentInput struct { - _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` + _ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for Payer. 
+ // // RequestPaymentConfiguration is a required field RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19744,6 +25129,20 @@ func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *Request return s } +func (s *PutBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketRequestPaymentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` } @@ -19759,11 +25158,15 @@ func (s PutBucketRequestPaymentOutput) GoString() string { } type PutBucketTaggingInput struct { - _ struct{} `type:"structure" payload:"Tagging"` + _ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for the TagSet and Tag elements. + // // Tagging is a required field Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19821,6 +25224,20 @@ func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { return s } +func (s *PutBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -19836,8 +25253,10 @@ func (s PutBucketTaggingOutput) GoString() string { } type PutBucketVersioningInput struct { - _ struct{} `type:"structure" payload:"VersioningConfiguration"` + _ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19845,6 +25264,8 @@ type PutBucketVersioningInput struct { // and the value that is displayed on your authentication device. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + // Container for setting the versioning state. 
+ // // VersioningConfiguration is a required field VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19903,6 +25324,20 @@ func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfi return s } +func (s *PutBucketVersioningInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketVersioningInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketVersioningOutput struct { _ struct{} `type:"structure"` } @@ -19918,11 +25353,15 @@ func (s PutBucketVersioningOutput) GoString() string { } type PutBucketWebsiteInput struct { - _ struct{} `type:"structure" payload:"WebsiteConfiguration"` + _ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for the request. + // // WebsiteConfiguration is a required field WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19980,6 +25419,20 @@ func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) return s } +func (s *PutBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -19995,13 +25448,25 @@ func (s PutBucketWebsiteOutput) GoString() string { } type PutObjectAclInput struct { - _ struct{} `type:"structure" payload:"AccessControlPolicy"` + _ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"` - // The canned ACL to apply to the object. + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + // Contains the elements that set the ACL permissions for an object per grantee. AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // The bucket name that contains the object to which you want to attach the + // ACL. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -20021,13 +25486,16 @@ type PutObjectAclInput struct { // Allows grantee to write the ACL for the applicable bucket. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + // Key for which the PUT operation was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // VersionId used to reference a specific version of the object. @@ -20144,6 +25612,20 @@ func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { return s } +func (s *PutObjectAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutObjectAclOutput struct { _ struct{} `type:"structure"` @@ -20169,45 +25651,64 @@ func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { } type PutObjectInput struct { - _ struct{} `type:"structure" payload:"Body"` + _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` - // The canned ACL to apply to the object. + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` // Object data. Body io.ReadSeeker `type:"blob"` - // Name of the bucket to which the PUT operation was initiated. + // Bucket name to which the PUT operation was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies caching behavior along the request/reply chain. + // Can be used to specify caching behavior along the request/reply chain. 
For + // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - // Specifies presentational information for the object. + // Specifies presentational information for the object. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` // Specifies what content encodings have been applied to the object and thus // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. + // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` // The language the content is in. ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` // Size of the body in bytes. This parameter is useful when the size of the - // body cannot be determined automatically. + // body cannot be determined automatically. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13). ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - // The base64-encoded 128-bit MD5 digest of the part data. This parameter is - // auto-populated when using the command from the CLI + // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // according to RFC 1864. This header can be used as a message integrity check + // to verify that the data is the same data that was originally sent. Although + // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end + // integrity check. For more information about REST request authentication, + // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` - // A standard MIME type describing the format of the object data. + // A standard MIME type describing the format of the contents. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // The date and time at which the object is no longer cacheable. + // The date and time at which the object is no longer cacheable. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. @@ -20230,7 +25731,8 @@ type PutObjectInput struct { // A map of metadata to store with the object in S3. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - // The Legal Hold status that you want to apply to the specified object. + // Specifies whether a legal hold will be applied to this object. 
For more information + // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` // The Object Lock mode that you want to apply to this object. @@ -20239,38 +25741,52 @@ type PutObjectInput struct { // The date and time when you want this object's Object Lock to expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + // Specifies the AWS KMS Encryption Context to use for object encryption. 
The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If x-amz-server-side-encryption is present and has the value of aws:kms, + // this header specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetrical customer managed customer master key (CMK) that was used for + // the object. + // + // If the value of x-amz-server-side-encryption is aws:kms, this header specifies + // the ID of the symmetric customer managed AWS KMS CMK that will be used for + // the object. If you specify x-amz-server-side-encryption:aws:kms, but do not + // providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS + // managed CMK in AWS to protect the data. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - // The type of storage to use for the object. Defaults to 'STANDARD'. + // If you don't specify, S3 Standard is the default storage class. Amazon S3 + // supports other storage classes. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The tag-set for the object. The tag-set must be encoded as URL Query parameters. @@ -20279,7 +25795,22 @@ type PutObjectInput struct { // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. + // the value of this header in the object metadata. For information about object + // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html). + // + // In the following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: + // + // x-amz-website-redirect-location: /anotherPage.html + // + // In the following example, the request header sets the object redirect to + // another website: + // + // x-amz-website-redirect-location: http://www.example.com/ + // + // For more information about website hosting in Amazon S3, see Hosting Websites + // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } @@ -20473,6 +26004,12 @@ func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *PutObjectInput) SetSSEKMSEncryptionContext(v string) *PutObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { s.SSEKMSKeyId = &v @@ -20503,10 +26040,32 @@ func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { return s } +func (s *PutObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutObjectLegalHoldInput struct { - _ struct{} `type:"structure" payload:"LegalHold"` + _ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"` - // The bucket containing the object that you want to place a Legal Hold on. + // The bucket name containing the object that you want to place a Legal Hold + // on. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -20520,10 +26079,11 @@ type PutObjectLegalHoldInput struct { // specified object. LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The version ID of the object that you want to place a Legal Hold on. 
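The hunks above extend PutObjectInput with SSE-KMS encryption context, access-point ARN routing (the new hasEndpointARN/getEndpointARN helpers), and more detailed header documentation. As an illustration only, and not part of the vendored diff, the following minimal sketch shows a PutObject call that exercises these fields against aws-sdk-go v1.32.x; the bucket, key, and KMS key ID are hypothetical placeholders.

package main

import (
	"bytes"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Region and credentials come from the usual SDK configuration chain.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// PutObjectInput.Body is an io.ReadSeeker; bytes.Reader satisfies it.
	body := bytes.NewReader([]byte("example payload"))

	input := &s3.PutObjectInput{
		// An access point ARN may also be supplied here; the hasEndpointARN/
		// getEndpointARN helpers added in this diff detect it so the request
		// can be routed to the access point endpoint.
		Bucket: aws.String("example-bucket"), // hypothetical bucket
		Key:    aws.String("example/key.txt"),
		Body:   body,

		// Server-side encryption with an AWS KMS CMK, as documented above.
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
		SSEKMSKeyId:          aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // hypothetical key ID
	}

	if _, err := svc.PutObject(input); err != nil {
		log.Fatal(err)
	}
}

The new SSEKMSEncryptionContext field can be populated the same way (for example via SetSSEKMSEncryptionContext) when a base64-encoded encryption context is needed.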
@@ -20599,6 +26159,20 @@ func (s *PutObjectLegalHoldInput) SetVersionId(v string) *PutObjectLegalHoldInpu return s } +func (s *PutObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectLegalHoldInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutObjectLegalHoldOutput struct { _ struct{} `type:"structure"` @@ -20624,7 +26198,7 @@ func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHo } type PutObjectLockConfigurationInput struct { - _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` + _ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"` // The bucket whose Object Lock configuration you want to create or replace. // @@ -20634,10 +26208,11 @@ type PutObjectLockConfigurationInput struct { // The Object Lock configuration that you want to apply to the specified bucket. ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // A token to allow Object Lock to be enabled for an existing bucket. @@ -20701,6 +26276,20 @@ func (s *PutObjectLockConfigurationInput) SetToken(v string) *PutObjectLockConfi return s } +func (s *PutObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectLockConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutObjectLockConfigurationOutput struct { _ struct{} `type:"structure"` @@ -20731,8 +26320,10 @@ type PutObjectOutput struct { // Entity tag for the uploaded object. ETag *string `location:"header" locationName:"ETag" type:"string"` - // If the object expiration is configured, this will contain the expiration - // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + // If the expiration is configured for the object (see PutBucketLifecycleConfiguration), + // the response includes this header. It includes the expiry-date and rule-id + // key-value pairs that provide information about object expiration. The value + // of the rule-id is URL encoded. 
Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // If present, indicates that the requester was successfully charged for the @@ -20745,16 +26336,25 @@ type PutObjectOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If x-amz-server-side-encryption is present and has the value of aws:kms, + // this header specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // If you specified server-side encryption either with an AWS KMS customer master + // key (CMK) or Amazon S3-managed encryption key in your PUT request, the response + // includes this header. It confirms the encryption algorithm that Amazon S3 + // used to encrypt the object. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version of the object. @@ -20801,6 +26401,12 @@ func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *PutObjectOutput) SetSSEKMSEncryptionContext(v string) *PutObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { s.SSEKMSKeyId = &v @@ -20820,15 +26426,22 @@ func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { } type PutObjectRetentionInput struct { - _ struct{} `type:"structure" payload:"Retention"` + _ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"` - // The bucket that contains the object you want to apply this Object Retention + // The bucket name that contains the object you want to apply this Object Retention // configuration to. // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. 
For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates whether this operation should bypass Governance-mode restrictions.j + // Indicates whether this operation should bypass Governance-mode restrictions. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` // The key name for the object that you want to apply this Object Retention @@ -20837,10 +26450,11 @@ type PutObjectRetentionInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The container element for the Object Retention configuration. @@ -20926,6 +26540,20 @@ func (s *PutObjectRetentionInput) SetVersionId(v string) *PutObjectRetentionInpu return s } +func (s *PutObjectRetentionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectRetentionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutObjectRetentionOutput struct { _ struct{} `type:"structure"` @@ -20951,17 +26579,31 @@ func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetenti } type PutObjectTaggingInput struct { - _ struct{} `type:"structure" payload:"Tagging"` + _ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"` + // The bucket name containing the object. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Name of the tag. 
+ // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // Container for the TagSet and Tag elements + // // Tagging is a required field Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // The versionId of the object that the tag-set will be added to. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -21036,9 +26678,24 @@ func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { return s } +func (s *PutObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutObjectTaggingOutput struct { _ struct{} `type:"structure"` + // The versionId of the object the tag-set was added to. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -21059,7 +26716,7 @@ func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput } type PutPublicAccessBlockInput struct { - _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` + _ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"` // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you // want to set. @@ -21125,6 +26782,20 @@ func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicA return s } +func (s *PutPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutPublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutPublicAccessBlockOutput struct { _ struct{} `type:"structure"` } @@ -21139,17 +26810,18 @@ func (s PutPublicAccessBlockOutput) GoString() string { return s.String() } -// A container for specifying the configuration for publication of messages -// to an Amazon Simple Queue Service (Amazon SQS) queue.when Amazon S3 detects -// specified events. +// Specifies the configuration for publishing messages to an Amazon Simple Queue +// Service (Amazon SQS) queue when Amazon S3 detects specified events. type QueueConfiguration struct { _ struct{} `type:"structure"` + // A collection of bucket events for which to send notifications + // // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - // A container for object key name filtering rules. For information about key - // name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` @@ -21158,7 +26830,7 @@ type QueueConfiguration struct { Id *string `type:"string"` // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 - // will publish a message when it detects events of the specified type. 
+ // publishes a message when it detects events of the specified type. // // QueueArn is a required field QueueArn *string `locationName:"Queue" type:"string" required:"true"` @@ -21214,6 +26886,10 @@ func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { return s } +// This data type is deprecated. Use QueueConfiguration for the same purposes. +// This data type specifies the configuration for publishing messages to an +// Amazon Simple Queue Service (Amazon SQS) queue when Amazon S3 detects specified +// events. type QueueConfigurationDeprecated struct { _ struct{} `type:"structure"` @@ -21222,12 +26898,15 @@ type QueueConfigurationDeprecated struct { // Deprecated: Event has been deprecated Event *string `deprecated:"true" type:"string" enum:"Event"` + // A collection of bucket events for which to send notifications Events []*string `locationName:"Event" type:"list" flattened:"true"` // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. Id *string `type:"string"` + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // publishes a message when it detects events of the specified type. Queue *string `type:"string"` } @@ -21265,6 +26944,7 @@ func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDep return s } +// The container for the records event. type RecordsEvent struct { _ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"` @@ -21304,6 +26984,15 @@ func (s *RecordsEvent) UnmarshalEvent( return nil } +func (s *RecordsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + msg.Headers.Set(":content-type", eventstream.StringValue("application/octet-stream")) + msg.Payload = s.Payload + return msg, err +} + +// Specifies how requests are redirected. In the event of an error, you can +// specify a different error code to return. type Redirect struct { _ struct{} `type:"structure"` @@ -21314,8 +27003,8 @@ type Redirect struct { // siblings is present. HttpRedirectCode *string `type:"string"` - // Protocol to use (http, https) when redirecting requests. The default is the - // protocol that is used in the original request. + // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. Protocol *string `type:"string" enum:"Protocol"` // The object key prefix to use in the redirect request. For example, to redirect @@ -21327,7 +27016,7 @@ type Redirect struct { ReplaceKeyPrefixWith *string `type:"string"` // The specific object key to use in the redirect request. For example, redirect - // request to error.html. Not required if one of the sibling is present. Can + // request to error.html. Not required if one of the siblings is present. Can // be present only if ReplaceKeyPrefixWith is not provided. ReplaceKeyWith *string `type:"string"` } @@ -21372,16 +27061,18 @@ func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { return s } +// Specifies the redirect behavior of all requests to a website endpoint of +// an Amazon S3 bucket. type RedirectAllRequestsTo struct { _ struct{} `type:"structure"` - // Name of the host where requests will be redirected. + // Name of the host where requests are redirected. 
// // HostName is a required field HostName *string `type:"string" required:"true"` - // Protocol to use (http, https) when redirecting requests. The default is the - // protocol that is used in the original request. + // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. Protocol *string `type:"string" enum:"Protocol"` } @@ -21426,7 +27117,9 @@ type ReplicationConfiguration struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the AWS Identity and Access Management - // (IAM) role that Amazon S3 can assume when replicating the objects. + // (IAM) role that Amazon S3 assumes when replicating objects. For more information, + // see How to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) + // in the Amazon Simple Storage Service Developer Guide. // // Role is a required field Role *string `type:"string" required:"true"` @@ -21486,18 +27179,34 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo return s } -// A container for information about a specific replication rule. +// Specifies which Amazon S3 objects to replicate and where to store the replicas. type ReplicationRule struct { _ struct{} `type:"structure"` - // Specifies whether Amazon S3 should replicate delete makers. + // Specifies whether Amazon S3 replicates the delete markers. If you specify + // a Filter, you must specify this element. However, in the latest version of + // replication configuration (when Filter is specified), Amazon S3 doesn't replicate + // delete markers. Therefore, the DeleteMarkerReplication element can contain + // only Disabled. For an example configuration, see Basic Rule + // Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). + // + // If you don't specify the Filter element, Amazon S3 assumes that the replication + // configuration is the earlier version, V1. In the earlier version, Amazon + // S3 handled replication of delete markers differently. For more information, + // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"` - // A container for information about the replication destination. + // A container for information about the replication destination and its configurations + // including enabling the S3 Replication Time Control (S3 RTC). // // Destination is a required field Destination *Destination `type:"structure" required:"true"` + // Optional configuration to replicate existing source bucket objects. For more + // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) + // in the Amazon S3 Developer Guide. + ExistingObjectReplication *ExistingObjectReplication `type:"structure"` + // A filter that identifies the subset of objects to which the replication rule // applies. A Filter must specify exactly one Prefix, Tag, or an And child element. Filter *ReplicationRuleFilter `type:"structure"` @@ -21505,8 +27214,9 @@ type ReplicationRule struct { // A unique identifier for the rule. The maximum value is 255 characters. ID *string `type:"string"` - // An object keyname prefix that identifies the object or objects to which the - // rule applies. 
The maximum prefix length is 1,024 characters. + // An object key name prefix that identifies the object or objects to which + // the rule applies. The maximum prefix length is 1,024 characters. To include + // all objects in a bucket, specify an empty string. // // Deprecated: Prefix has been deprecated Prefix *string `deprecated:"true" type:"string"` @@ -21516,27 +27226,24 @@ type ReplicationRule struct { // when filtering. If two or more rules identify the same object based on a // specified filter, the rule with higher priority takes precedence. For example: // - // * Same object quality prefix based filter criteria If prefixes you specified + // * Same object quality prefix-based filter criteria if prefixes you specified // in multiple rules overlap // - // * Same object qualify tag based filter criteria specified in multiple + // * Same object qualify tag-based filter criteria specified in multiple // rules // - // For more information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) - // in the Amazon S3 Developer Guide. + // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) + // in the Amazon Simple Storage Service Developer Guide. Priority *int64 `type:"integer"` // A container that describes additional filters for identifying the source // objects that you want to replicate. You can choose to enable or disable the // replication of these objects. Currently, Amazon S3 supports only the filter // that you can specify for objects created with server-side encryption using - // an AWS KMS-Managed Key (SSE-KMS). - // - // If you want Amazon S3 to replicate objects created with server-side encryption - // using AWS KMS-Managed Keys. + // a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS). SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` - // If status isn't enabled, the rule is ignored. + // Specifies whether the rule is enabled. // // Status is a required field Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` @@ -21566,6 +27273,11 @@ func (s *ReplicationRule) Validate() error { invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) } } + if s.ExistingObjectReplication != nil { + if err := s.ExistingObjectReplication.Validate(); err != nil { + invalidParams.AddNested("ExistingObjectReplication", err.(request.ErrInvalidParams)) + } + } if s.Filter != nil { if err := s.Filter.Validate(); err != nil { invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) @@ -21595,6 +27307,12 @@ func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { return s } +// SetExistingObjectReplication sets the ExistingObjectReplication field's value. +func (s *ReplicationRule) SetExistingObjectReplication(v *ExistingObjectReplication) *ReplicationRule { + s.ExistingObjectReplication = v + return s +} + // SetFilter sets the Filter field's value. func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule { s.Filter = v @@ -21631,11 +27349,25 @@ func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { return s } +// A container for specifying rule filters. The filters determine the subset +// of objects to which the rule applies. This element is required only if you +// specify more than one filter. +// +// For example: +// +// * If you specify both a Prefix and a Tag filter, wrap these filters in +// an And tag. 
+// +// * If you specify a filter based on multiple tags, wrap the Tag elements +// in an And tag type ReplicationRuleAndOperator struct { _ struct{} `type:"structure"` + // An object key name prefix that identifies the subset of objects to which + // the rule applies. Prefix *string `type:"string"` + // An array of tags containing key and value pairs. Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } @@ -21697,8 +27429,8 @@ type ReplicationRuleFilter struct { // in an And tag. And *ReplicationRuleAndOperator `type:"structure"` - // An object keyname prefix that identifies the subset of objects to which the - // rule applies. + // An object key name prefix that identifies the subset of objects to which + // the rule applies. Prefix *string `type:"string"` // A container for specifying a tag key and value. @@ -21755,6 +27487,91 @@ func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter { return s } +// A container specifying S3 Replication Time Control (S3 RTC) related information, +// including whether S3 RTC is enabled and the time when all objects and operations +// on objects must be replicated. Must be specified together with a Metrics +// block. +type ReplicationTime struct { + _ struct{} `type:"structure"` + + // Specifies whether the replication time is enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationTimeStatus"` + + // A container specifying the time by which replication should be complete for + // all objects and operations on objects. + // + // Time is a required field + Time *ReplicationTimeValue `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ReplicationTime) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationTime) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationTime) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationTime"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Time == nil { + invalidParams.Add(request.NewErrParamRequired("Time")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ReplicationTime) SetStatus(v string) *ReplicationTime { + s.Status = &v + return s +} + +// SetTime sets the Time field's value. +func (s *ReplicationTime) SetTime(v *ReplicationTimeValue) *ReplicationTime { + s.Time = v + return s +} + +// A container specifying the time value for S3 Replication Time Control (S3 +// RTC) and replication metrics EventThreshold. +type ReplicationTimeValue struct { + _ struct{} `type:"structure"` + + // Contains an integer specifying time in minutes. + // + // Valid values: 15 minutes. + Minutes *int64 `type:"integer"` +} + +// String returns the string representation +func (s ReplicationTimeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationTimeValue) GoString() string { + return s.String() +} + +// SetMinutes sets the Minutes field's value. +func (s *ReplicationTimeValue) SetMinutes(v int64) *ReplicationTimeValue { + s.Minutes = &v + return s +} + +// Container for Payer. 
type RequestPaymentConfiguration struct { _ struct{} `type:"structure"` @@ -21793,6 +27610,7 @@ func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfigur return s } +// Container for specifying if periodic QueryProgress messages should be sent. type RequestProgress struct { _ struct{} `type:"structure"` @@ -21818,23 +27636,36 @@ func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { } type RestoreObjectInput struct { - _ struct{} `type:"structure" payload:"RestoreRequest"` + _ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"` + // The bucket name or containing the object to restore. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which the operation was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Container for restore job parameters. RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // VersionId used to reference a specific version of the object. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -21912,6 +27743,20 @@ func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { return s } +func (s *RestoreObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *RestoreObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type RestoreObjectOutput struct { _ struct{} `type:"structure"` @@ -21957,7 +27802,7 @@ type RestoreRequest struct { // The optional description for the job. Description *string `type:"string"` - // Glacier related parameters pertaining to this job. Do not use with restores + // S3 Glacier related parameters pertaining to this job. Do not use with restores // that specify OutputLocation. 
GlacierJobParameters *GlacierJobParameters `type:"structure"` @@ -21967,7 +27812,7 @@ type RestoreRequest struct { // Describes the parameters for Select job types. SelectParameters *SelectParameters `type:"structure"` - // Glacier retrieval tier at which the restore will be processed. + // S3 Glacier retrieval tier at which the restore will be processed. Tier *string `type:"string" enum:"Tier"` // Type of restore request. @@ -22051,6 +27896,7 @@ func (s *RestoreRequest) SetType(v string) *RestoreRequest { return s } +// Specifies the redirect behavior and when a redirect is applied. type RoutingRule struct { _ struct{} `type:"structure"` @@ -22103,16 +27949,24 @@ func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { return s } +// Specifies lifecycle rules for an Amazon S3 bucket. For more information, +// see Put Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) +// in the Amazon Simple Storage Service API Reference. For examples, see Put +// Bucket Lifecycle Configuration Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples) type Rule struct { _ struct{} `type:"structure"` - // Specifies the days since the initiation of an Incomplete Multipart Upload - // that Lifecycle will wait before permanently removing all parts of the upload. + // Specifies the days since the initiation of an incomplete multipart upload + // that Amazon S3 will wait before permanently removing all parts of the upload. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon Simple Storage Service Developer Guide. AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + // Specifies the expiration for the lifecycle of the object. Expiration *LifecycleExpiration `type:"structure"` - // Unique identifier for the rule. The value cannot be longer than 255 characters. + // Unique identifier for the rule. The value can't be longer than 255 characters. ID *string `type:"string"` // Specifies when noncurrent object versions expire. Upon expiration, Amazon @@ -22123,25 +27977,30 @@ type Rule struct { NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` // Container for the transition rule that describes when noncurrent objects - // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER or - // DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning + // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, + // or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning // is suspended), you can set this action to request that Amazon S3 transition // noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, - // GLACIER or DEEP_ARCHIVE storage class at a specific period in the object's + // GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's // lifetime. NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` - // Prefix identifying one or more objects to which the rule applies. + // Object key prefix that identifies one or more objects to which this rule + // applies. 
// // Prefix is a required field Prefix *string `type:"string" required:"true"` - // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule - // is not currently being applied. + // If Enabled, the rule is currently being applied. If Disabled, the rule is + // not currently being applied. // // Status is a required field Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + // Specifies when an object transitions to a specified storage class. For more + // information about Amazon S3 lifecycle configuration rules, see Transitioning + // Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) + // in the Amazon Simple Storage Service Developer Guide. Transition *Transition `type:"structure"` } @@ -22219,12 +28078,12 @@ func (s *Rule) SetTransition(v *Transition) *Rule { return s } -// Specifies the use of SSE-KMS to encrypt delivered Inventory reports. +// Specifies the use of SSE-KMS to encrypt delivered inventory reports. type SSEKMS struct { _ struct{} `locationName:"SSE-KMS" type:"structure"` - // Specifies the ID of the AWS Key Management Service (KMS) master encryption - // key to use for encrypting Inventory reports. + // Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer + // managed customer master key (CMK) to use for encrypting inventory reports. // // KeyId is a required field KeyId *string `type:"string" required:"true" sensitive:"true"` @@ -22259,7 +28118,7 @@ func (s *SSEKMS) SetKeyId(v string) *SSEKMS { return s } -// Specifies the use of SSE-S3 to encrypt delivered Inventory reports. +// Specifies the use of SSE-S3 to encrypt delivered inventory reports. type SSES3 struct { _ struct{} `locationName:"SSE-S3" type:"structure"` } @@ -22274,75 +28133,51 @@ func (s SSES3) GoString() string { return s.String() } -// SelectObjectContentEventStream provides handling of EventStreams for -// the SelectObjectContent API. -// -// Use this type to receive SelectObjectContentEventStream events. The events -// can be read from the Events channel member. -// -// The events that can be received are: -// -// * ContinuationEvent -// * EndEvent -// * ProgressEvent -// * RecordsEvent -// * StatsEvent -type SelectObjectContentEventStream struct { - // Reader is the EventStream reader for the SelectObjectContentEventStream - // events. This value is automatically set by the SDK when the API call is made - // Use this member when unit testing your code with the SDK to mock out the - // EventStream Reader. - // - // Must not be nil. - Reader SelectObjectContentEventStreamReader +// Specifies the byte range of the object to get the records from. A record +// is processed when its first byte is contained by the range. This parameter +// is optional, but when specified, it must not be empty. See RFC 2616, Section +// 14.35.1 about how to specify the start and end of the range. +type ScanRange struct { + _ struct{} `type:"structure"` - // StreamCloser is the io.Closer for the EventStream connection. For HTTP - // EventStream this is the response Body. The stream will be closed when - // the Close method of the EventStream is called. - StreamCloser io.Closer + // Specifies the end of the byte range. This parameter is optional. Valid values: + // non-negative integers. The default value is one less than the size of the + // object being queried. If only the End parameter is supplied, it is interpreted + // to mean scan the last N bytes of the file. 
For example, 50 + // means scan the last 50 bytes. + End *int64 `type:"long"` + + // Specifies the start of the byte range. This parameter is optional. Valid + // values: non-negative integers. The default value is 0. If only start is supplied, + // it means scan from that point to the end of the file.For example; 50 + // means scan from byte 50 until the end of the file. + Start *int64 `type:"long"` } -// Close closes the EventStream. This will also cause the Events channel to be -// closed. You can use the closing of the Events channel to terminate your -// application's read from the API's EventStream. -// -// Will close the underlying EventStream reader. For EventStream over HTTP -// connection this will also close the HTTP connection. -// -// Close must be called when done using the EventStream API. Not calling Close -// may result in resource leaks. -func (es *SelectObjectContentEventStream) Close() (err error) { - es.Reader.Close() - return es.Err() +// String returns the string representation +func (s ScanRange) String() string { + return awsutil.Prettify(s) } -// Err returns any error that occurred while reading EventStream Events from -// the service API's response. Returns nil if there were no errors. -func (es *SelectObjectContentEventStream) Err() error { - if err := es.Reader.Err(); err != nil { - return err - } - es.StreamCloser.Close() +// GoString returns the string representation +func (s ScanRange) GoString() string { + return s.String() +} - return nil +// SetEnd sets the End field's value. +func (s *ScanRange) SetEnd(v int64) *ScanRange { + s.End = &v + return s } -// Events returns a channel to read EventStream Events from the -// SelectObjectContent API. -// -// These events are: -// -// * ContinuationEvent -// * EndEvent -// * ProgressEvent -// * RecordsEvent -// * StatsEvent -func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { - return es.Reader.Events() +// SetStart sets the Start field's value. +func (s *ScanRange) SetStart(v int64) *ScanRange { + s.Start = &v + return s } // SelectObjectContentEventStreamEvent groups together all EventStream -// events read from the SelectObjectContent API. +// events writes for SelectObjectContentEventStream. // // These events are: // @@ -22353,11 +28188,12 @@ func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEve // * StatsEvent type SelectObjectContentEventStreamEvent interface { eventSelectObjectContentEventStream() + eventstreamapi.Marshaler + eventstreamapi.Unmarshaler } -// SelectObjectContentEventStreamReader provides the interface for reading EventStream -// Events from the SelectObjectContent API. The -// default implementation for this interface will be SelectObjectContentEventStream. +// SelectObjectContentEventStreamReader provides the interface for reading to the stream. The +// default implementation for this interface will be SelectObjectContentEventStreamData. // // The reader's Close method must allow multiple concurrent calls. // @@ -22372,8 +28208,7 @@ type SelectObjectContentEventStreamReader interface { // Returns a channel of events as they are read from the event stream. Events() <-chan SelectObjectContentEventStreamEvent - // Close will close the underlying event stream reader. For event stream over - // HTTP this will also close the HTTP connection. + // Close will stop the reader reading events from the stream. Close() error // Returns any error that has occurred while reading from the event stream. 
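The new ScanRange type and the reworked SelectObjectContent event stream reader above are easiest to see side by side. The following is a minimal, hypothetical sketch (not part of this vendored diff) of how a caller might use them with the updated SDK: the bucket, key, and SQL expression are placeholders, and GetStream() is the accessor this change adds to SelectObjectContentOutput a little further down.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Placeholders: bucket, key, and query are illustrative only.
	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:              aws.String("example-bucket"),
		Key:                 aws.String("example.csv"),
		Expression:          aws.String("SELECT * FROM S3Object s"),
		ExpressionType:      aws.String(s3.ExpressionTypeSql),
		InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{}},
		OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
		// ScanRange is new in this SDK version: only records whose first
		// byte falls inside the range are processed.
		ScanRange: &s3.ScanRange{Start: aws.Int64(0), End: aws.Int64(1 << 20)},
	})
	if err != nil {
		log.Fatal(err)
	}

	stream := out.GetStream()
	defer stream.Close()

	for event := range stream.Events() {
		switch e := event.(type) {
		case *s3.RecordsEvent:
			fmt.Printf("%s", e.Payload) // raw query results
		case *s3.StatsEvent:
			fmt.Println("bytes scanned:", aws.Int64Value(e.Details.BytesScanned))
		case *s3.EndEvent:
			// server signalled the end of the event stream
		}
	}
	if err := stream.Err(); err != nil {
		log.Fatal(err)
	}
}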
@@ -22383,57 +28218,44 @@ type SelectObjectContentEventStreamReader interface { type readSelectObjectContentEventStream struct { eventReader *eventstreamapi.EventReader stream chan SelectObjectContentEventStreamEvent - errVal atomic.Value + err *eventstreamapi.OnceError done chan struct{} closeOnce sync.Once } -func newReadSelectObjectContentEventStream( - reader io.ReadCloser, - unmarshalers request.HandlerList, - logger aws.Logger, - logLevel aws.LogLevelType, -) *readSelectObjectContentEventStream { +func newReadSelectObjectContentEventStream(eventReader *eventstreamapi.EventReader) *readSelectObjectContentEventStream { r := &readSelectObjectContentEventStream{ - stream: make(chan SelectObjectContentEventStreamEvent), - done: make(chan struct{}), + eventReader: eventReader, + stream: make(chan SelectObjectContentEventStreamEvent), + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), } - - r.eventReader = eventstreamapi.NewEventReader( - reader, - protocol.HandlerPayloadUnmarshal{ - Unmarshalers: unmarshalers, - }, - r.unmarshalerForEventType, - ) - r.eventReader.UseLogger(logger, logLevel) + go r.readEventStream() return r } -// Close will close the underlying event stream reader. For EventStream over -// HTTP this will also close the HTTP connection. +// Close will close the underlying event stream reader. func (r *readSelectObjectContentEventStream) Close() error { r.closeOnce.Do(r.safeClose) - return r.Err() } +func (r *readSelectObjectContentEventStream) ErrorSet() <-chan struct{} { + return r.err.ErrorSet() +} + +func (r *readSelectObjectContentEventStream) Closed() <-chan struct{} { + return r.done +} + func (r *readSelectObjectContentEventStream) safeClose() { close(r.done) - err := r.eventReader.Close() - if err != nil { - r.errVal.Store(err) - } } func (r *readSelectObjectContentEventStream) Err() error { - if v := r.errVal.Load(); v != nil { - return v.(error) - } - - return nil + return r.err.Err() } func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { @@ -22441,6 +28263,7 @@ func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContent } func (r *readSelectObjectContentEventStream) readEventStream() { + defer r.Close() defer close(r.stream) for { @@ -22455,7 +28278,7 @@ func (r *readSelectObjectContentEventStream) readEventStream() { return default: } - r.errVal.Store(err) + r.err.SetError(err) return } @@ -22467,22 +28290,20 @@ func (r *readSelectObjectContentEventStream) readEventStream() { } } -func (r *readSelectObjectContentEventStream) unmarshalerForEventType( - eventType string, -) (eventstreamapi.Unmarshaler, error) { +type unmarshalerForSelectObjectContentEventStreamEvent struct { + metadata protocol.ResponseMetadata +} + +func (u unmarshalerForSelectObjectContentEventStreamEvent) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, error) { switch eventType { case "Cont": return &ContinuationEvent{}, nil - case "End": return &EndEvent{}, nil - case "Progress": return &ProgressEvent{}, nil - case "Records": return &RecordsEvent{}, nil - case "Stats": return &StatsEvent{}, nil default: @@ -22514,7 +28335,7 @@ type SelectObjectContentInput struct { // Expression is a required field Expression *string `type:"string" required:"true"` - // The type of the provided expression (for example., SQL). + // The type of the provided expression (for example, SQL). 
// // ExpressionType is a required field ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` @@ -22537,17 +28358,35 @@ type SelectObjectContentInput struct { // Specifies if periodic request progress information should be enabled. RequestProgress *RequestProgress `type:"structure"` - // The SSE Algorithm used to encrypt the object. For more information, see - // Server-Side Encryption (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + // The SSE Algorithm used to encrypt the object. For more information, see Server-Side + // Encryption (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - // The SSE Customer Key. For more information, see Server-Side Encryption (Using + // The SSE Customer Key. For more information, see Server-Side Encryption (Using // Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` - // The SSE Customer Key MD5. For more information, see Server-Side Encryption + // The SSE Customer Key MD5. For more information, see Server-Side Encryption // (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the byte range of the object to get the records from. A record + // is processed when its first byte is contained by the range. This parameter + // is optional, but when specified, it must not be empty. See RFC 2616, Section + // 14.35.1 about how to specify the start and end of the range. + // + // ScanRangemay be used in the following ways: + // + // * 50100 - process only + // the records starting between the bytes 50 and 100 (inclusive, counting + // from zero) + // + // * 50 - process only the records + // starting after the byte 50 + // + // * 50 - process only the records within + // the last 50 bytes of the file. + ScanRange *ScanRange `type:"structure"` } // String returns the string representation @@ -22668,11 +28507,30 @@ func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectC return s } +// SetScanRange sets the ScanRange field's value. +func (s *SelectObjectContentInput) SetScanRange(v *ScanRange) *SelectObjectContentInput { + s.ScanRange = v + return s +} + +func (s *SelectObjectContentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *SelectObjectContentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type SelectObjectContentOutput struct { _ struct{} `type:"structure" payload:"Payload"` - // Use EventStream to use the API's stream. 
- EventStream *SelectObjectContentEventStream `type:"structure"` + EventStream *SelectObjectContentEventStream } // String returns the string representation @@ -22685,29 +28543,17 @@ func (s SelectObjectContentOutput) GoString() string { return s.String() } -// SetEventStream sets the EventStream field's value. func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput { s.EventStream = v return s } +func (s *SelectObjectContentOutput) GetEventStream() *SelectObjectContentEventStream { + return s.EventStream +} -func (s *SelectObjectContentOutput) runEventStreamLoop(r *request.Request) { - if r.Error != nil { - return - } - reader := newReadSelectObjectContentEventStream( - r.HTTPResponse.Body, - r.Handlers.UnmarshalStream, - r.Config.Logger, - r.Config.LogLevel.Value(), - ) - go reader.readEventStream() - - eventStream := &SelectObjectContentEventStream{ - StreamCloser: r.HTTPResponse.Body, - Reader: reader, - } - s.EventStream = eventStream +// GetStream returns the type to interact with the event stream. +func (s *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream { + return s.EventStream } // Describes the parameters for Select job types. @@ -22719,7 +28565,7 @@ type SelectParameters struct { // Expression is a required field Expression *string `type:"string" required:"true"` - // The type of the provided expression (e.g., SQL). + // The type of the provided expression (for example, SQL). // // ExpressionType is a required field ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` @@ -22792,13 +28638,31 @@ func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *Selec } // Describes the default server-side encryption to apply to new objects in the -// bucket. If Put Object request does not specify any server-side encryption, -// this default encryption will be applied. +// bucket. If a PUT Object request doesn't specify any server-side encryption, +// this default encryption will be applied. For more information, see PUT Bucket +// encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) +// in the Amazon Simple Storage Service API Reference. type ServerSideEncryptionByDefault struct { _ struct{} `type:"structure"` - // KMS master key ID to use for the default encryption. This parameter is allowed - // if SSEAlgorithm is aws:kms. + // AWS Key Management Service (KMS) customer master key ID to use for the default + // encryption. This parameter is allowed if and only if SSEAlgorithm is set + // to aws:kms. + // + // You can specify the key ID or the Amazon Resource Name (ARN) of the CMK. + // However, if you are using encryption with cross-account operations, you must + // use a fully qualified CMK ARN. For more information, see Using encryption + // for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). + // + // For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // Amazon S3 only supports symmetric CMKs and not asymmetric CMKs. For more + // information, see Using Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the AWS Key Management Service Developer Guide. 
KMSMasterKeyID *string `type:"string" sensitive:"true"` // Server-side encryption algorithm to use for the default encryption. @@ -22842,8 +28706,7 @@ func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEnc return s } -// Container for server-side encryption configuration rules. Currently S3 supports -// one rule only. +// Specifies the default server-side-encryption configuration. type ServerSideEncryptionConfiguration struct { _ struct{} `type:"structure"` @@ -22893,13 +28756,12 @@ func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRu return s } -// Container for information about a particular server-side encryption configuration -// rule. +// Specifies the default server-side encryption configuration. type ServerSideEncryptionRule struct { _ struct{} `type:"structure"` - // Describes the default server-side encryption to apply to new objects in the - // bucket. If Put Object request does not specify any server-side encryption, + // Specifies the default server-side encryption to apply to new objects in the + // bucket. If a PUT Object request doesn't specify any server-side encryption, // this default encryption will be applied. ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` } @@ -22935,13 +28797,17 @@ func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *Serv return s } -// A container for filters that define which source objects should be replicated. +// A container that describes additional filters for identifying the source +// objects that you want to replicate. You can choose to enable or disable the +// replication of these objects. Currently, Amazon S3 supports only the filter +// that you can specify for objects created with server-side encryption using +// a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS). type SourceSelectionCriteria struct { _ struct{} `type:"structure"` - // A container for filter information for the selection of S3 objects encrypted - // with AWS KMS. If you include SourceSelectionCriteria in the replication configuration, - // this element is required. + // A container for filter information for the selection of Amazon S3 objects + // encrypted with AWS KMS. If you include SourceSelectionCriteria in the replication + // configuration, this element is required. SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` } @@ -22981,8 +28847,8 @@ func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedOb type SseKmsEncryptedObjects struct { _ struct{} `type:"structure"` - // If the status is not Enabled, replication for S3 objects encrypted with AWS - // KMS is disabled. + // Specifies whether Amazon S3 replicates objects created with server-side encryption + // using a customer master key (CMK) stored in AWS Key Management Service. // // Status is a required field Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` @@ -23017,6 +28883,7 @@ func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects { return s } +// Container for the stats details. type Stats struct { _ struct{} `type:"structure"` @@ -23058,6 +28925,7 @@ func (s *Stats) SetBytesScanned(v int64) *Stats { return s } +// Container for the Stats Event. 
type StatsEvent struct { _ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"` @@ -23098,11 +28966,24 @@ func (s *StatsEvent) UnmarshalEvent( return nil } +func (s *StatsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + var buf bytes.Buffer + if err = pm.MarshalPayload(&buf, s); err != nil { + return eventstream.Message{}, err + } + msg.Payload = buf.Bytes() + return msg, err +} + +// Specifies data related to access patterns to be collected and made available +// to analyze the tradeoffs between different storage classes for an Amazon +// S3 bucket. type StorageClassAnalysis struct { _ struct{} `type:"structure"` - // A container used to describe how data related to the storage class analysis - // should be exported. + // Specifies how data related to the storage class analysis for an Amazon S3 + // bucket should be exported. DataExport *StorageClassAnalysisDataExport `type:"structure"` } @@ -23137,6 +29018,8 @@ func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) return s } +// Container for data related to the storage class analysis for an Amazon S3 +// bucket for export. type StorageClassAnalysisDataExport struct { _ struct{} `type:"structure"` @@ -23194,6 +29077,7 @@ func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *Stora return s } +// A container of a key value name pair. type Tag struct { _ struct{} `type:"structure"` @@ -23249,9 +29133,12 @@ func (s *Tag) SetValue(v string) *Tag { return s } +// Container for TagSet elements. type Tagging struct { _ struct{} `type:"structure"` + // A collection for a set of tags + // // TagSet is a required field TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` } @@ -23295,9 +29182,11 @@ func (s *Tagging) SetTagSet(v []*Tag) *Tagging { return s } +// Container for granting information. type TargetGrant struct { _ struct{} `type:"structure"` + // Container for the person being granted permissions. Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` // Logging permissions assigned to the Grantee for the bucket. @@ -23342,16 +29231,20 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant { } // A container for specifying the configuration for publication of messages -// to an Amazon Simple Notification Service (Amazon SNS) topic.when Amazon S3 +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 // detects specified events. type TopicConfiguration struct { _ struct{} `type:"structure"` + // The Amazon S3 bucket event about which to send notifications. For more information, + // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - // A container for object key name filtering rules. For information about key - // name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) // in the Amazon Simple Storage Service Developer Guide. 
Filter *NotificationConfigurationFilter `type:"structure"` @@ -23360,7 +29253,7 @@ type TopicConfiguration struct { Id *string `type:"string"` // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 - // will publish a message when it detects events of the specified type. + // publishes a message when it detects events of the specified type. // // TopicArn is a required field TopicArn *string `locationName:"Topic" type:"string" required:"true"` @@ -23416,6 +29309,10 @@ func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { return s } +// A container for specifying the configuration for publication of messages +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +// detects specified events. This data type is deprecated. Use TopicConfiguration +// instead. type TopicConfigurationDeprecated struct { _ struct{} `type:"structure"` @@ -23424,6 +29321,7 @@ type TopicConfigurationDeprecated struct { // Deprecated: Event has been deprecated Event *string `deprecated:"true" type:"string" enum:"Event"` + // A collection of events related to objects Events []*string `locationName:"Event" type:"list" flattened:"true"` // An optional unique identifier for configurations in a notification configuration. @@ -23469,18 +29367,22 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep return s } +// Specifies when an object transitions to a specified storage class. For more +// information about Amazon S3 lifecycle configuration rules, see Transitioning +// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) +// in the Amazon Simple Storage Service Developer Guide. type Transition struct { _ struct{} `type:"structure"` - // Indicates at what date the object is to be moved or deleted. Should be in - // GMT ISO 8601 Format. + // Indicates when objects are transitioned to the specified storage class. The + // date value must be in ISO 8601 format. The time is always midnight UTC. Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` - // Indicates the lifetime, in days, of the objects that are subject to the rule. - // The value must be a non-zero positive integer. + // Indicates the number of days after creation when objects are transitioned + // to the specified storage class. The value must be a positive integer. Days *int64 `type:"integer"` - // The class of storage used to store the object. + // The storage class to which you want the object to transition. StorageClass *string `type:"string" enum:"TransitionStorageClass"` } @@ -23513,8 +29415,10 @@ func (s *Transition) SetStorageClass(v string) *Transition { } type UploadPartCopyInput struct { - _ struct{} `type:"structure"` + _ struct{} `locationName:"UploadPartCopyRequest" type:"structure"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -23540,23 +29444,26 @@ type UploadPartCopyInput struct { // The range of bytes to copy from the source object. The range value must use // the form bytes=first-last, where the first and last are the zero-based byte // offsets to copy. For example, bytes=0-9 indicates that you want to copy the - // first ten bytes of the source. You can copy a range only if the source object + // first 10 bytes of the source. You can copy a range only if the source object // is greater than 5 MB. 
CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` - // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256). CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. - CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + // Object key for which the multipart upload was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -23566,26 +29473,28 @@ type UploadPartCopyInput struct { // PartNumber is a required field PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. 
This must be the same encryption key specified in the initiate multipart // upload request. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Upload ID identifying the multipart upload whose part is being copied. @@ -23758,9 +29667,24 @@ func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { return s } +func (s *UploadPartCopyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *UploadPartCopyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type UploadPartCopyOutput struct { _ struct{} `type:"structure" payload:"CopyPartResult"` + // Container for all response elements. CopyPartResult *CopyPartResult `type:"structure"` // The version of the source object that was copied, if you have enabled versioning @@ -23777,16 +29701,17 @@ type UploadPartCopyOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` } @@ -23843,7 +29768,7 @@ func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopy } type UploadPartInput struct { - _ struct{} `type:"structure" payload:"Body"` + _ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"` // Object data. Body io.ReadSeeker `type:"blob"` @@ -23857,7 +29782,9 @@ type UploadPartInput struct { // body cannot be determined automatically. 
ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - // The base64-encoded 128-bit MD5 digest of the part data. + // The base64-encoded 128-bit MD5 digest of the part data. This parameter is + // auto-populated when using the command from the CLI. This parameter is required + // if object lock parameters are specified. ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` // Object key for which the multipart upload was initiated. @@ -23871,26 +29798,28 @@ type UploadPartInput struct { // PartNumber is a required field PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. This must be the same encryption key specified in the initiate multipart // upload request. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Upload ID identifying the multipart upload whose part is being uploaded. 
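The getEndpointARN/hasEndpointARN helpers that this diff adds to each input type (UploadPartInput gets its own pair in the next hunk) are what allow these operations to accept an S3 access point ARN in the Bucket field. A hedged sketch, not taken from this repository; the account ID, access point name, key, and upload ID are all placeholders.

package main

import (
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(
		aws.NewConfig().WithRegion("us-west-2"))))

	// When Bucket is an ARN, hasEndpointARN reports true and the SDK should
	// route the request to the access point endpoint rather than a plain
	// bucket hostname.
	out, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/example-ap"),
		Key:        aws.String("example-key"),
		UploadId:   aws.String("example-upload-id"), // from CreateMultipartUpload
		PartNumber: aws.Int64(1),
		Body:       strings.NewReader("part data"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("part ETag:", aws.StringValue(out.ETag))
}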
@@ -24017,6 +29946,20 @@ func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { return s } +func (s *UploadPartInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *UploadPartInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type UploadPartOutput struct { _ struct{} `type:"structure"` @@ -24033,16 +29976,16 @@ type UploadPartOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) was used for the object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` } @@ -24092,6 +30035,9 @@ func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { return s } +// Describes the versioning state of an Amazon S3 bucket. For more information, +// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) +// in the Amazon Simple Storage Service API Reference. type VersioningConfiguration struct { _ struct{} `type:"structure"` @@ -24126,15 +30072,22 @@ func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { return s } +// Specifies website configuration parameters for an Amazon S3 bucket. type WebsiteConfiguration struct { _ struct{} `type:"structure"` + // The name of the error document for the website. ErrorDocument *ErrorDocument `type:"structure"` + // The name of the index document for the website. IndexDocument *IndexDocument `type:"structure"` + // The redirect behavior for every request to this bucket's website endpoint. + // + // If you specify this property, you can't specify any other property. RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + // Rules that define when a redirect is applied and the redirect behavior. 
RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` } @@ -24347,11 +30300,37 @@ const ( // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" + // EventS3ObjectRestore is a Event enum value + EventS3ObjectRestore = "s3:ObjectRestore:*" + // EventS3ObjectRestorePost is a Event enum value EventS3ObjectRestorePost = "s3:ObjectRestore:Post" // EventS3ObjectRestoreCompleted is a Event enum value EventS3ObjectRestoreCompleted = "s3:ObjectRestore:Completed" + + // EventS3Replication is a Event enum value + EventS3Replication = "s3:Replication:*" + + // EventS3ReplicationOperationFailedReplication is a Event enum value + EventS3ReplicationOperationFailedReplication = "s3:Replication:OperationFailedReplication" + + // EventS3ReplicationOperationNotTracked is a Event enum value + EventS3ReplicationOperationNotTracked = "s3:Replication:OperationNotTracked" + + // EventS3ReplicationOperationMissedThreshold is a Event enum value + EventS3ReplicationOperationMissedThreshold = "s3:Replication:OperationMissedThreshold" + + // EventS3ReplicationOperationReplicatedAfterThreshold is a Event enum value + EventS3ReplicationOperationReplicatedAfterThreshold = "s3:Replication:OperationReplicatedAfterThreshold" +) + +const ( + // ExistingObjectReplicationStatusEnabled is a ExistingObjectReplicationStatus enum value + ExistingObjectReplicationStatusEnabled = "Enabled" + + // ExistingObjectReplicationStatusDisabled is a ExistingObjectReplicationStatus enum value + ExistingObjectReplicationStatusDisabled = "Disabled" ) const ( @@ -24443,6 +30422,9 @@ const ( // InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus" + + // InventoryOptionalFieldIntelligentTieringAccessTier is a InventoryOptionalField enum value + InventoryOptionalFieldIntelligentTieringAccessTier = "IntelligentTieringAccessTier" ) const ( @@ -24477,6 +30459,14 @@ const ( MetadataDirectiveReplace = "REPLACE" ) +const ( + // MetricsStatusEnabled is a MetricsStatus enum value + MetricsStatusEnabled = "Enabled" + + // MetricsStatusDisabled is a MetricsStatus enum value + MetricsStatusDisabled = "Disabled" +) + const ( // ObjectCannedACLPrivate is a ObjectCannedACL enum value ObjectCannedACLPrivate = "private" @@ -24625,6 +30615,14 @@ const ( ReplicationStatusReplica = "REPLICA" ) +const ( + // ReplicationTimeStatusEnabled is a ReplicationTimeStatus enum value + ReplicationTimeStatusEnabled = "Enabled" + + // ReplicationTimeStatusDisabled is a ReplicationTimeStatus enum value + ReplicationTimeStatusDisabled = "Disabled" +) + // If present, indicates that the requester was successfully charged for the // request. const ( @@ -24632,10 +30630,11 @@ const ( RequestChargedRequester = "requester" ) -// Confirms that the requester knows that she or he will be charged for the -// request. Bucket owners need not specify this parameter in their requests. -// Documentation on downloading objects from requester pays buckets can be found -// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html +// Confirms that the requester knows that they will be charged for the request. +// Bucket owners need not specify this parameter in their requests. 
For information +// about downloading objects from requester pays buckets, see Downloading Objects +// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) +// in the Amazon S3 Developer Guide. const ( // RequestPayerRequester is a RequestPayer enum value RequestPayerRequester = "requester" diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go index 5c8ce5cc..407f06b6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go @@ -13,7 +13,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkio" ) const ( @@ -25,30 +24,6 @@ const ( appendMD5TxEncoding = "append-md5" ) -// contentMD5 computes and sets the HTTP Content-MD5 header for requests that -// require it. -func contentMD5(r *request.Request) { - h := md5.New() - - if !aws.IsReaderSeekable(r.Body) { - if r.Config.Logger != nil { - r.Config.Logger.Log(fmt.Sprintf( - "Unable to compute Content-MD5 for unseekable body, S3.%s", - r.Operation.Name)) - } - return - } - - if _, err := copySeekableBody(h, r.Body); err != nil { - r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) - return - } - - // encode the md5 checksum in base64 and set the request header. - v := base64.StdEncoding.EncodeToString(h.Sum(nil)) - r.HTTPRequest.Header.Set(contentMD5Header, v) -} - // computeBodyHashes will add Content MD5 and Content Sha256 hashes to the // request. If the body is not seekable or S3DisableContentMD5Validation set // this handler will be ignored. @@ -90,7 +65,7 @@ func computeBodyHashes(r *request.Request) { dst = io.MultiWriter(hashers...) } - if _, err := copySeekableBody(dst, r.Body); err != nil { + if _, err := aws.CopySeekableBody(dst, r.Body); err != nil { r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err) return } @@ -119,28 +94,6 @@ const ( sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen ) -func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { - curPos, err := src.Seek(0, sdkio.SeekCurrent) - if err != nil { - return 0, err - } - - // hash the body. seek back to the first position after reading to reset - // the body for transmission. copy errors may be assumed to be from the - // body. - n, err := io.Copy(dst, src) - if err != nil { - return n, err - } - - _, err = src.Seek(curPos, sdkio.SeekStart) - if err != nil { - return n, err - } - - return n, nil -} - // Adds the x-amz-te: append_md5 header to the request. This requests the service // responds with a trailing MD5 checksum. 
// diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go index bc68a46a..9ba8a788 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -80,7 +80,8 @@ func buildGetBucketLocation(r *request.Request) { out := r.Data.(*GetBucketLocationOutput) b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed reading response body", err) + r.Error = awserr.New(request.ErrCodeSerialization, + "failed reading response body", err) return } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index 95f24563..a7698d5e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -4,6 +4,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/s3err" + "github.com/aws/aws-sdk-go/service/s3/internal/arn" ) func init() { @@ -13,11 +14,12 @@ func init() { func defaultInitClientFn(c *client.Client) { // Support building custom endpoints based on config - c.Handlers.Build.PushFront(updateEndpointForS3Config) + c.Handlers.Build.PushFront(endpointHandler) // Require SSL when using SSE keys c.Handlers.Validate.PushBack(validateSSERequiresSSL) - c.Handlers.Build.PushBack(computeSSEKeys) + c.Handlers.Build.PushBack(computeSSEKeyMD5) + c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5) // S3 uses custom error unmarshaling logic c.Handlers.UnmarshalError.Clear() @@ -26,17 +28,11 @@ func defaultInitClientFn(c *client.Client) { } func defaultInitRequestFn(r *request.Request) { - // Add reuest handlers for specific platforms. + // Add request handlers for specific platforms. // e.g. 100-continue support for PUT requests using Go 1.6 platformRequestHandlers(r) switch r.Operation.Name { - case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, - opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration, - opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration, - opPutBucketReplication: - // These S3 operations require Content-MD5 to be set - r.Handlers.Build.PushBack(contentMD5) case opGetBucketLocation: // GetBucketLocation has custom parsing logic r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) @@ -72,3 +68,8 @@ type sseCustomerKeyGetter interface { type copySourceSSECustomerKeyGetter interface { getCopySourceSSECustomerKey() string } + +type endpointARNGetter interface { + getEndpointARN() (arn.Resource, error) + hasEndpointARN() bool +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go index 39b912c2..4b65f715 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go @@ -63,6 +63,20 @@ // See the s3manager package's Downloader type documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader // +// Automatic URI cleaning +// +// Interacting with objects whose keys contain adjacent slashes (e.g. 
bucketname/foo//bar/objectname) +// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct +// used by the service client. +// +// svc := s3.New(sess, &aws.Config{ +// DisableRestProtocolURICleaning: aws.Bool(true), +// }) +// out, err := svc.GetObject(&s3.GetObjectInput { +// Bucket: aws.String("bucketname"), +// Key: aws.String("//foo//bar//moo"), +// }) +// // Get Bucket Region // // GetBucketRegion will attempt to get the region for a bucket using a region diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go new file mode 100644 index 00000000..c4048fbf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go @@ -0,0 +1,233 @@ +package s3 + +import ( + "net/url" + "strings" + + "github.com/aws/aws-sdk-go/aws" + awsarn "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/service/s3/internal/arn" +) + +// Used by shapes with members decorated as endpoint ARN. +func parseEndpointARN(v string) (arn.Resource, error) { + return arn.ParseResource(v, accessPointResourceParser) +} + +func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) { + resParts := arn.SplitResource(a.Resource) + switch resParts[0] { + case "accesspoint": + return arn.ParseAccessPointResource(a, resParts[1:]) + default: + return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"} + } +} + +func endpointHandler(req *request.Request) { + endpoint, ok := req.Params.(endpointARNGetter) + if !ok || !endpoint.hasEndpointARN() { + updateBucketEndpointFromParams(req) + return + } + + resource, err := endpoint.getEndpointARN() + if err != nil { + req.Error = newInvalidARNError(nil, err) + return + } + + resReq := resourceRequest{ + Resource: resource, + Request: req, + } + + if resReq.IsCrossPartition() { + req.Error = newClientPartitionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() { + req.Error = newClientRegionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + if resReq.HasCustomEndpoint() { + req.Error = newInvalidARNWithCustomEndpointError(resource, nil) + return + } + + switch tv := resource.(type) { + case arn.AccessPointARN: + err = updateRequestAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + default: + req.Error = newInvalidARNError(resource, nil) + } +} + +type resourceRequest struct { + Resource arn.Resource + Request *request.Request +} + +func (r resourceRequest) ARN() awsarn.ARN { + return r.Resource.GetARN() +} + +func (r resourceRequest) AllowCrossRegion() bool { + return aws.BoolValue(r.Request.Config.S3UseARNRegion) +} + +func (r resourceRequest) UseFIPS() bool { + return isFIPS(aws.StringValue(r.Request.Config.Region)) +} + +func (r resourceRequest) IsCrossPartition() bool { + return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition +} + +func (r resourceRequest) IsCrossRegion() bool { + return isCrossRegion(r.Request, r.Resource.GetARN().Region) +} + +func (r resourceRequest) HasCustomEndpoint() bool { + return len(aws.StringValue(r.Request.Config.Endpoint)) > 0 +} + +func isFIPS(clientRegion 
string) bool { + return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips") +} +func isCrossRegion(req *request.Request, otherRegion string) bool { + return req.ClientInfo.SigningRegion != otherRegion +} + +func updateBucketEndpointFromParams(r *request.Request) { + bucket, ok := bucketNameFromReqParams(r.Params) + if !ok { + // Ignore operation requests if the bucket name was not provided + // if this is an input validation error the validation handler + // will report it. + return + } + updateEndpointForS3Config(r, bucket) +} + +func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error { + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return newClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points since custom endpoints + // are not supported. + req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := accessPointEndpointBuilder(accessPoint).Build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + + return nil +} + +func removeBucketFromPath(u *url.URL) { + u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) + if u.Path == "" { + u.Path = "/" + } +} + +type accessPointEndpointBuilder arn.AccessPointARN + +const ( + accessPointPrefixLabel = "accesspoint" + accountIDPrefixLabel = "accountID" + accesPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}." +) + +func (a accessPointEndpointBuilder) Build(req *request.Request) error { + resolveRegion := arn.AccessPointARN(a).Region + cfgRegion := aws.StringValue(req.Config.Region) + + if isFIPS(cfgRegion) { + if aws.BoolValue(req.Config.S3UseARNRegion) && isCrossRegion(req, resolveRegion) { + // FIPS with cross region is not supported, the SDK must fail + // because there is no well defined method for SDK to construct a + // correct FIPS endpoint. 
+ return newClientConfiguredForCrossRegionFIPSError(arn.AccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, nil) + } + resolveRegion = cfgRegion + } + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion) + if err != nil { + return newFailedToResolveEndpointError(arn.AccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, err) + } + + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + const serviceEndpointLabel = "s3-accesspoint" + + // dualstack provided by endpoint resolver + cfgHost := req.HTTPRequest.URL.Host + if strings.HasPrefix(cfgHost, "s3") { + req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:] + } + + protocol.HostPrefixBuilder{ + Prefix: accesPointPrefixTemplate, + LabelsFn: a.hostPrefixLabelValues, + }.Build(req) + + req.ClientInfo.SigningName = endpoint.SigningName + req.ClientInfo.SigningRegion = endpoint.SigningRegion + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return newInvalidARNError(arn.AccessPointARN(a), err) + } + + return nil +} + +func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName, + accountIDPrefixLabel: arn.AccessPointARN(a).AccountID, + } +} + +func resolveRegionalEndpoint(r *request.Request, region string) (endpoints.ResolvedEndpoint, error) { + return r.Config.EndpointResolver.EndpointFor(EndpointsID, region, func(opts *endpoints.Options) { + opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL) + opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack) + opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint + }) +} + +func updateRequestEndpoint(r *request.Request, endpoint string) (err error) { + endpoint = endpoints.AddScheme(endpoint, aws.BoolValue(r.Config.DisableSSL)) + + r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath) + if err != nil { + return awserr.New(request.ErrCodeSerialization, + "failed to parse endpoint URL", err) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go new file mode 100644 index 00000000..9df03e78 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go @@ -0,0 +1,151 @@ +package s3 + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/s3/internal/arn" +) + +const ( + invalidARNErrorErrCode = "InvalidARNError" + configurationErrorErrCode = "ConfigurationError" +) + +type invalidARNError struct { + message string + resource arn.Resource + origErr error +} + +func (e invalidARNError) Error() string { + var extra string + if e.resource != nil { + extra = "ARN: " + e.resource.String() + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) +} + +func (e invalidARNError) Code() string { + return invalidARNErrorErrCode +} + +func (e invalidARNError) Message() string { + return e.message +} + +func (e invalidARNError) OrigErr() error { + return e.origErr +} + +func newInvalidARNError(resource arn.Resource, err error) invalidARNError { + return invalidARNError{ + message: "invalid ARN", + origErr: err, + resource: resource, + } +} + +func newInvalidARNWithCustomEndpointError(resource arn.Resource, err error) invalidARNError { + return invalidARNError{ + message: "resource ARN not supported with custom client endpoints", + 
origErr: err, + resource: resource, + } +} + +// ARN not supported for the target partition +func newInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) invalidARNError { + return invalidARNError{ + message: "resource ARN not supported for the target ARN partition", + origErr: err, + resource: resource, + } +} + +type configurationError struct { + message string + resource arn.Resource + clientPartitionID string + clientRegion string + origErr error +} + +func (e configurationError) Error() string { + extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s", + e.resource, e.clientPartitionID, e.clientRegion) + + return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) +} + +func (e configurationError) Code() string { + return configurationErrorErrCode +} + +func (e configurationError) Message() string { + return e.message +} + +func (e configurationError) OrigErr() error { + return e.origErr +} + +func newClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { + return configurationError{ + message: "client partition does not match provided ARN partition", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +func newClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { + return configurationError{ + message: "client region does not match provided ARN region", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +func newFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { + return configurationError{ + message: "endpoint resolver failed to find an endpoint for the provided ARN region", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +func newClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { + return configurationError{ + message: "client configured for fips but cross-region resource ARN provided", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +func newClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { + return configurationError{ + message: "client configured for S3 Accelerate but is supported with resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +func newClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { + return configurationError{ + message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go index 931cb17b..49aeff16 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go @@ -13,6 +13,12 @@ const ( // ErrCodeBucketAlreadyOwnedByYou for service response error code // 
"BucketAlreadyOwnedByYou". + // + // The bucket you tried to create already exists, and you own it. Amazon S3 + // returns this error in all AWS Regions except in the North Virginia Region. + // For legacy compatibility, if you re-create an existing bucket that you already + // own in the North Virginia Region, Amazon S3 returns 200 OK and resets the + // bucket access control lists (ACLs). ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" // ErrCodeNoSuchBucket for service response error code @@ -36,13 +42,13 @@ const ( // ErrCodeObjectAlreadyInActiveTierError for service response error code // "ObjectAlreadyInActiveTierError". // - // This operation is not allowed against this storage tier + // This operation is not allowed against this storage tier. ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError" // ErrCodeObjectNotInActiveTierError for service response error code // "ObjectNotInActiveTierError". // // The source object of the COPY operation is not in the active tier and is - // only stored in Amazon Glacier. + // only stored in Amazon S3 Glacier. ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go index a7fbc2de..81cdec1a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go @@ -30,10 +30,10 @@ var accelerateOpBlacklist = operationBlacklist{ opListBuckets, opCreateBucket, opDeleteBucket, } -// Request handler to automatically add the bucket name to the endpoint domain +// Automatically add the bucket name to the endpoint domain // if possible. This style of bucket is valid for all bucket names which are // DNS compatible and do not contain "." -func updateEndpointForS3Config(r *request.Request) { +func updateEndpointForS3Config(r *request.Request, bucketName string) { forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle) accelerate := aws.BoolValue(r.Config.S3UseAccelerate) @@ -43,45 +43,29 @@ func updateEndpointForS3Config(r *request.Request) { r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.") } } - updateEndpointForAccelerate(r) + updateEndpointForAccelerate(r, bucketName) } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation { - updateEndpointForHostStyle(r) + updateEndpointForHostStyle(r, bucketName) } } -func updateEndpointForHostStyle(r *request.Request) { - bucket, ok := bucketNameFromReqParams(r.Params) - if !ok { - // Ignore operation requests if the bucketname was not provided - // if this is an input validation error the validation handler - // will report it. - return - } - - if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { +func updateEndpointForHostStyle(r *request.Request, bucketName string) { + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) { // bucket name must be valid to put into the host return } - moveBucketToHost(r.HTTPRequest.URL, bucket) + moveBucketToHost(r.HTTPRequest.URL, bucketName) } var ( accelElem = []byte("s3-accelerate.dualstack.") ) -func updateEndpointForAccelerate(r *request.Request) { - bucket, ok := bucketNameFromReqParams(r.Params) - if !ok { - // Ignore operation requests if the bucketname was not provided - // if this is an input validation error the validation handler - // will report it. 
- return - } - - if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { +func updateEndpointForAccelerate(r *request.Request, bucketName string) { + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) { r.Error = awserr.New("InvalidParameterException", - fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket), + fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucketName), nil) return } @@ -106,7 +90,7 @@ func updateEndpointForAccelerate(r *request.Request) { r.HTTPRequest.URL.Host = strings.Join(parts, ".") - moveBucketToHost(r.HTTPRequest.URL, bucket) + moveBucketToHost(r.HTTPRequest.URL, bucketName) } // Attempts to retrieve the bucket name from the request input parameters. @@ -148,8 +132,5 @@ func dnsCompatibleBucketName(bucket string) bool { // moveBucketToHost moves the bucket name from the URI path to URL host. func moveBucketToHost(u *url.URL, bucket string) { u.Host = bucket + "." + u.Host - u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) - if u.Path == "" { - u.Path = "/" - } + removeBucketFromPath(u) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go new file mode 100644 index 00000000..2f93f96f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go @@ -0,0 +1,45 @@ +package arn + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/arn" +) + +// AccessPointARN provides representation +type AccessPointARN struct { + arn.ARN + AccessPointName string +} + +// GetARN returns the base ARN for the Access Point resource +func (a AccessPointARN) GetARN() arn.ARN { + return a.ARN +} + +// ParseAccessPointResource attempts to parse the ARN's resource as an +// AccessPoint resource. +func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) { + if len(a.Region) == 0 { + return AccessPointARN{}, InvalidARNError{a, "region not set"} + } + if len(a.AccountID) == 0 { + return AccessPointARN{}, InvalidARNError{a, "account-id not set"} + } + if len(resParts) == 0 { + return AccessPointARN{}, InvalidARNError{a, "resource-id not set"} + } + if len(resParts) > 1 { + return AccessPointARN{}, InvalidARNError{a, "sub resource not supported"} + } + + resID := resParts[0] + if len(strings.TrimSpace(resID)) == 0 { + return AccessPointARN{}, InvalidARNError{a, "resource-id not set"} + } + + return AccessPointARN{ + ARN: a, + AccessPointName: resID, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go new file mode 100644 index 00000000..a942d887 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go @@ -0,0 +1,71 @@ +package arn + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/arn" +) + +// Resource provides the interfaces abstracting ARNs of specific resource +// types. +type Resource interface { + GetARN() arn.ARN + String() string +} + +// ResourceParser provides the function for parsing an ARN's resource +// component into a typed resource. +type ResourceParser func(arn.ARN) (Resource, error) + +// ParseResource parses an AWS ARN into a typed resource for the S3 API. 
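// As a sketch of what this accepts (the ARN values below are examples, not
// taken from this change): an access point ARN such as
//
//	arn:aws:s3:us-west-2:123456789012:accesspoint/myendpoint
//
// passes the partition, service, and resource checks below and is handed to
// resParser, which for "accesspoint" resources returns an arn.AccessPointARN
// carrying the region, account ID, and access point name that are later used
// to build the request endpoint.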
+func ParseResource(s string, resParser ResourceParser) (resARN Resource, err error) { + a, err := arn.Parse(s) + if err != nil { + return nil, err + } + + if len(a.Partition) == 0 { + return nil, InvalidARNError{a, "partition not set"} + } + if a.Service != "s3" { + return nil, InvalidARNError{a, "service is not S3"} + } + if len(a.Resource) == 0 { + return nil, InvalidARNError{a, "resource not set"} + } + + return resParser(a) +} + +// SplitResource splits the resource components by the ARN resource delimiters. +func SplitResource(v string) []string { + var parts []string + var offset int + + for offset <= len(v) { + idx := strings.IndexAny(v[offset:], "/:") + if idx < 0 { + parts = append(parts, v[offset:]) + break + } + parts = append(parts, v[offset:idx+offset]) + offset += idx + 1 + } + + return parts +} + +// IsARN returns whether the given string is an ARN +func IsARN(s string) bool { + return arn.IsARN(s) +} + +// InvalidARNError provides the error for an invalid ARN error. +type InvalidARNError struct { + ARN arn.ARN + Reason string +} + +func (e InvalidARNError) Error() string { + return "invalid Amazon S3 ARN, " + e.Reason + ", " + e.ARN.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go index 18215574..22bd0b7c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go @@ -273,7 +273,7 @@ type DeleteObjectsIterator struct { inc bool } -// Next will increment the default iterator's index and and ensure that there +// Next will increment the default iterator's index and ensure that there // is another object to iterator to. func (iter *DeleteObjectsIterator) Next() bool { if iter.inc { @@ -458,7 +458,7 @@ type DownloadObjectsIterator struct { inc bool } -// Next will increment the default iterator's index and and ensure that there +// Next will increment the default iterator's index and ensure that there // is another object to iterator to. func (batcher *DownloadObjectsIterator) Next() bool { if batcher.inc { @@ -497,7 +497,7 @@ type UploadObjectsIterator struct { inc bool } -// Next will increment the default iterator's index and and ensure that there +// Next will increment the default iterator's index and ensure that there // is another object to iterator to. func (batcher *UploadObjectsIterator) Next() bool { if batcher.inc { diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go new file mode 100644 index 00000000..f1d9e85c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go @@ -0,0 +1,81 @@ +package s3manager + +import ( + "io" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// BufferedReadSeeker is buffered io.ReadSeeker +type BufferedReadSeeker struct { + r io.ReadSeeker + buffer []byte + readIdx, writeIdx int +} + +// NewBufferedReadSeeker returns a new BufferedReadSeeker +// if len(b) == 0 then the buffer will be initialized to 64 KiB. 
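// For instance (illustrative only; f stands for any io.ReadSeeker such as an
// *os.File): NewBufferedReadSeeker(f, nil) buffers reads through a 64 KiB
// buffer allocated here, while NewBufferedReadSeeker(f, make([]byte, 1<<20))
// reuses the caller-supplied 1 MiB buffer instead.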
+func NewBufferedReadSeeker(r io.ReadSeeker, b []byte) *BufferedReadSeeker { + if len(b) == 0 { + b = make([]byte, 64*1024) + } + return &BufferedReadSeeker{r: r, buffer: b} +} + +func (b *BufferedReadSeeker) reset(r io.ReadSeeker) { + b.r = r + b.readIdx, b.writeIdx = 0, 0 +} + +// Read will read up len(p) bytes into p and will return +// the number of bytes read and any error that occurred. +// If the len(p) > the buffer size then a single read request +// will be issued to the underlying io.ReadSeeker for len(p) bytes. +// A Read request will at most perform a single Read to the underlying +// io.ReadSeeker, and may return < len(p) if serviced from the buffer. +func (b *BufferedReadSeeker) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return n, err + } + + if b.readIdx == b.writeIdx { + if len(p) >= len(b.buffer) { + n, err = b.r.Read(p) + return n, err + } + b.readIdx, b.writeIdx = 0, 0 + + n, err = b.r.Read(b.buffer) + if n == 0 { + return n, err + } + + b.writeIdx += n + } + + n = copy(p, b.buffer[b.readIdx:b.writeIdx]) + b.readIdx += n + + return n, err +} + +// Seek will position then underlying io.ReadSeeker to the given offset +// and will clear the buffer. +func (b *BufferedReadSeeker) Seek(offset int64, whence int) (int64, error) { + n, err := b.r.Seek(offset, whence) + + b.reset(b.r) + + return n, err +} + +// ReadAt will read up to len(p) bytes at the given file offset. +// This will result in the buffer being cleared. +func (b *BufferedReadSeeker) ReadAt(p []byte, off int64) (int, error) { + _, err := b.Seek(off, sdkio.SeekStart) + if err != nil { + return 0, err + } + + return b.Read(p) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go new file mode 100644 index 00000000..42276530 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go @@ -0,0 +1,7 @@ +// +build !windows + +package s3manager + +func defaultUploadBufferProvider() ReadSeekerWriteToProvider { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go new file mode 100644 index 00000000..687082c3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go @@ -0,0 +1,5 @@ +package s3manager + +func defaultUploadBufferProvider() ReadSeekerWriteToProvider { + return NewBufferedReadSeekerWriteToPool(1024 * 1024) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go new file mode 100644 index 00000000..ada50c24 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go @@ -0,0 +1,7 @@ +// +build !windows + +package s3manager + +func defaultDownloadBufferProvider() WriterReadFromProvider { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go new file mode 100644 index 00000000..7e9d9579 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go @@ -0,0 +1,5 @@ +package s3manager + +func defaultDownloadBufferProvider() WriterReadFromProvider { + return NewPooledBufferedWriterReadFromProvider(1024 * 1024) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go index 4a9ad65e..4b54b7c0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go @@ -25,13 +25,25 @@ const DefaultDownloadPartSize = 1024 * 1024 * 5 // when using Download(). const DefaultDownloadConcurrency = 5 +type errReadingBody struct { + err error +} + +func (e *errReadingBody) Error() string { + return fmt.Sprintf("failed to read part body: %v", e.err) +} + +func (e *errReadingBody) Unwrap() error { + return e.err +} + // The Downloader structure that calls Download(). It is safe to call Download() // on this structure for multiple objects and across concurrent goroutines. // Mutating the Downloader's properties is not safe to be done concurrently. type Downloader struct { - // The buffer size (in bytes) to use when buffering data into chunks and - // sending them as parts to S3. The minimum allowed part size is 5MB, and - // if this value is set to zero, the DefaultDownloadPartSize value will be used. + // The size (in bytes) to request from S3 for each part. + // The minimum allowed part size is 5MB, and if this value is set to zero, + // the DefaultDownloadPartSize value will be used. // // PartSize is ignored if the Range input parameter is provided. PartSize int64 @@ -50,6 +62,14 @@ type Downloader struct { // List of request options that will be passed down to individual API // operation requests made by the downloader. RequestOptions []request.Option + + // Defines the buffer strategy used when downloading a part. + // + // If a WriterReadFromProvider is given the Download manager + // will pass the io.WriterAt of the Download request to the provider + // and will use the returned WriterReadFrom from the provider as the + // destination writer when copying from http response body. + BufferProvider WriterReadFromProvider } // WithDownloaderRequestOptions appends to the Downloader's API request options. @@ -77,10 +97,15 @@ func WithDownloaderRequestOptions(opts ...request.Option) func(*Downloader) { // d.PartSize = 64 * 1024 * 1024 // 64MB per part // }) func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader { + return newDownloader(s3.New(c), options...) 
+} + +func newDownloader(client s3iface.S3API, options ...func(*Downloader)) *Downloader { d := &Downloader{ - S3: s3.New(c), - PartSize: DefaultDownloadPartSize, - Concurrency: DefaultDownloadConcurrency, + S3: client, + PartSize: DefaultDownloadPartSize, + Concurrency: DefaultDownloadConcurrency, + BufferProvider: defaultDownloadBufferProvider(), } for _, option := range options { option(d) @@ -99,7 +124,7 @@ func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downl // sess := session.Must(session.NewSession()) // // // The S3 client the S3 Downloader will use -// s3Svc := s3.new(sess) +// s3Svc := s3.New(sess) // // // Create a downloader with the s3 client and default options // downloader := s3manager.NewDownloaderWithClient(s3Svc) @@ -109,16 +134,7 @@ func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downl // d.PartSize = 64 * 1024 * 1024 // 64MB per part // }) func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader { - d := &Downloader{ - S3: svc, - PartSize: DefaultDownloadPartSize, - Concurrency: DefaultDownloadConcurrency, - } - for _, option := range options { - option(d) - } - - return d + return newDownloader(svc, options...) } type maxRetrier interface { @@ -126,7 +142,8 @@ type maxRetrier interface { } // Download downloads an object in S3 and writes the payload into w using -// concurrent GET requests. +// concurrent GET requests. The n int64 returned is the size of the object downloaded +// in bytes. // // Additional functional options can be provided to configure the individual // download. These options are copies of the Downloader instance Download is called from. @@ -148,7 +165,8 @@ func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options .. } // DownloadWithContext downloads an object in S3 and writes the payload into w -// using concurrent GET requests. +// using concurrent GET requests. The n int64 returned is the size of the object downloaded +// in bytes. // // DownloadWithContext is the same as Download with the additional support for // Context input parameters. The Context must not be nil. A nil Context will @@ -403,18 +421,20 @@ func (d *downloader) downloadChunk(chunk dlchunk) error { var n int64 var err error for retry := 0; retry <= d.partBodyMaxRetries; retry++ { - var resp *s3.GetObjectOutput - resp, err = d.cfg.S3.GetObjectWithContext(d.ctx, in, d.cfg.RequestOptions...) - if err != nil { - return err - } - d.setTotalBytes(resp) // Set total if not yet set. - - n, err = io.Copy(&chunk, resp.Body) - resp.Body.Close() + n, err = d.tryDownloadChunk(in, &chunk) if err == nil { break } + // Check if the returned error is an errReadingBody. + // If err is errReadingBody this indicates that an error + // occurred while copying the http response body. + // If this occurs we unwrap the err to set the underlying error + // and attempt any remaining retries. + if bodyErr, ok := err.(*errReadingBody); ok { + err = bodyErr.Unwrap() + } else { + return err + } chunk.cur = 0 logMessage(d.cfg.S3, aws.LogDebugWithRequestRetries, @@ -427,6 +447,28 @@ func (d *downloader) downloadChunk(chunk dlchunk) error { return err } +func (d *downloader) tryDownloadChunk(in *s3.GetObjectInput, w io.Writer) (int64, error) { + cleanup := func() {} + if d.cfg.BufferProvider != nil { + w, cleanup = d.cfg.BufferProvider.GetReadFrom(w) + } + defer cleanup() + + resp, err := d.cfg.S3.GetObjectWithContext(d.ctx, in, d.cfg.RequestOptions...) 
+ if err != nil { + return 0, err + } + d.setTotalBytes(resp) // Set total if not yet set. + + n, err := io.Copy(w, resp.Body) + resp.Body.Close() + if err != nil { + return n, &errReadingBody{err: err} + } + + return n, nil +} + func logMessage(svc s3iface.S3API, level aws.LogLevelType, msg string) { s, ok := svc.(*s3.S3) if !ok { diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/pool.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/pool.go new file mode 100644 index 00000000..05113286 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/pool.go @@ -0,0 +1,244 @@ +package s3manager + +import ( + "fmt" + "sync" + + "github.com/aws/aws-sdk-go/aws" +) + +type byteSlicePool interface { + Get(aws.Context) (*[]byte, error) + Put(*[]byte) + ModifyCapacity(int) + SliceSize() int64 + Close() +} + +type maxSlicePool struct { + // allocator is defined as a function pointer to allow + // for test cases to instrument custom tracers when allocations + // occur. + allocator sliceAllocator + + slices chan *[]byte + allocations chan struct{} + capacityChange chan struct{} + + max int + sliceSize int64 + + mtx sync.RWMutex +} + +func newMaxSlicePool(sliceSize int64) *maxSlicePool { + p := &maxSlicePool{sliceSize: sliceSize} + p.allocator = p.newSlice + + return p +} + +var errZeroCapacity = fmt.Errorf("get called on zero capacity pool") + +func (p *maxSlicePool) Get(ctx aws.Context) (*[]byte, error) { + // check if context is canceled before attempting to get a slice + // this ensures priority is given to the cancel case first + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + p.mtx.RLock() + + for { + select { + case bs, ok := <-p.slices: + p.mtx.RUnlock() + if !ok { + // attempt to get on a zero capacity pool + return nil, errZeroCapacity + } + return bs, nil + case _, ok := <-p.allocations: + p.mtx.RUnlock() + if !ok { + // attempt to get on a zero capacity pool + return nil, errZeroCapacity + } + return p.allocator(), nil + case <-ctx.Done(): + p.mtx.RUnlock() + return nil, ctx.Err() + default: + // In the event that there are no slices or allocations available + // This prevents some deadlock situations that can occur around sync.RWMutex + // When a lock request occurs on ModifyCapacity, no new readers are allowed to acquire a read lock. + // By releasing the read lock here and waiting for a notification, we prevent a deadlock situation where + // Get could hold the read lock indefinitely waiting for capacity, ModifyCapacity is waiting for a write lock, + // and a Put is blocked trying to get a read-lock which is blocked by ModifyCapacity. + + // Short-circuit if the pool capacity is zero. + if p.max == 0 { + p.mtx.RUnlock() + return nil, errZeroCapacity + } + + // Since we will be releasing the read-lock we need to take the reference to the channel. + // Since channels are references we will still get notified if slices are added, or if + // the channel is closed due to a capacity modification. This specifically avoids a data race condition + // where ModifyCapacity both closes a channel and initializes a new one while we don't have a read-lock. 
+ c := p.capacityChange + + p.mtx.RUnlock() + + select { + case _ = <-c: + p.mtx.RLock() + case <-ctx.Done(): + return nil, ctx.Err() + } + } + } +} + +func (p *maxSlicePool) Put(bs *[]byte) { + p.mtx.RLock() + defer p.mtx.RUnlock() + + if p.max == 0 { + return + } + + select { + case p.slices <- bs: + p.notifyCapacity() + default: + // If the new channel when attempting to add the slice then we drop the slice. + // The logic here is to prevent a deadlock situation if channel is already at max capacity. + // Allows us to reap allocations that are returned and are no longer needed. + } +} + +func (p *maxSlicePool) ModifyCapacity(delta int) { + if delta == 0 { + return + } + + p.mtx.Lock() + defer p.mtx.Unlock() + + p.max += delta + + if p.max == 0 { + p.empty() + return + } + + if p.capacityChange != nil { + close(p.capacityChange) + } + p.capacityChange = make(chan struct{}, p.max) + + origAllocations := p.allocations + p.allocations = make(chan struct{}, p.max) + + newAllocs := len(origAllocations) + delta + for i := 0; i < newAllocs; i++ { + p.allocations <- struct{}{} + } + + if origAllocations != nil { + close(origAllocations) + } + + origSlices := p.slices + p.slices = make(chan *[]byte, p.max) + if origSlices == nil { + return + } + + close(origSlices) + for bs := range origSlices { + select { + case p.slices <- bs: + default: + // If the new channel blocks while adding slices from the old channel + // then we drop the slice. The logic here is to prevent a deadlock situation + // if the new channel has a smaller capacity then the old. + } + } +} + +func (p *maxSlicePool) notifyCapacity() { + select { + case p.capacityChange <- struct{}{}: + default: + // This *shouldn't* happen as the channel is both buffered to the max pool capacity size and is resized + // on capacity modifications. This is just a safety to ensure that a blocking situation can't occur. 
+ } +} + +func (p *maxSlicePool) SliceSize() int64 { + return p.sliceSize +} + +func (p *maxSlicePool) Close() { + p.mtx.Lock() + defer p.mtx.Unlock() + p.empty() +} + +func (p *maxSlicePool) empty() { + p.max = 0 + + if p.capacityChange != nil { + close(p.capacityChange) + p.capacityChange = nil + } + + if p.allocations != nil { + close(p.allocations) + for range p.allocations { + // drain channel + } + p.allocations = nil + } + + if p.slices != nil { + close(p.slices) + for range p.slices { + // drain channel + } + p.slices = nil + } +} + +func (p *maxSlicePool) newSlice() *[]byte { + bs := make([]byte, p.sliceSize) + return &bs +} + +type returnCapacityPoolCloser struct { + byteSlicePool + returnCapacity int +} + +func (n *returnCapacityPoolCloser) ModifyCapacity(delta int) { + if delta > 0 { + n.returnCapacity = -1 * delta + } + n.byteSlicePool.ModifyCapacity(delta) +} + +func (n *returnCapacityPoolCloser) Close() { + if n.returnCapacity < 0 { + n.byteSlicePool.ModifyCapacity(n.returnCapacity) + } +} + +type sliceAllocator func() *[]byte + +var newByteSlicePool = func(sliceSize int64) byteSlicePool { + return newMaxSlicePool(sliceSize) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go new file mode 100644 index 00000000..f62e1a45 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go @@ -0,0 +1,65 @@ +package s3manager + +import ( + "io" + "sync" +) + +// ReadSeekerWriteTo defines an interface implementing io.WriteTo and io.ReadSeeker +type ReadSeekerWriteTo interface { + io.ReadSeeker + io.WriterTo +} + +// BufferedReadSeekerWriteTo wraps a BufferedReadSeeker with an io.WriteAt +// implementation. +type BufferedReadSeekerWriteTo struct { + *BufferedReadSeeker +} + +// WriteTo writes to the given io.Writer from BufferedReadSeeker until there's no more data to write or +// an error occurs. Returns the number of bytes written and any error encountered during the write. +func (b *BufferedReadSeekerWriteTo) WriteTo(writer io.Writer) (int64, error) { + return io.Copy(writer, b.BufferedReadSeeker) +} + +// ReadSeekerWriteToProvider provides an implementation of io.WriteTo for an io.ReadSeeker +type ReadSeekerWriteToProvider interface { + GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) +} + +// BufferedReadSeekerWriteToPool uses a sync.Pool to create and reuse +// []byte slices for buffering parts in memory +type BufferedReadSeekerWriteToPool struct { + pool sync.Pool +} + +// NewBufferedReadSeekerWriteToPool will return a new BufferedReadSeekerWriteToPool that will create +// a pool of reusable buffers . If size is less then < 64 KiB then the buffer +// will default to 64 KiB. Reason: io.Copy from writers or readers that don't support io.WriteTo or io.ReadFrom +// respectively will default to copying 32 KiB. +func NewBufferedReadSeekerWriteToPool(size int) *BufferedReadSeekerWriteToPool { + if size < 65536 { + size = 65536 + } + + return &BufferedReadSeekerWriteToPool{ + pool: sync.Pool{New: func() interface{} { + return make([]byte, size) + }}, + } +} + +// GetWriteTo will wrap the provided io.ReadSeeker with a BufferedReadSeekerWriteTo. +// The provided cleanup must be called after operations have been completed on the +// returned io.ReadSeekerWriteTo in order to signal the return of resources to the pool. 
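// A minimal usage sketch (pool, part and dst are placeholder names, not part
// of this change):
//
//	w, cleanup := pool.GetWriteTo(part) // part is any io.ReadSeeker
//	_, err := w.WriteTo(dst)            // buffered copy into an io.Writer
//	cleanup()                           // return the buffer to the pool
//
// Skipping cleanup does not leak memory, but it stops the buffer from being
// reused for later parts.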
+func (p *BufferedReadSeekerWriteToPool) GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) { + buffer := p.pool.Get().([]byte) + + r = &BufferedReadSeekerWriteTo{BufferedReadSeeker: NewBufferedReadSeeker(seeker, buffer)} + cleanup = func() { + p.pool.Put(buffer) + } + + return r, cleanup +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go index da1838c9..8770d404 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go @@ -145,8 +145,13 @@ type Uploader struct { // MaxUploadParts is the max number of parts which will be uploaded to S3. // Will be used to calculate the partsize of the object to be uploaded. // E.g: 5GB file, with MaxUploadParts set to 100, will upload the file - // as 100, 50MB parts. - // With a limited of s3.MaxUploadParts (10,000 parts). + // as 100, 50MB parts. With a limited of s3.MaxUploadParts (10,000 parts). + // + // MaxUploadParts must not be used to limit the total number of bytes uploaded. + // Use a type like to io.LimitReader (https://golang.org/pkg/io/#LimitedReader) + // instead. An io.LimitReader is helpful when uploading an unbounded reader + // to S3, and you know its maximum size. Otherwise the reader's io.EOF returned + // error must be used to signal end of stream. // // Defaults to package const's MaxUploadParts value. MaxUploadParts int @@ -157,6 +162,12 @@ type Uploader struct { // List of request options that will be passed down to individual API // operation requests made by the uploader. RequestOptions []request.Option + + // Defines the buffer strategy used when uploading a part + BufferProvider ReadSeekerWriteToProvider + + // partPool allows for the re-usage of streaming payload part buffers between upload calls + partPool byteSlicePool } // NewUploader creates a new Uploader instance to upload objects to S3. Pass In @@ -176,18 +187,25 @@ type Uploader struct { // u.PartSize = 64 * 1024 * 1024 // 64MB per part // }) func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader { + return newUploader(s3.New(c), options...) +} + +func newUploader(client s3iface.S3API, options ...func(*Uploader)) *Uploader { u := &Uploader{ - S3: s3.New(c), + S3: client, PartSize: DefaultUploadPartSize, Concurrency: DefaultUploadConcurrency, LeavePartsOnError: false, MaxUploadParts: MaxUploadParts, + BufferProvider: defaultUploadBufferProvider(), } for _, option := range options { option(u) } + u.partPool = newByteSlicePool(u.PartSize) + return u } @@ -210,19 +228,7 @@ func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader // u.PartSize = 64 * 1024 * 1024 // 64MB per part // }) func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader { - u := &Uploader{ - S3: svc, - PartSize: DefaultUploadPartSize, - Concurrency: DefaultUploadConcurrency, - LeavePartsOnError: false, - MaxUploadParts: MaxUploadParts, - } - - for _, option := range options { - option(u) - } - - return u + return newUploader(svc, options...) } // Upload uploads an object to S3, intelligently buffering large files into @@ -282,6 +288,7 @@ func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts .. 
for _, opt := range opts { opt(&i.cfg) } + i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager")) return i.upload() @@ -351,14 +358,15 @@ type uploader struct { readerPos int64 // current reader position totalSize int64 // set to -1 if the size is not known - - bufferPool sync.Pool } // internal logic for deciding whether to upload a single part or use a // multipart upload. func (u *uploader) upload() (*UploadOutput, error) { - u.init() + if err := u.init(); err != nil { + return nil, awserr.New("ReadRequestBody", "unable to initialize upload", err) + } + defer u.cfg.partPool.Close() if u.cfg.PartSize < MinUploadPartSize { msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize) @@ -366,19 +374,20 @@ func (u *uploader) upload() (*UploadOutput, error) { } // Do one read to determine if we have more than one part - reader, _, part, err := u.nextReader() + reader, _, cleanup, err := u.nextReader() if err == io.EOF { // single part - return u.singlePart(reader) + return u.singlePart(reader, cleanup) } else if err != nil { + cleanup() return nil, awserr.New("ReadRequestBody", "read upload data failed", err) } mu := multiuploader{uploader: u} - return mu.upload(reader, part) + return mu.upload(reader, cleanup) } // init will initialize all default options. -func (u *uploader) init() { +func (u *uploader) init() error { if u.cfg.Concurrency == 0 { u.cfg.Concurrency = DefaultUploadConcurrency } @@ -389,24 +398,35 @@ func (u *uploader) init() { u.cfg.MaxUploadParts = MaxUploadParts } - u.bufferPool = sync.Pool{ - New: func() interface{} { return make([]byte, u.cfg.PartSize) }, + // Try to get the total size for some optimizations + if err := u.initSize(); err != nil { + return err } - // Try to get the total size for some optimizations - u.initSize() + // If PartSize was changed or partPool was never setup then we need to allocated a new pool + // so that we return []byte slices of the correct size + poolCap := u.cfg.Concurrency + 1 + if u.cfg.partPool == nil || u.cfg.partPool.SliceSize() != u.cfg.PartSize { + u.cfg.partPool = newByteSlicePool(u.cfg.PartSize) + u.cfg.partPool.ModifyCapacity(poolCap) + } else { + u.cfg.partPool = &returnCapacityPoolCloser{byteSlicePool: u.cfg.partPool} + u.cfg.partPool.ModifyCapacity(poolCap) + } + + return nil } // initSize tries to detect the total stream size, setting u.totalSize. If // the size is not known, totalSize is set to -1. -func (u *uploader) initSize() { +func (u *uploader) initSize() error { u.totalSize = -1 switch r := u.in.Body.(type) { case io.Seeker: n, err := aws.SeekerLen(r) if err != nil { - return + return err } u.totalSize = n @@ -418,17 +438,15 @@ func (u *uploader) initSize() { u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1 } } + + return nil } // nextReader returns a seekable reader representing the next packet of data. // This operation increases the shared u.readerPos counter, but note that it // does not need to be wrapped in a mutex because nextReader is only called // from the main thread. 
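// With the pooled buffers introduced in this change, the caller pattern
// becomes (sketch; the names mirror the surrounding diff):
//
//	reader, n, cleanup, err := u.nextReader()
//	// hand reader off as a chunk, then call cleanup() once the part has
//	// been sent so the pooled buffer (or WriteTo wrapper) can be reused
//
// instead of passing the raw []byte part alongside the reader as before.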
-func (u *uploader) nextReader() (io.ReadSeeker, int, []byte, error) { - type readerAtSeeker interface { - io.ReaderAt - io.ReadSeeker - } +func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) { switch r := u.in.Body.(type) { case readerAtSeeker: var err error @@ -443,17 +461,36 @@ func (u *uploader) nextReader() (io.ReadSeeker, int, []byte, error) { } } - reader := io.NewSectionReader(r, u.readerPos, n) + var ( + reader io.ReadSeeker + cleanup func() + ) + + reader = io.NewSectionReader(r, u.readerPos, n) + if u.cfg.BufferProvider != nil { + reader, cleanup = u.cfg.BufferProvider.GetWriteTo(reader) + } else { + cleanup = func() {} + } + u.readerPos += n - return reader, int(n), nil, err + return reader, int(n), cleanup, err default: - part := u.bufferPool.Get().([]byte) - n, err := readFillBuf(r, part) + part, err := u.cfg.partPool.Get(u.ctx) + if err != nil { + return nil, 0, func() {}, err + } + + n, err := readFillBuf(r, *part) u.readerPos += int64(n) - return bytes.NewReader(part[0:n]), n, part, err + cleanup := func() { + u.cfg.partPool.Put(part) + } + + return bytes.NewReader((*part)[0:n]), n, cleanup, err } } @@ -470,10 +507,12 @@ func readFillBuf(r io.Reader, b []byte) (offset int, err error) { // singlePart contains upload logic for uploading a single chunk via // a regular PutObject request. Multipart requests require at least two // parts, or at least 5MB of data. -func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) { +func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) { + defer cleanup() + params := &s3.PutObjectInput{} awsutil.Copy(params, u.in) - params.Body = buf + params.Body = r // Need to use request form because URL generated in request is // used in return. @@ -503,9 +542,9 @@ type multiuploader struct { // keeps track of a single chunk of data being sent to S3. type chunk struct { - buf io.ReadSeeker - part []byte - num int64 + buf io.ReadSeeker + num int64 + cleanup func() } // completedParts is a wrapper to make parts sortable by their part number, @@ -518,13 +557,14 @@ func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].Pa // upload will perform a multipart upload using the firstBuf buffer containing // the first chunk of data. -func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*UploadOutput, error) { +func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) { params := &s3.CreateMultipartUploadInput{} awsutil.Copy(params, u.in) // Create the multipart resp, err := u.cfg.S3.CreateMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...) if err != nil { + cleanup() return nil, err } u.uploadID = *resp.UploadId @@ -538,46 +578,29 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*Uploa // Send part 1 to the workers var num int64 = 1 - ch <- chunk{buf: firstBuf, part: firstPart, num: num} + ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup} // Read and queue the rest of the parts for u.geterr() == nil && err == nil { - num++ - // This upload exceeded maximum number of supported parts, error now. - if num > int64(u.cfg.MaxUploadParts) || num > int64(MaxUploadParts) { - var msg string - if num > int64(u.cfg.MaxUploadParts) { - msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit", - u.cfg.MaxUploadParts) - } else { - msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). 
Adjust PartSize to fit in this limit", - MaxUploadParts) + var ( + reader io.ReadSeeker + nextChunkLen int + ok bool + ) + + reader, nextChunkLen, cleanup, err = u.nextReader() + ok, err = u.shouldContinue(num, nextChunkLen, err) + if !ok { + cleanup() + if err != nil { + u.seterr(err) } - u.seterr(awserr.New("TotalPartsExceeded", msg, nil)) - break - } - - var reader io.ReadSeeker - var nextChunkLen int - var part []byte - reader, nextChunkLen, part, err = u.nextReader() - - if err != nil && err != io.EOF { - u.seterr(awserr.New( - "ReadRequestBody", - "read multipart upload data failed", - err)) break } - if nextChunkLen == 0 { - // No need to upload empty part, if file was empty to start - // with empty single part would of been created and never - // started multipart upload. - break - } + num++ - ch <- chunk{buf: reader, part: part, num: num} + ch <- chunk{buf: reader, num: num, cleanup: cleanup} } // Close the channel, wait for workers, and complete upload @@ -602,6 +625,7 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*Uploa Key: u.in.Key, }) getReq.Config.Credentials = credentials.AnonymousCredentials + getReq.SetContext(u.ctx) uploadLocation, _, _ := getReq.PresignRequest(1) return &UploadOutput{ @@ -611,6 +635,35 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*Uploa }, nil } +func (u *multiuploader) shouldContinue(part int64, nextChunkLen int, err error) (bool, error) { + if err != nil && err != io.EOF { + return false, awserr.New("ReadRequestBody", "read multipart upload data failed", err) + } + + if nextChunkLen == 0 { + // No need to upload empty part, if file was empty to start + // with empty single part would of been created and never + // started multipart upload. + return false, nil + } + + part++ + // This upload exceeded maximum number of supported parts, error now. + if part > int64(u.cfg.MaxUploadParts) || part > int64(MaxUploadParts) { + var msg string + if part > int64(u.cfg.MaxUploadParts) { + msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit", + u.cfg.MaxUploadParts) + } else { + msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit", + MaxUploadParts) + } + return false, awserr.New("TotalPartsExceeded", msg, nil) + } + + return true, err +} + // readChunk runs in worker goroutines to pull chunks off of the ch channel // and send() them as UploadPart requests. func (u *multiuploader) readChunk(ch chan chunk) { @@ -627,6 +680,8 @@ func (u *multiuploader) readChunk(ch chan chunk) { u.seterr(err) } } + + data.cleanup() } } @@ -642,9 +697,8 @@ func (u *multiuploader) send(c chunk) error { SSECustomerKey: u.in.SSECustomerKey, PartNumber: &c.num, } + resp, err := u.cfg.S3.UploadPartWithContext(u.ctx, params, u.cfg.RequestOptions...) 
- // put the byte array back into the pool to conserve memory - u.bufferPool.Put(c.part) if err != nil { return err } @@ -716,3 +770,8 @@ func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput { return resp } + +type readerAtSeeker interface { + io.ReaderAt + io.ReadSeeker +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go index f8ce34fa..c8810c11 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go @@ -12,41 +12,59 @@ import ( // package's PutObjectInput with the exception that the Body member is an // io.Reader instead of an io.ReadSeeker. type UploadInput struct { - _ struct{} `type:"structure" payload:"Body"` + _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` - // The canned ACL to apply to the object. + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` // The readable body payload to send to S3. Body io.Reader - // Name of the bucket to which the PUT operation was initiated. + // Bucket name to which the PUT operation was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies caching behavior along the request/reply chain. + // Can be used to specify caching behavior along the request/reply chain. For + // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - // Specifies presentational information for the object. + // Specifies presentational information for the object. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` // Specifies what content encodings have been applied to the object and thus // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. + // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` // The language the content is in. 
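The reworked Uploader documentation above steers callers toward io.LimitReader, rather than MaxUploadParts, when the number of bytes read from an unbounded stream needs a hard cap. A minimal sketch of that pattern against this SDK, assuming a default session; the region, bucket, key, 5 GiB cap, and 64 MiB part size are illustrative values only.

package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))

	// Any streaming source of unknown length; stdin stands in here.
	src := os.Stdin

	// Cap the bytes read instead of leaning on MaxUploadParts.
	const maxBytes = 5 * 1024 * 1024 * 1024 // 5 GiB
	body := io.LimitReader(src, maxBytes)

	uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
		u.PartSize = 64 * 1024 * 1024 // 64 MiB parts
	})

	out, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String("example-bucket"),      // hypothetical bucket
		Key:    aws.String("streams/payload.bin"), // hypothetical key
		Body:   body,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("uploaded to", out.Location)
}

Because the body is a plain io.Reader rather than an io.ReadSeeker, the uploader takes the buffered path and draws its part buffers from the partPool introduced in this change.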
ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - // The base64-encoded 128-bit MD5 digest of the part data. This parameter is - // auto-populated when using the command from the CLI + // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // according to RFC 1864. This header can be used as a message integrity check + // to verify that the data is the same data that was originally sent. Although + // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end + // integrity check. For more information about REST request authentication, + // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` - // A standard MIME type describing the format of the object data. + // A standard MIME type describing the format of the contents. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // The date and time at which the object is no longer cacheable. + // The date and time at which the object is no longer cacheable. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. @@ -69,7 +87,8 @@ type UploadInput struct { // A map of metadata to store with the object in S3. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - // The Legal Hold status that you want to apply to the specified object. + // Specifies whether a legal hold will be applied to this object. For more information + // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` // The Object Lock mode that you want to apply to this object. @@ -78,38 +97,52 @@ type UploadInput struct { // The date and time when you want this object's Object Lock to expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). 
+ // Specifies the algorithm to use to when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + // Specifies the AWS KMS Encryption Context to use for object encryption. The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If x-amz-server-side-encryption is present and has the value of aws:kms, + // this header specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetrical customer managed customer master key (CMK) that was used for + // the object. + // + // If the value of x-amz-server-side-encryption is aws:kms, this header specifies + // the ID of the symmetric customer managed AWS KMS CMK that will be used for + // the object. If you specify x-amz-server-side-encryption:aws:kms, but do not + // providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS + // managed CMK in AWS to protect the data. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - // The type of storage to use for the object. Defaults to 'STANDARD'. + // If you don't specify, S3 Standard is the default storage class. Amazon S3 + // supports other storage classes. 
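For the SSE-C members above, the SDK transmits the base64-encoded key and, when the caller has not supplied one, fills in the -MD5 header as the base64-encoded MD5 digest of the raw key bytes (see the sse.go customization later in this patch). A sketch of that derivation, using a placeholder 32-byte key literal.

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	// A 256-bit customer-provided key (SSE-C). In practice this comes from a
	// secure key store; the literal is illustrative only.
	rawKey := []byte("0123456789abcdef0123456789abcdef")

	// Header value: the key itself, base64-encoded.
	b64Key := base64.StdEncoding.EncodeToString(rawKey)

	// Integrity header: base64 of the MD5 digest of the raw key bytes.
	sum := md5.Sum(rawKey)
	b64KeyMD5 := base64.StdEncoding.EncodeToString(sum[:])

	fmt.Println("x-amz-server-side-encryption-customer-key:     " + b64Key)
	fmt.Println("x-amz-server-side-encryption-customer-key-md5: " + b64KeyMD5)
}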
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The tag-set for the object. The tag-set must be encoded as URL Query parameters. @@ -118,6 +151,21 @@ type UploadInput struct { // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. + // the value of this header in the object metadata. For information about object + // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html). + // + // In the following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: + // + // x-amz-website-redirect-location: /anotherPage.html + // + // In the following example, the request header sets the object redirect to + // another website: + // + // x-amz-website-redirect-location: http://www.example.com/ + // + // For more information about website hosting in Amazon S3, see Hosting Websites + // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go new file mode 100644 index 00000000..765dc07c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go @@ -0,0 +1,75 @@ +package s3manager + +import ( + "bufio" + "io" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// WriterReadFrom defines an interface implementing io.Writer and io.ReaderFrom +type WriterReadFrom interface { + io.Writer + io.ReaderFrom +} + +// WriterReadFromProvider provides an implementation of io.ReadFrom for the given io.Writer +type WriterReadFromProvider interface { + GetReadFrom(writer io.Writer) (w WriterReadFrom, cleanup func()) +} + +type bufferedWriter interface { + WriterReadFrom + Flush() error + Reset(io.Writer) +} + +type bufferedReadFrom struct { + bufferedWriter +} + +func (b *bufferedReadFrom) ReadFrom(r io.Reader) (int64, error) { + n, err := b.bufferedWriter.ReadFrom(r) + if flushErr := b.Flush(); flushErr != nil && err == nil { + err = flushErr + } + return n, err +} + +// PooledBufferedReadFromProvider is a WriterReadFromProvider that uses a sync.Pool +// to manage allocation and reuse of *bufio.Writer structures. +type PooledBufferedReadFromProvider struct { + pool sync.Pool +} + +// NewPooledBufferedWriterReadFromProvider returns a new PooledBufferedReadFromProvider +// Size is used to control the size of the underlying *bufio.Writer created for +// calls to GetReadFrom. 
+func NewPooledBufferedWriterReadFromProvider(size int) *PooledBufferedReadFromProvider { + if size < int(32*sdkio.KibiByte) { + size = int(64 * sdkio.KibiByte) + } + + return &PooledBufferedReadFromProvider{ + pool: sync.Pool{ + New: func() interface{} { + return &bufferedReadFrom{bufferedWriter: bufio.NewWriterSize(nil, size)} + }, + }, + } +} + +// GetReadFrom takes an io.Writer and wraps it with a type which satisfies the WriterReadFrom +// interface/ Additionally a cleanup function is provided which must be called after usage of the WriterReadFrom +// has been completed in order to allow the reuse of the *bufio.Writer +func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r WriterReadFrom, cleanup func()) { + buffer := p.pool.Get().(*bufferedReadFrom) + buffer.Reset(writer) + r = buffer + cleanup = func() { + buffer.Reset(nil) // Reset to nil writer to release reference + p.pool.Put(buffer) + } + return r, cleanup +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go index d17dcc9d..b4c07b4d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -31,7 +31,7 @@ var initRequest func(*request.Request) const ( ServiceName = "s3" // Name of service. EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "S3" // ServiceID is a unique identifer of a specific service. + ServiceID = "S3" // ServiceID is a unique identifier of a specific service. ) // New creates a new instance of the S3 client with a session. @@ -39,6 +39,8 @@ const ( // aws.Config parameter to add your extra config. // // Example: +// mySession := session.Must(session.NewSession()) +// // // Create a S3 client from just a session. // svc := s3.New(mySession) // @@ -46,11 +48,11 @@ const ( // svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
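The pooled provider added in writer_read_from.go can be exercised directly: GetReadFrom wraps any io.Writer in a flushing bufio.Writer drawn from a sync.Pool and returns a cleanup function that puts the buffer back. A small usage sketch, assuming a hypothetical destination path; the 1 MiB buffer size is arbitrary.

package main

import (
	"log"
	"os"
	"strings"

	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	// One provider can be shared by many goroutines; its sync.Pool recycles
	// the underlying bufio.Writer between GetReadFrom calls.
	provider := s3manager.NewPooledBufferedWriterReadFromProvider(1024 * 1024)

	dst, err := os.Create("/tmp/example.out") // hypothetical destination
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	w, cleanup := provider.GetReadFrom(dst)
	defer cleanup() // return the buffer to the pool once the copy is done

	src := strings.NewReader("bytes that would normally come from an S3 object body")
	if _, err := w.ReadFrom(src); err != nil {
		log.Fatal(err)
	}
}

If the companion Downloader in this SDK revision exposes a matching BufferProvider option, the same provider instance can be shared across concurrent downloads.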
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3 { svc := &S3{ Client: client.New( cfg, @@ -59,6 +61,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2006-03-01", }, @@ -75,6 +78,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + svc.Handlers.BuildStream.PushBackNamed(restxml.BuildHandler) svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler) // Run custom client initialization if present diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go index 8010c4fa..b71c835d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -3,6 +3,7 @@ package s3 import ( "crypto/md5" "encoding/base64" + "net/http" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" @@ -30,25 +31,54 @@ func validateSSERequiresSSL(r *request.Request) { } } -func computeSSEKeys(r *request.Request) { - headers := []string{ - "x-amz-server-side-encryption-customer-key", - "x-amz-copy-source-server-side-encryption-customer-key", +const ( + sseKeyHeader = "x-amz-server-side-encryption-customer-key" + sseKeyMD5Header = sseKeyHeader + "-md5" +) + +func computeSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(sseCustomerKeyGetter); ok { + key = g.getSSECustomerKey() + } + + computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest) +} + +const ( + copySrcSSEKeyHeader = "x-amz-copy-source-server-side-encryption-customer-key" + copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5" +) + +func computeCopySourceSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { + key = g.getCopySourceSSECustomerKey() } - for _, h := range headers { - md5h := h + "-md5" - if key := r.HTTPRequest.Header.Get(h); key != "" { - // Base64-encode the value - b64v := base64.StdEncoding.EncodeToString([]byte(key)) - r.HTTPRequest.Header.Set(h, b64v) - - // Add MD5 if it wasn't computed - if r.HTTPRequest.Header.Get(md5h) == "" { - sum := md5.Sum([]byte(key)) - b64sum := base64.StdEncoding.EncodeToString(sum[:]) - r.HTTPRequest.Header.Set(md5h, b64sum) - } + computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest) +} + +func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) { + if len(key) == 0 { + // Backwards compatiablity where user just set the header value instead + // of using the API parameter, or setting the header value for an + // operation without the parameters modeled. + key = r.Header.Get(keyHeader) + if len(key) == 0 { + return } + + // In backwards compatiable, the header's value is not base64 encoded, + // and needs to be encoded and updated by the SDK's customizations. + b64Key := base64.StdEncoding.EncodeToString([]byte(key)) + r.Header.Set(keyHeader, b64Key) + } + + // Only update Key's MD5 if not already set. 
+ if len(r.Header.Get(keyMD5Header)) == 0 { + sum := md5.Sum([]byte(key)) + keyMD5 := base64.StdEncoding.EncodeToString(sum[:]) + r.Header.Set(keyMD5Header, keyMD5) } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go index fde3050f..247770e4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -2,6 +2,7 @@ package s3 import ( "bytes" + "io" "io/ioutil" "net/http" @@ -14,7 +15,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) { b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", "unable to read response body", err), + awserr.New(request.ErrCodeSerialization, "unable to read response body", err), r.HTTPResponse.StatusCode, r.RequestID, ) @@ -24,17 +25,18 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) { r.HTTPResponse.Body = ioutil.NopCloser(body) defer body.Seek(0, sdkio.SeekStart) - if body.Len() == 0 { - // If there is no body don't attempt to parse the body. - return - } - unmarshalError(r) if err, ok := r.Error.(awserr.Error); ok && err != nil { - if err.Code() == "SerializationError" { + if err.Code() == request.ErrCodeSerialization && + err.OrigErr() != io.EOF { r.Error = nil return } - r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + // if empty payload + if err.OrigErr() == io.EOF { + r.HTTPResponse.StatusCode = http.StatusInternalServerError + } else { + r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + } } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go index 1db7e133..6eecf669 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -1,6 +1,7 @@ package s3 import ( + "bytes" "encoding/xml" "fmt" "io" @@ -11,6 +12,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" ) type xmlErrorResponse struct { @@ -42,29 +44,41 @@ func unmarshalError(r *request.Request) { return } - var errCode, errMsg string - // Attempt to parse error from body if it is known - resp := &xmlErrorResponse{} - err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) - if err != nil && err != io.EOF { - errCode = "SerializationError" - errMsg = "failed to decode S3 XML error response" + var errResp xmlErrorResponse + var err error + if r.HTTPResponse.StatusCode >= 200 && r.HTTPResponse.StatusCode < 300 { + err = s3unmarshalXMLError(&errResp, r.HTTPResponse.Body) } else { - errCode = resp.Code - errMsg = resp.Message - err = nil + err = xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body) + } + + if err != nil { + var errorMsg string + if err == io.EOF { + errorMsg = "empty response payload" + } else { + errorMsg = "failed to unmarshal error message" + } + + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + errorMsg, err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return } // Fallback to status code converted to message if still no error code - if len(errCode) == 0 { + if len(errResp.Code) == 0 { statusText := http.StatusText(r.HTTPResponse.StatusCode) - 
errCode = strings.Replace(statusText, " ", "", -1) - errMsg = statusText + errResp.Code = strings.Replace(statusText, " ", "", -1) + errResp.Message = statusText } r.Error = awserr.NewRequestFailure( - awserr.New(errCode, errMsg, err), + awserr.New(errResp.Code, errResp.Message, err), r.HTTPResponse.StatusCode, r.RequestID, ) @@ -80,3 +94,21 @@ type RequestFailure interface { // Host ID is the S3 Host ID needed for debug, and contacting support HostID() string } + +// s3unmarshalXMLError is s3 specific xml error unmarshaler +// for 200 OK errors and response payloads. +// This function differs from the xmlUtil.UnmarshalXMLError +// func. It does not ignore the EOF error and passes it up. +// Related to bug fix for `s3 200 OK response with empty payload` +func s3unmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return err +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 1853d0ec..550b5f68 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -78,6 +78,8 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) // in the IAM User Guide. // +// Session Duration +// // By default, the temporary security credentials created by AssumeRole last // for one hour. However, you can use the optional DurationSeconds parameter // to specify the duration of your session. You can provide a value from 900 @@ -91,6 +93,8 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // +// Permissions +// // The temporary security credentials created by AssumeRole can be used to make // API calls to any AWS service with the following exception: You cannot call // the AWS STS GetFederationToken or GetSessionToken API operations. @@ -99,14 +103,14 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to // use as managed session policies. The plain text that you use for both inline -// and managed session policies shouldn't exceed 2048 characters. Passing policies +// and managed session policies can't exceed 2,048 characters. Passing policies // to this operation returns new temporary credentials. The resulting session's // permissions are the intersection of the role's identity-based policy and // the session policies. You can use the role's temporary credentials in subsequent // AWS API calls to access resources in the account that owns the role. You // cannot use session policies to grant more permissions than those allowed // by the identity-based policy of the role that is being assumed. 
For more -// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session) +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // To assume a role from a different account, your AWS account must be trusted @@ -131,6 +135,24 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) // in the IAM User Guide. // +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These tags are +// called session tags. For more information about session tags, see Passing +// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// // Using MFA with AssumeRole // // (Optional) You can include multi-factor authentication (MFA) information @@ -165,9 +187,18 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // message describes the specific error. // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the policy document was too large. The error -// message describes how big the policy document is, in packed form, as a percentage -// of what the API allows. +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // * ErrCodeRegionDisabledException "RegionDisabledException" // STS is not activated in the requested region for the account that is being @@ -256,6 +287,8 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // an access key ID, a secret access key, and a security token. Applications // can use these temporary security credentials to sign calls to AWS services. // +// Session Duration +// // By default, the temporary security credentials created by AssumeRoleWithSAML // last for one hour. 
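The AssumeRole text above now covers session duration, session policies, and session tags in one place. A minimal sketch of passing DurationSeconds, a session tag, and a transitive tag key through this SDK; the role ARN, session name, and tag values are hypothetical, and the Tags/TransitiveTagKeys members are assumed to be modeled in this SDK revision as the documentation describes.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // hypothetical role
		RoleSessionName: aws.String("session-tag-demo"),
		DurationSeconds: aws.Int64(3600),
		Tags: []*sts.Tag{
			{Key: aws.String("Department"), Value: aws.String("Engineering")},
		},
		// Transitive tags persist through role chaining.
		TransitiveTagKeys: []*string{aws.String("Department")},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("temporary access key:", aws.StringValue(out.Credentials.AccessKeyId))
}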
However, you can use the optional DurationSeconds parameter // to specify the duration of your session. Your role session lasts for the @@ -271,6 +304,8 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // +// Permissions +// // The temporary security credentials created by AssumeRoleWithSAML can be used // to make API calls to any AWS service with the following exception: you cannot // call the STS GetFederationToken or GetSessionToken API operations. @@ -279,22 +314,16 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to // use as managed session policies. The plain text that you use for both inline -// and managed session policies shouldn't exceed 2048 characters. Passing policies +// and managed session policies can't exceed 2,048 characters. Passing policies // to this operation returns new temporary credentials. The resulting session's // permissions are the intersection of the role's identity-based policy and // the session policies. You can use the role's temporary credentials in subsequent // AWS API calls to access resources in the account that owns the role. You // cannot use session policies to grant more permissions than those allowed // by the identity-based policy of the role that is being assumed. For more -// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session) +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// Before your application can call AssumeRoleWithSAML, you must configure your -// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, -// you must use AWS Identity and Access Management (IAM) to create a SAML provider -// entity in your AWS account that represents your identity provider. You must -// also create an IAM role that specifies this SAML provider in its trust policy. -// // Calling AssumeRoleWithSAML does not require the use of AWS security credentials. // The identity of the caller is validated by using keys in the metadata document // that is uploaded for the SAML provider entity for your identity provider. @@ -302,8 +331,50 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail // logs. The entry includes the value in the NameID element of the SAML assertion. // We recommend that you use a NameIDType that is not associated with any personally -// identifiable information (PII). For example, you could instead use the Persistent -// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// identifiable information (PII). For example, you could instead use the persistent +// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// Tags +// +// (Optional) You can configure your IdP to pass attributes into your SAML assertion +// as session tags. Each session tag consists of a key name and an associated +// value. 
For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. The plain text session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An AWS conversion compresses the passed session policies and session tags +// into a packed binary format that has a separate limit. Your request can fail +// for this limit even if your plain text meets the other requirements. The +// PackedPolicySize response element indicates by percentage how close the policies +// and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, session tags override the role's tags with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// SAML Configuration +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider. You must +// also create an IAM role that specifies this SAML provider in its trust policy. // // For more information, see the following resources: // @@ -332,9 +403,18 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // message describes the specific error. // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the policy document was too large. The error -// message describes how big the policy document is, in packed form, as a percentage -// of what the API allows. +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. 
// // * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" // The identity provider (IdP) reported that authentication failed. This might @@ -456,6 +536,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // key ID, a secret access key, and a security token. Applications can use these // temporary security credentials to sign calls to AWS service API operations. // +// Session Duration +// // By default, the temporary security credentials created by AssumeRoleWithWebIdentity // last for one hour. However, you can use the optional DurationSeconds parameter // to specify the duration of your session. You can provide a value from 900 @@ -469,6 +551,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // +// Permissions +// // The temporary security credentials created by AssumeRoleWithWebIdentity can // be used to make API calls to any AWS service with the following exception: // you cannot call the STS GetFederationToken or GetSessionToken API operations. @@ -477,16 +561,52 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to // use as managed session policies. The plain text that you use for both inline -// and managed session policies shouldn't exceed 2048 characters. Passing policies +// and managed session policies can't exceed 2,048 characters. Passing policies // to this operation returns new temporary credentials. The resulting session's // permissions are the intersection of the role's identity-based policy and // the session policies. You can use the role's temporary credentials in subsequent // AWS API calls to access resources in the account that owns the role. You // cannot use session policies to grant more permissions than those allowed // by the identity-based policy of the role that is being assumed. For more -// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session) +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// Tags +// +// (Optional) You can configure your IdP to pass attributes into your web identity +// token as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. The plain text session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // +// An AWS conversion compresses the passed session policies and session tags +// into a packed binary format that has a separate limit. Your request can fail +// for this limit even if your plain text meets the other requirements. 
The +// PackedPolicySize response element indicates by percentage how close the policies +// and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, the session tag overrides the role tag with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// Identities +// // Before your application can call AssumeRoleWithWebIdentity, you must have // an identity token from a supported identity provider and create a role that // the application can assume. The role that your application assumes must trust @@ -506,8 +626,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) // and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). // -// -// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). // Walk through the process of authenticating through Login with Amazon, // Facebook, or Google, getting temporary security credentials, and then // using those credentials to make a request to AWS. @@ -515,8 +634,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and // AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). // These toolkits contain sample apps that show how to invoke the identity -// providers, and then how to use the information from these providers to -// get and use temporary security credentials. +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. // // * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). // This article discusses web identity federation and shows an example of @@ -536,9 +655,18 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // message describes the specific error. // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the policy document was too large. The error -// message describes how big the policy document is, in packed form, as a percentage -// of what the API allows. +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. 
An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" // The identity provider (IdP) reported that authentication failed. This might @@ -548,11 +676,11 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // can also mean that the claim has expired or has been explicitly revoked. // // * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" -// The request could not be fulfilled because the non-AWS identity provider -// (IDP) that was asked to verify the incoming identity token could not be reached. -// This is often a transient error caused by network conditions. Retry the request +// The request could not be fulfilled because the identity provider (IDP) that +// was asked to verify the incoming identity token could not be reached. This +// is often a transient error caused by network conditions. Retry the request // a limited number of times so that you don't exceed the request rate. If the -// error persists, the non-AWS identity provider might be down or not responding. +// error persists, the identity provider might be down or not responding. // // * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" // The web identity token that was passed could not be validated by AWS. Get @@ -703,6 +831,103 @@ func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *Deco return out, req.Send() } +const opGetAccessKeyInfo = "GetAccessKeyInfo" + +// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the +// client's request for the GetAccessKeyInfo operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAccessKeyInfoRequest method. 
+// req, resp := client.GetAccessKeyInfoRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo +func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) { + op := &request.Operation{ + Name: opGetAccessKeyInfo, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccessKeyInfoInput{} + } + + output = &GetAccessKeyInfoOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAccessKeyInfo API operation for AWS Security Token Service. +// +// Returns the account identifier for the specified access key ID. +// +// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) +// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). +// For more information about access keys, see Managing Access Keys for IAM +// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) +// in the IAM User Guide. +// +// When you pass an access key ID to this operation, it returns the ID of the +// AWS account to which the keys belong. Access key IDs beginning with AKIA +// are long-term credentials for an IAM user or the AWS account root user. Access +// key IDs beginning with ASIA are temporary credentials that are created using +// STS operations. If the account in the response belongs to you, you can sign +// in as the root user and review your root user access keys. Then, you can +// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) +// to learn which IAM user owns the keys. To learn who requested the temporary +// credentials for an ASIA access key, view the STS events in your CloudTrail +// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) +// in the IAM User Guide. +// +// This operation does not indicate the state of the access key. The key might +// be active, inactive, or deleted. Active keys might not have permissions to +// perform an operation. Providing a deleted access key might return an error +// that the key doesn't exist. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetAccessKeyInfo for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo +func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) { + req, out := c.GetAccessKeyInfoRequest(input) + return out, req.Send() +} + +// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of +// the ability to pass a context and additional request options. +// +// See GetAccessKeyInfo for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
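The GetAccessKeyInfo documentation above describes mapping an access key ID to the account that owns it. A minimal sketch of that call, reusing the documentation's example AKIA key ID.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	// AKIA-prefixed IDs are long-term IAM or root keys; ASIA-prefixed IDs are
	// STS temporary credentials. Either way the call reports the owning account.
	out, err := svc.GetAccessKeyInfo(&sts.GetAccessKeyInfoInput{
		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"), // example key ID from the docs
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access key belongs to account", aws.StringValue(out.Account))
}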
+func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) { + req, out := c.GetAccessKeyInfoRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetCallerIdentity = "GetCallerIdentity" // GetCallerIdentityRequest generates a "aws/request.Request" representing the @@ -747,8 +972,16 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ // GetCallerIdentity API operation for AWS Security Token Service. // -// Returns details about the IAM identity whose credentials are used to call -// the API. +// Returns details about the IAM user or role whose credentials are used to +// call the operation. +// +// No permissions are required to perform this operation. If an administrator +// adds a policy to your IAM user or role that explicitly denies access to the +// sts:GetCallerIdentity action, you can still perform this operation. Permissions +// are not required because the same information is returned when an IAM user +// or role is denied access. To view an example response, see I Am Not Authorized +// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) +// in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -840,7 +1073,8 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // or an OpenID Connect-compatible identity provider. In this case, we recommend // that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. // For more information, see Federation Through a Web-based Identity Provider -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. // // You can also call GetFederationToken using the security credentials of an // AWS account root user, but we do not recommend it. Instead, we recommend @@ -850,41 +1084,67 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) // in the IAM User Guide. // +// Session duration +// // The temporary credentials are valid for the specified duration, from 900 // seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default -// is 43,200 seconds (12 hours). Temporary credentials that are obtained by -// using AWS account root user credentials have a maximum duration of 3,600 -// seconds (1 hour). +// session duration is 43,200 seconds (12 hours). Temporary credentials that +// are obtained by using AWS account root user credentials have a maximum duration +// of 3,600 seconds (1 hour). // -// The temporary security credentials created by GetFederationToken can be used -// to make API calls to any AWS service with the following exceptions: +// Permissions // -// * You cannot use these credentials to call any IAM API operations. +// You can use the temporary credentials created by GetFederationToken in any +// AWS service except the following: // -// * You cannot call any STS API operations except GetCallerIdentity. 
+// * You cannot call any IAM operations using the AWS CLI or the AWS API. // -// Permissions +// * You cannot call any STS operations except GetCallerIdentity. // // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to // use as managed session policies. The plain text that you use for both inline -// and managed session policies shouldn't exceed 2048 characters. +// and managed session policies can't exceed 2,048 characters. // // Though the session policy parameters are optional, if you do not pass a policy, -// then the resulting federated user session has no permissions. The only exception -// is when the credentials are used to access a resource that has a resource-based -// policy that specifically references the federated user session in the Principal -// element of the policy. When you pass session policies, the session permissions -// are the intersection of the IAM user policies and the session policies that -// you pass. This gives you a way to further restrict the permissions for a -// federated user. You cannot use session policies to grant more permissions -// than those that are defined in the permissions policy of the IAM user. For -// more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session) +// then the resulting federated user session has no permissions. When you pass +// session policies, the session permissions are the intersection of the IAM +// user policies and the session policies that you pass. This gives you a way +// to further restrict the permissions for a federated user. You cannot use +// session policies to grant more permissions than those that are defined in +// the permissions policy of the IAM user. For more information, see Session +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. For information about using GetFederationToken to // create temporary security credentials, see GetFederationToken—Federation // Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). // +// You can use the credentials to access a resource that has a resource-based +// policy. If that policy specifically references the federated user session +// in the Principal element of the policy, the session has the permissions allowed +// by the policy. These permissions are granted in addition to the permissions +// granted by the session policies. +// +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These are called +// session tags. For more information about session tags, see Passing Session +// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. 
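// A caller-side sketch of federating with session tags as described above;
// the client svc is assumed to be an initialized *sts.STS from this SDK, and
// the name, duration, and tag values are placeholders.
func exampleGetFederationTokenWithTags(svc *sts.STS) {
	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String("example-federated-user"),
		DurationSeconds: aws.Int64(3600),
		// Session tags, subject to the key/value and packed-size limits
		// described in this documentation.
		Tags: []*sts.Tag{
			{Key: aws.String("Department"), Value: aws.String("Engineering")},
		},
	})
	if err != nil {
		fmt.Println("GetFederationToken failed:", err)
		return
	}
	fmt.Println("federated credentials expire at", out.Credentials.Expiration)
}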
+// +// Tag key–value pairs are not case sensitive, but case is preserved. This +// means that you cannot have separate Department and department tag keys. Assume +// that the user that you are federating has the Department=Marketing tag and +// you pass the department=engineering session tag. Department and department +// are not saved as separate tags, and the session tag passed in the request +// takes precedence over the user tag. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -898,9 +1158,18 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // message describes the specific error. // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the policy document was too large. The error -// message describes how big the policy document is, in packed form, as a percentage -// of what the API allows. +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // * ErrCodeRegionDisabledException "RegionDisabledException" // STS is not activated in the requested region for the account that is being @@ -989,6 +1258,8 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // +// Session Duration +// // The GetSessionToken operation must be called by using the long-term AWS security // credentials of the AWS account root user or an IAM user. Credentials that // are created by IAM users are valid for the duration that you specify. This @@ -997,13 +1268,15 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // based on account credentials can range from 900 seconds (15 minutes) up to // 3,600 seconds (1 hour), with a default of 1 hour. // +// Permissions +// // The temporary security credentials created by GetSessionToken can be used // to make API calls to any AWS service with the following exceptions: // // * You cannot call any IAM API operations unless MFA authentication information // is included in the request. // -// * You cannot call any STS API exceptAssumeRole or GetCallerIdentity. +// * You cannot call any STS API except AssumeRole or GetCallerIdentity. // // We recommend that you do not call GetSessionToken with AWS account root user // credentials. 
Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) @@ -1107,20 +1380,20 @@ type AssumeRoleInput struct { // the role's temporary credentials in subsequent AWS API calls to access resources // in the account that owns the role. You cannot use session policies to grant // more permissions than those allowed by the identity-based policy of the role - // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session) + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // The plain text that you use for both inline and managed session policies - // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII // character from the space character to the end of the valid character list // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1129,15 +1402,15 @@ type AssumeRoleInput struct { // // This parameter is optional. You can provide up to 10 managed policy ARNs. // However, the plain text that you use for both inline and managed session - // policies shouldn't exceed 2048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // policies can't exceed 2,048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. 
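// A caller-side sketch of the Policy and PolicyArns fields described here:
// passing one inline session policy and one managed policy ARN when assuming
// a role. svc is an assumed *sts.STS client; the role ARN, session name, and
// policy document are placeholders.
func exampleAssumeRoleWithSessionPolicy(svc *sts.STS) {
	inlinePolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::111122223333:role/example-role"),
		RoleSessionName: aws.String("example-session"),
		// Inline session policy; counts toward the packed-size limit above.
		Policy: aws.String(inlinePolicy),
		// Optional managed session policies, up to 10 ARNs.
		PolicyArns: []*sts.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")},
		},
	})
	if err != nil {
		fmt.Println("AssumeRole failed:", err)
		return
	}
	fmt.Println("packed policy size (percent):", aws.Int64Value(out.PackedPolicySize))
}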
// // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -1145,7 +1418,7 @@ type AssumeRoleInput struct { // in subsequent AWS API calls to access resources in the account that owns // the role. You cannot use session policies to grant more permissions than // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session) + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. PolicyArns []*PolicyDescriptorType `type:"list"` @@ -1182,6 +1455,41 @@ type AssumeRoleInput struct { // also include underscores or any of the following characters: =,.@- SerialNumber *string `min:"9" type:"string"` + // A list of session tags that you want to pass. Each session tag consists of + // a key name and an associated value. For more information about session tags, + // see Tagging AWS STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plain + // text session tag keys can’t exceed 128 characters, and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the role. When you do, session tags override a role tag with the same + // key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. + // + // Additionally, if you used temporary credentials to perform this operation, + // the new session inherits any transitive session tags from the calling session. + // If you pass a session tag with the same key as an inherited tag, the operation + // fails. To view the inherited tags for a session, see the AWS CloudTrail logs. + // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs) + // in the IAM User Guide. + Tags []*Tag `type:"list"` + // The value provided by the MFA device, if the trust policy of the role being // assumed requires MFA (that is, if the policy includes a condition that tests // for MFA). 
If the role being assumed requires MFA and if the TokenCode value @@ -1190,6 +1498,19 @@ type AssumeRoleInput struct { // The format for this parameter, as described by its regex pattern, is a sequence // of six numeric digits. TokenCode *string `min:"6" type:"string"` + + // A list of keys for session tags that you want to set as transitive. If you + // set a tag key as transitive, the corresponding key and value passes to subsequent + // sessions in a role chain. For more information, see Chaining Roles with Session + // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // in the IAM User Guide. + // + // This parameter is optional. When you set session tags as transitive, the + // session policy and session tags packed binary limit is not affected. + // + // If you choose not to specify a transitive tag key, then no tags are passed + // from this session to any subsequent sessions. + TransitiveTagKeys []*string `type:"list"` } // String returns the string representation @@ -1242,6 +1563,16 @@ func (s *AssumeRoleInput) Validate() error { } } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1291,12 +1622,24 @@ func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { return s } +// SetTags sets the Tags field's value. +func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput { + s.Tags = v + return s +} + // SetTokenCode sets the TokenCode field's value. func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { s.TokenCode = &v return s } +// SetTransitiveTagKeys sets the TransitiveTagKeys field's value. +func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput { + s.TransitiveTagKeys = v + return s +} + // Contains the response to a successful AssumeRole request, including temporary // AWS credentials that can be used to make AWS requests. type AssumeRoleOutput struct { @@ -1316,9 +1659,10 @@ type AssumeRoleOutput struct { // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` } @@ -1385,20 +1729,20 @@ type AssumeRoleWithSAMLInput struct { // the role's temporary credentials in subsequent AWS API calls to access resources // in the account that owns the role. You cannot use session policies to grant // more permissions than those allowed by the identity-based policy of the role - // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session) + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. 
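// A caller-side sketch of the Tags and TransitiveTagKeys fields added to
// AssumeRoleInput above. svc is an assumed *sts.STS client; the ARN, session
// name, and tag values are placeholders.
func exampleAssumeRoleWithSessionTags(svc *sts.STS) {
	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::111122223333:role/example-role"),
		RoleSessionName: aws.String("tagged-session"),
		Tags: []*sts.Tag{
			{Key: aws.String("Project"), Value: aws.String("demo")},
		},
		// Keys listed here are passed on to subsequent sessions in a role chain.
		TransitiveTagKeys: []*string{aws.String("Project")},
	})
	if err != nil {
		fmt.Println("AssumeRole failed:", err)
		return
	}
	fmt.Println("assumed role ARN:", aws.StringValue(out.AssumedRoleUser.Arn))
}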
// // The plain text that you use for both inline and managed session policies - // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII // character from the space character to the end of the valid character list // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1407,15 +1751,15 @@ type AssumeRoleWithSAMLInput struct { // // This parameter is optional. You can provide up to 10 managed policy ARNs. // However, the plain text that you use for both inline and managed session - // policies shouldn't exceed 2048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // policies can't exceed 2,048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -1423,7 +1767,7 @@ type AssumeRoleWithSAMLInput struct { // in subsequent AWS API calls to access resources in the account that owns // the role. You cannot use session policies to grant more permissions than // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session) + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. 
PolicyArns []*PolicyDescriptorType `type:"list"` @@ -1444,7 +1788,7 @@ type AssumeRoleWithSAMLInput struct { // in the IAM User Guide. // // SAMLAssertion is a required field - SAMLAssertion *string `min:"4" type:"string" required:"true"` + SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"` } // String returns the string representation @@ -1571,9 +1915,10 @@ type AssumeRoleWithSAMLOutput struct { // ) ) NameQualifier *string `type:"string"` - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` // The value of the NameID element in the Subject element of the SAML assertion. @@ -1680,20 +2025,20 @@ type AssumeRoleWithWebIdentityInput struct { // the role's temporary credentials in subsequent AWS API calls to access resources // in the account that owns the role. You cannot use session policies to grant // more permissions than those allowed by the identity-based policy of the role - // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session) + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // The plain text that you use for both inline and managed session policies - // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII // character from the space character to the end of the valid character list // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1702,15 +2047,15 @@ type AssumeRoleWithWebIdentityInput struct { // // This parameter is optional. You can provide up to 10 managed policy ARNs. // However, the plain text that you use for both inline and managed session - // policies shouldn't exceed 2048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // policies can't exceed 2,048 characters. 
For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -1718,7 +2063,7 @@ type AssumeRoleWithWebIdentityInput struct { // in subsequent AWS API calls to access resources in the account that owns // the role. You cannot use session policies to grant more permissions than // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session) + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. PolicyArns []*PolicyDescriptorType `type:"list"` @@ -1755,7 +2100,7 @@ type AssumeRoleWithWebIdentityInput struct { // the application makes an AssumeRoleWithWebIdentity call. // // WebIdentityToken is a required field - WebIdentityToken *string `min:"4" type:"string" required:"true"` + WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"` } // String returns the string representation @@ -1881,9 +2226,10 @@ type AssumeRoleWithWebIdentityOutput struct { // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` // The issuing authority of the web identity token presented. For OpenID Connect @@ -1955,7 +2301,7 @@ type AssumedRoleUser struct { // The ARN of the temporary security credentials that are returned from the // AssumeRole action. For more information about ARNs and how to use them in // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in Using IAM. + // in the IAM User Guide. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` @@ -2123,7 +2469,7 @@ type FederatedUser struct { // The ARN that specifies the federated user that is associated with the credentials. 
// For more information about ARNs and how to use them in policies, see IAM // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in Using IAM. + // in the IAM User Guide. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` @@ -2157,6 +2503,73 @@ func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { return s } +type GetAccessKeyInfoInput struct { + _ struct{} `type:"structure"` + + // The identifier of an access key. + // + // This parameter allows (through its regex pattern) a string of characters + // that can consist of any upper- or lowercase letter or digit. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccessKeyInfoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccessKeyInfoInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { + s.AccessKeyId = &v + return s +} + +type GetAccessKeyInfoOutput struct { + _ struct{} `type:"structure"` + + // The number used to identify the AWS account. + Account *string `type:"string"` +} + +// String returns the string representation +func (s GetAccessKeyInfoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { + s.Account = &v + return s +} + type GetCallerIdentityInput struct { _ struct{} `type:"structure"` } @@ -2249,30 +2662,33 @@ type GetFederationTokenInput struct { // use as managed session policies. // // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. The only exception - // is when the credentials are used to access a resource that has a resource-based - // policy that specifically references the federated user session in the Principal - // element of the policy. + // then the resulting federated user session has no permissions. // // When you pass session policies, the session permissions are the intersection // of the IAM user policies and the session policies that you pass. This gives // you a way to further restrict the permissions for a federated user. You cannot // use session policies to grant more permissions than those that are defined // in the permissions policy of the IAM user. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session) + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. 
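// GetCallerIdentityInput, defined above, takes no parameters; a minimal
// caller-side sketch, assuming an initialized *sts.STS client svc from this
// SDK.
func exampleGetCallerIdentity(svc *sts.STS) {
	// Reports which account, ARN, and user ID the client's credentials map to.
	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		fmt.Println("GetCallerIdentity failed:", err)
		return
	}
	fmt.Printf("account=%s arn=%s userid=%s\n",
		aws.StringValue(out.Account), aws.StringValue(out.Arn), aws.StringValue(out.UserId))
}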
// + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // // The plain text that you use for both inline and managed session policies - // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII // character from the space character to the end of the valid character list // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -2283,31 +2699,62 @@ type GetFederationTokenInput struct { // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to // use as managed session policies. The plain text that you use for both inline - // and managed session policies shouldn't exceed 2048 characters. You can provide + // and managed session policies can't exceed 2,048 characters. You can provide // up to 10 managed policy ARNs. For more information about ARNs, see Amazon - // Resource Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. The only exception - // is when the credentials are used to access a resource that has a resource-based - // policy that specifically references the federated user session in the Principal - // element of the policy. + // then the resulting federated user session has no permissions. // // When you pass session policies, the session permissions are the intersection // of the IAM user policies and the session policies that you pass. This gives // you a way to further restrict the permissions for a federated user. You cannot // use session policies to grant more permissions than those that are defined // in the permissions policy of the IAM user. 
For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session) + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. PolicyArns []*PolicyDescriptorType `type:"list"` + + // A list of session tags. Each session tag consists of a key name and an associated + // value. For more information about session tags, see Passing Session Tags + // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plain + // text session tag keys can’t exceed 128 characters and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the user you are federating. When you do, session tags override a user + // tag with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. 
+ Tags []*Tag `type:"list"` } // String returns the string representation @@ -2345,6 +2792,16 @@ func (s *GetFederationTokenInput) Validate() error { } } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2376,6 +2833,12 @@ func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetF return s } +// SetTags sets the Tags field's value. +func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput { + s.Tags = v + return s +} + // Contains the response to a successful GetFederationToken request, including // temporary AWS credentials that can be used to make AWS requests. type GetFederationTokenOutput struct { @@ -2394,9 +2857,10 @@ type GetFederationTokenOutput struct { // an Amazon S3 bucket policy. FederatedUser *FederatedUser `type:"structure"` - // A percentage value indicating the size of the policy in packed form. The - // service rejects policies for which the packed size is greater than 100 percent - // of the allowed value. + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` } @@ -2546,7 +3010,7 @@ type PolicyDescriptorType struct { // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session // policy for the role. For more information about ARNs, see Amazon Resource - // Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. Arn *string `locationName:"arn" min:"20" type:"string"` } @@ -2579,3 +3043,73 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { s.Arn = &v return s } + +// You can pass custom key-value pair attributes when you assume a role or federate +// a user. These are called session tags. You can then use the session tags +// to control access to resources. For more information, see Tagging AWS STS +// Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag keys can’t + // exceed 128 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. 
+ // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 00000000..d5307fca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,11 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go index fa7a4c66..fcb720dc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -9,14 +9,6 @@ // This guide provides descriptions of the STS API. For more detailed information // about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). // -// As an alternative to using the API, you can use one of the AWS SDKs, which -// consist of libraries and sample code for various programming languages and -// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient -// way to create programmatic access to STS. For example, the SDKs take care -// of cryptographically signing requests, managing errors, and retrying requests -// automatically. For information about the AWS SDKs, including how to download -// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). -// // For information about setting up signatures and authorization through the // API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) // in the AWS General Reference. For general information about the Query API, @@ -53,11 +45,11 @@ // in the IAM User Guide. // // After you activate a Region for use with AWS STS, you can direct AWS STS -// API calls to that Region. AWS STS recommends that you use both the setRegion -// and setEndpoint methods to make calls to a Regional endpoint. You can use -// the setRegion method alone for manually enabled Regions, such as Asia Pacific -// (Hong Kong). In this case, the calls are directed to the STS Regional endpoint. 
-// However, if you use the setRegion method alone for Regions enabled by default, +// API calls to that Region. AWS STS recommends that you provide both the Region +// and endpoint when you make calls to a Regional endpoint. You can provide +// the Region alone for manually enabled Regions, such as Asia Pacific (Hong +// Kong). In this case, the calls are directed to the STS Regional endpoint. +// However, if you provide the Region alone for Regions enabled by default, // the calls are directed to the global endpoint of https://sts.amazonaws.com. // // To view the list of AWS STS endpoints and whether they are active by default, diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go index 41ea09c3..a233f542 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -14,11 +14,11 @@ const ( // ErrCodeIDPCommunicationErrorException for service response error code // "IDPCommunicationError". // - // The request could not be fulfilled because the non-AWS identity provider - // (IDP) that was asked to verify the incoming identity token could not be reached. - // This is often a transient error caused by network conditions. Retry the request + // The request could not be fulfilled because the identity provider (IDP) that + // was asked to verify the incoming identity token could not be reached. This + // is often a transient error caused by network conditions. Retry the request // a limited number of times so that you don't exceed the request rate. If the - // error persists, the non-AWS identity provider might be down or not responding. + // error persists, the identity provider might be down or not responding. ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" // ErrCodeIDPRejectedClaimException for service response error code @@ -56,9 +56,18 @@ const ( // ErrCodePackedPolicyTooLargeException for service response error code // "PackedPolicyTooLarge". // - // The request was rejected because the policy document was too large. The error - // message describes how big the policy document is, in packed form, as a percentage - // of what the API allows. + // The request was rejected because the total packed size of the session policies + // and session tags combined was too large. An AWS conversion compresses the + // session policy document, session policy ARNs, and session tags into a packed + // binary format that has a separate limit. The error message indicates by percentage + // how close the policies and tags are to the upper size limit. For more information, + // see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // You could receive this error even though you meet other defined session policy + // and session tag limits. For more information, see IAM and STS Entity Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the IAM User Guide. 
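// A caller-side sketch of the runtime type assertion this documentation
// refers to, for detecting the packed-size error after an STS call; it
// assumes the aws/awserr and service/sts packages from this SDK are imported.
func isPackedPolicyTooLarge(err error) bool {
	// awserr.Error exposes the service error code, matched here against the
	// constant documented above.
	if aerr, ok := err.(awserr.Error); ok {
		return aerr.Code() == sts.ErrCodePackedPolicyTooLargeException
	}
	return false
}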
ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" // ErrCodeRegionDisabledException for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index 185c914d..d34a6855 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -31,7 +31,7 @@ var initRequest func(*request.Request) const ( ServiceName = "sts" // Name of service. EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "STS" // ServiceID is a unique identifer of a specific service. + ServiceID = "STS" // ServiceID is a unique identifier of a specific service. ) // New creates a new instance of the STS client with a session. @@ -39,6 +39,8 @@ const ( // aws.Config parameter to add your extra config. // // Example: +// mySession := session.Must(session.NewSession()) +// // // Create a STS client from just a session. // svc := sts.New(mySession) // @@ -46,11 +48,11 @@ const ( // svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS { svc := &STS{ Client: client.New( cfg, @@ -59,6 +61,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2011-06-15", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go new file mode 100644 index 00000000..e2e1d6ef --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go @@ -0,0 +1,96 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package stsiface + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/sts" +) + +// STSAPI provides an interface to enable mocking the +// sts.STS service client's API operation, +// paginators, and waiters. This make unit testing your code that calls out +// to the SDK's service client's calls easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // AWS Security Token Service. 
+// func myFunc(svc stsiface.STSAPI) bool { +// // Make svc.AssumeRole request +// } +// +// func main() { +// sess := session.New() +// svc := sts.New(sess) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc. +// type mockSTSClient struct { +// stsiface.STSAPI +// } +// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockSTSClient{} +// +// myfunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. Its suggested to use the pattern above for testing, or using +// tooling to generate mocks to satisfy the interfaces. +type STSAPI interface { + AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) + AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) + AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput) + + AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error) + AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error) + AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput) + + AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error) + AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error) + AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput) + + DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error) + DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error) + DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput) + + GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error) + GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error) + GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput) + + GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) + GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error) + GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput) + + GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error) + GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error) + GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput) + + GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) + GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error) + 
GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput) +} + +var _ STSAPI = (*sts.STS)(nil) diff --git a/vendor/github.com/bitly/go-hostpool/.travis.yml b/vendor/github.com/bitly/go-hostpool/.travis.yml index e69de29b..e08d89dd 100644 --- a/vendor/github.com/bitly/go-hostpool/.travis.yml +++ b/vendor/github.com/bitly/go-hostpool/.travis.yml @@ -0,0 +1,8 @@ +sudo: false +language: go + +go: + - 1.x + +notifications: + email: false diff --git a/vendor/github.com/bitly/go-hostpool/README.md b/vendor/github.com/bitly/go-hostpool/README.md index 7f443727..087463d8 100644 --- a/vendor/github.com/bitly/go-hostpool/README.md +++ b/vendor/github.com/bitly/go-hostpool/README.md @@ -1,6 +1,9 @@ go-hostpool =========== +[![Build Status](https://secure.travis-ci.org/bitly/go-hostpool.png?branch=master)](http://travis-ci.org/bitly/go-hostpool) [![GoDoc](https://godoc.org/github.com/bitly/go-hostpool?status.svg)](https://godoc.org/github.com/bitly/go-hostpool) [![GitHub release](https://img.shields.io/github/release/bitly/go-hostpool.svg)](https://github.com/bitly/go-hostpool/releases/latest) + + A Go package to intelligently and flexibly pool among multiple hosts from your Go application. Host selection can operate in round robin or epsilon greedy mode, and unresponsive hosts are avoided. diff --git a/vendor/github.com/bitly/go-hostpool/go.mod b/vendor/github.com/bitly/go-hostpool/go.mod new file mode 100644 index 00000000..c581799b --- /dev/null +++ b/vendor/github.com/bitly/go-hostpool/go.mod @@ -0,0 +1,5 @@ +module github.com/bitly/go-hostpool + +go 1.13 + +require github.com/stretchr/testify v1.4.0 diff --git a/vendor/github.com/bitly/go-hostpool/go.sum b/vendor/github.com/bitly/go-hostpool/go.sum new file mode 100644 index 00000000..e863f517 --- /dev/null +++ b/vendor/github.com/bitly/go-hostpool/go.sum @@ -0,0 +1,10 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/bitly/go-hostpool/hostpool.go b/vendor/github.com/bitly/go-hostpool/hostpool.go index d65cb2de..3ca2f4e4 100644 --- a/vendor/github.com/bitly/go-hostpool/hostpool.go +++ b/vendor/github.com/bitly/go-hostpool/hostpool.go @@ -9,7 +9,7 @@ import ( "time" ) -// Returns current version +// Version returns current version func Version() string { return "0.1" } @@ -110,7 +110,7 @@ func doMark(err error, r HostPoolResponse) { } } -// return an entry from the HostPool +// Get returns an entry from the HostPool func (p 
*standardHostPool) Get() HostPoolResponse { p.Lock() defer p.Unlock() diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE index c8364161..bc52e96f 100644 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -2,7 +2,7 @@ ISC License Copyright (c) 2012-2016 Dave Collins -Permission to use, copy, modify, and distribute this software for any +Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go index 8a4a6589..79299478 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -16,7 +16,9 @@ // when the code is not running on Google App Engine, compiled by GopherJS, and // "-tags safe" is not added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. -// +build !js,!appengine,!safe,!disableunsafe +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 package spew @@ -34,80 +36,49 @@ const ( ptrSize = unsafe.Sizeof((*byte)(nil)) ) +type flag uintptr + var ( - // offsetPtr, offsetScalar, and offsetFlag are the offsets for the - // internal reflect.Value fields. These values are valid before golang - // commit ecccf07e7f9d which changed the format. The are also valid - // after commit 82f48826c6c7 which changed the format again to mirror - // the original format. Code in the init function updates these offsets - // as necessary. - offsetPtr = uintptr(ptrSize) - offsetScalar = uintptr(0) - offsetFlag = uintptr(ptrSize * 2) - - // flagKindWidth and flagKindShift indicate various bits that the - // reflect package uses internally to track kind information. - // - // flagRO indicates whether or not the value field of a reflect.Value is - // read-only. - // - // flagIndir indicates whether the value field of a reflect.Value is - // the actual data or a pointer to the data. - // - // These values are valid before golang commit 90a7c3c86944 which - // changed their positions. Code in the init function updates these - // flags as necessary. - flagKindWidth = uintptr(5) - flagKindShift = uintptr(flagKindWidth - 1) - flagRO = uintptr(1 << 0) - flagIndir = uintptr(1 << 1) + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag ) -func init() { - // Older versions of reflect.Value stored small integers directly in the - // ptr field (which is named val in the older versions). Versions - // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named - // scalar for this purpose which unfortunately came before the flag - // field, so the offset of the flag field is different for those - // versions. - // - // This code constructs a new reflect.Value from a known small integer - // and checks if the size of the reflect.Value struct indicates it has - // the scalar field. When it does, the offsets are updated accordingly. 
- vv := reflect.ValueOf(0xf00) - if unsafe.Sizeof(vv) == (ptrSize * 4) { - offsetScalar = ptrSize * 2 - offsetFlag = ptrSize * 3 - } +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) - // Commit 90a7c3c86944 changed the flag positions such that the low - // order bits are the kind. This code extracts the kind from the flags - // field and ensures it's the correct type. When it's not, the flag - // order has been changed to the newer format, so the flags are updated - // accordingly. - upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift) - if upfv&flagKindMask>>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 - - // Commit adf9b30e5594 modified the flags to separate the - // flagRO flag into two bits which specifies whether or not the - // field is embedded. This causes flagIndir to move over a bit - // and means that flagRO is the combination of either of the - // original flagRO bit and the new bit. - // - // This code detects the change by extracting what used to be - // the indirect bit to ensure it's set. When it's not, the flag - // order has been changed to the newer format, so the flags are - // updated accordingly. - if upfv&flagIndir == 0 { - flagRO = 3 << 5 - flagIndir = 1 << 7 - } +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) } // unsafeReflectValue converts the passed reflect.Value into a one that bypasses @@ -119,34 +90,56 @@ func init() { // This allows us to check for implementations of the Stringer and error // interfaces to be used for pretty printing ordinarily unaddressable and // inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types. - switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) - } +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field.
+func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. + flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. + for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } } - return rv + panic("reflect.Value read-only flag has changed semantics") } diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index 1fe3cf3d..205c28d6 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -16,7 +16,7 @@ // when the code is running on Google App Engine, compiled by GopherJS, or // "-tags safe" is added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe +// +build js appengine safe disableunsafe !go1.4 package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go index 7c519ff4..1be8ce94 100644 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) { w.Write(closeParenBytes) } -// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' // prefix to Writer w. func printHexPtr(w io.Writer, p uintptr) { // Null pointer. diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go index df1d582a..f78d89fc 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -35,16 +35,16 @@ var ( // cCharRE is a regular expression that matches a cgo char. // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) // cUnsignedCharRE is a regular expression that matches a cgo unsigned // char. It is used to detect unsigned character arrays to hexdump // them. - cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) // cUint8tCharRE is a regular expression that matches a cgo uint8_t. // It is used to detect uint8_t arrays to hexdump them. 
- cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) ) // dumpState contains information about the state of a dump operation. @@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) { // Display dereferenced value. d.w.Write(openParenBytes) switch { - case nilFound == true: + case nilFound: d.w.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: d.w.Write(circularBytes) default: diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go index c49875ba..b04edb7d 100644 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) { // Display dereferenced value. switch { - case nilFound == true: + case nilFound: f.fs.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: f.fs.Write(circularShortBytes) default: diff --git a/vendor/github.com/eapache/go-xerial-snappy/fuzz.go b/vendor/github.com/eapache/go-xerial-snappy/fuzz.go new file mode 100644 index 00000000..6a46f478 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/fuzz.go @@ -0,0 +1,16 @@ +// +build gofuzz + +package snappy + +func Fuzz(data []byte) int { + decode, err := Decode(data) + if decode == nil && err == nil { + panic("nil error with nil result") + } + + if err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go index b8f8b51f..ea8f7afe 100644 --- a/vendor/github.com/eapache/go-xerial-snappy/snappy.go +++ b/vendor/github.com/eapache/go-xerial-snappy/snappy.go @@ -3,40 +3,128 @@ package snappy import ( "bytes" "encoding/binary" + "errors" master "github.com/golang/snappy" ) -var xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0} +const ( + sizeOffset = 16 + sizeBytes = 4 +) + +var ( + xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0} + + // This is xerial version 1 and minimally compatible with version 1 + xerialVersionInfo = []byte{0, 0, 0, 1, 0, 0, 0, 1} + + // ErrMalformed is returned by the decoder when the xerial framing + // is malformed + ErrMalformed = errors.New("malformed xerial framing") +) + +func min(x, y int) int { + if x < y { + return x + } + return y +} // Encode encodes data as snappy with no framing header. func Encode(src []byte) []byte { return master.Encode(nil, src) } +// EncodeStream *appends* to the specified 'dst' the compressed +// 'src' in xerial framing format. If 'dst' does not have enough +// capacity, then a new slice will be allocated. If 'dst' has +// non-zero length, then if *must* have been built using this function. +func EncodeStream(dst, src []byte) []byte { + if len(dst) == 0 { + dst = append(dst, xerialHeader...) + dst = append(dst, xerialVersionInfo...) + } + + // Snappy encode in blocks of maximum 32KB + var ( + max = len(src) + blockSize = 32 * 1024 + pos = 0 + chunk []byte + ) + + for pos < max { + newPos := min(pos + blockSize, max) + chunk = master.Encode(chunk[:cap(chunk)], src[pos:newPos]) + + // First encode the compressed size (big-endian) + // Put* panics if the buffer is too small, so pad 4 bytes first + origLen := len(dst) + dst = append(dst, dst[0:4]...) 
+ binary.BigEndian.PutUint32(dst[origLen:], uint32(len(chunk))) + + // And now the compressed data + dst = append(dst, chunk...) + pos = newPos + } + return dst +} + // Decode decodes snappy data whether it is traditional unframed // or includes the xerial framing format. func Decode(src []byte) ([]byte, error) { + return DecodeInto(nil, src) +} + +// DecodeInto decodes snappy data whether it is traditional unframed +// or includes the xerial framing format into the specified `dst`. +// It is assumed that the entirety of `dst` including all capacity is available +// for use by this function. If `dst` is nil *or* insufficiently large to hold +// the decoded `src`, new space will be allocated. +func DecodeInto(dst, src []byte) ([]byte, error) { + var max = len(src) + if max < len(xerialHeader) { + return nil, ErrMalformed + } + if !bytes.Equal(src[:8], xerialHeader) { - return master.Decode(nil, src) + return master.Decode(dst[:cap(dst)], src) + } + + if max < sizeOffset+sizeBytes { + return nil, ErrMalformed + } + + if dst == nil { + dst = make([]byte, 0, len(src)) } + dst = dst[:0] var ( - pos = uint32(16) - max = uint32(len(src)) - dst = make([]byte, 0, len(src)) + pos = sizeOffset chunk []byte - err error + err error ) - for pos < max { - size := binary.BigEndian.Uint32(src[pos : pos+4]) - pos += 4 - chunk, err = master.Decode(chunk, src[pos:pos+size]) + for pos+sizeBytes <= max { + size := int(binary.BigEndian.Uint32(src[pos : pos+sizeBytes])) + pos += sizeBytes + + nextPos := pos + size + // On architectures where int is 32-bytes wide size + pos could + // overflow so we need to check the low bound as well as the + // high + if nextPos < pos || nextPos > max { + return nil, ErrMalformed + } + + chunk, err = master.Decode(chunk[:cap(chunk)], src[pos:nextPos]) + if err != nil { return nil, err } - pos += size + pos = nextPos dst = append(dst, chunk...) } return dst, nil diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go index 87496890..8d393e90 100644 --- a/vendor/github.com/golang/snappy/encode.go +++ b/vendor/github.com/golang/snappy/encode.go @@ -138,7 +138,7 @@ func NewBufferedWriter(w io.Writer) *Writer { } } -// Writer is an io.Writer than can write Snappy-compressed bytes. +// Writer is an io.Writer that can write Snappy-compressed bytes. type Writer struct { w io.Writer err error diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go index 2a56fb50..150d91bc 100644 --- a/vendor/github.com/golang/snappy/encode_amd64.go +++ b/vendor/github.com/golang/snappy/encode_amd64.go @@ -26,4 +26,4 @@ func extendMatch(src []byte, i, j int) int // encodeBlock has the same semantics as in encode_other.go. 
// //go:noescape -func encodeBlock(dst, src []byte) (d int) \ No newline at end of file +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/go.mod b/vendor/github.com/golang/snappy/go.mod new file mode 100644 index 00000000..f6406bb2 --- /dev/null +++ b/vendor/github.com/golang/snappy/go.mod @@ -0,0 +1 @@ +module github.com/golang/snappy diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go index 0cf5e379..ece692ea 100644 --- a/vendor/github.com/golang/snappy/snappy.go +++ b/vendor/github.com/golang/snappy/snappy.go @@ -2,10 +2,21 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package snappy implements the snappy block-based compression format. -// It aims for very high speeds and reasonable compression. +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. // -// The C++ snappy implementation is at https://github.com/google/snappy +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. 
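Reviewer note (not part of the diff): the expanded package comment above draws the block vs. stream distinction for Snappy. A minimal, self-contained sketch of the two formats, assuming the upstream import path github.com/golang/snappy and purely illustrative sample data:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/golang/snappy"
)

func main() {
	data := []byte("hello, snappy")

	// Block format: one-shot Encode/Decode over a complete buffer.
	block := snappy.Encode(nil, data)
	back, err := snappy.Decode(nil, block)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("block round-trip: %s\n", back)

	// Stream (framing) format: Writer/Reader over an io stream.
	var buf bytes.Buffer
	w := snappy.NewBufferedWriter(&buf)
	if _, err := w.Write(data); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	streamed, err := ioutil.ReadAll(snappy.NewReader(&buf))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("stream round-trip: %s\n", streamed)

	// The two formats are not interchangeable: a stream Reader rejects
	// block-compressed bytes, which is the pitfall the new comment calls out.
	if _, err := ioutil.ReadAll(snappy.NewReader(bytes.NewReader(block))); err != nil {
		fmt.Println("stream reader rejects block data:", err)
	}
}
```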
package snappy // import "github.com/golang/snappy" import ( diff --git a/vendor/github.com/google/gopacket/.travis.install.sh b/vendor/github.com/google/gopacket/.travis.install.sh new file mode 100644 index 00000000..648c9016 --- /dev/null +++ b/vendor/github.com/google/gopacket/.travis.install.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +set -ev + +go get github.com/google/gopacket +go get github.com/google/gopacket/layers +go get github.com/google/gopacket/tcpassembly +go get github.com/google/gopacket/reassembly +go get github.com/google/gopacket/pcapgo diff --git a/vendor/github.com/google/gopacket/.travis.script.sh b/vendor/github.com/google/gopacket/.travis.script.sh new file mode 100644 index 00000000..a483f4f7 --- /dev/null +++ b/vendor/github.com/google/gopacket/.travis.script.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +set -ev + +go test github.com/google/gopacket +go test github.com/google/gopacket/layers +go test github.com/google/gopacket/tcpassembly +go test github.com/google/gopacket/reassembly +go test github.com/google/gopacket/pcapgo +go test github.com/google/gopacket/pcap diff --git a/vendor/github.com/google/gopacket/.travis.yml b/vendor/github.com/google/gopacket/.travis.yml index ceb1c7e6..8ebb01d7 100644 --- a/vendor/github.com/google/gopacket/.travis.yml +++ b/vendor/github.com/google/gopacket/.travis.yml @@ -1,18 +1,55 @@ language: go -before_install: - - sudo apt-get install libpcap-dev -install: - - go get github.com/google/gopacket - - go get github.com/google/gopacket/layers - - go get github.com/google/gopacket/pcapgo - - go get github.com/google/gopacket/tcpassembly - - go get github.com/google/gopacket/reassembly -script: - - go test github.com/google/gopacket - - go test github.com/google/gopacket/layers - - go test github.com/google/gopacket/pcapgo - - go test github.com/google/gopacket/tcpassembly - - go test github.com/google/gopacket/reassembly - - ./.travis.gofmt.sh - - ./.travis.govet.sh - - ./.travis.golint.sh +go: + - 1.11.x + - 1.12.x + - master + +addons: + apt: + packages: + libpcap-dev + +# use modules except for older versions (see below) +install: true + +env: + - GO111MODULE=on + +script: ./.travis.script.sh + +matrix: + fast_finish: true + allow_failures: + - go: master + +jobs: + include: + - go: 1.5.x + install: ./.travis.install.sh + - go: 1.6.x + install: ./.travis.install.sh + - go: 1.7.x + install: ./.travis.install.sh + - go: 1.8.x + install: ./.travis.install.sh + - go: 1.9.x + install: ./.travis.install.sh + - go: 1.10.x + install: ./.travis.install.sh + - os: osx + go: 1.x + - os: windows + go: 1.x + # winpcap does not work on travis ci - so install nmap to get libpcap + before_install: choco install nmap + - stage: style + name: "fmt/vet/lint" + go: 1.x + script: + - ./.travis.gofmt.sh + - ./.travis.govet.sh + - ./.travis.golint.sh + +stages: + - style + - test diff --git a/vendor/github.com/google/gopacket/AUTHORS b/vendor/github.com/google/gopacket/AUTHORS index 339d6f6c..84319853 100644 --- a/vendor/github.com/google/gopacket/AUTHORS +++ b/vendor/github.com/google/gopacket/AUTHORS @@ -16,6 +16,7 @@ Bill Green Christian Mäder Gernot Vormayr Vitor Garcia Graveto +Elias Chavarria Reyes CONTRIBUTORS: Attila Oláh diff --git 
a/vendor/github.com/google/gopacket/README.md b/vendor/github.com/google/gopacket/README.md index fe76f7a2..a2f48a9f 100644 --- a/vendor/github.com/google/gopacket/README.md +++ b/vendor/github.com/google/gopacket/README.md @@ -6,7 +6,7 @@ See [godoc](https://godoc.org/github.com/google/gopacket) for more details. [![Build Status](https://travis-ci.org/google/gopacket.svg?branch=master)](https://travis-ci.org/google/gopacket) [![GoDoc](https://godoc.org/github.com/google/gopacket?status.svg)](https://godoc.org/github.com/google/gopacket) -Minimum Go version required is 1.5. +Minimum Go version required is 1.5 except for pcapgo/EthernetHandle, afpacket, and bsdbpf which need at least 1.7 due to x/sys/unix dependencies. Originally forked from the gopcap project written by Andreas Krennmair (http://github.com/akrennmair/gopcap). diff --git a/vendor/github.com/google/gopacket/doc.go b/vendor/github.com/google/gopacket/doc.go index ac57e59a..8e33e562 100644 --- a/vendor/github.com/google/gopacket/doc.go +++ b/vendor/github.com/google/gopacket/doc.go @@ -22,7 +22,8 @@ useful, including: Also, if you're looking to dive right into code, see the examples subdirectory for numerous simple binaries built using gopacket libraries. -Minimum go version required is 1.5. +Minimum go version required is 1.5 except for pcapgo/EthernetHandle, afpacket, +and bsdbpf which need at least 1.7 due to x/sys/unix dependencies. Basic Usage diff --git a/vendor/github.com/google/gopacket/flows.go b/vendor/github.com/google/gopacket/flows.go index 7203ead0..a00c8839 100644 --- a/vendor/github.com/google/gopacket/flows.go +++ b/vendor/github.com/google/gopacket/flows.go @@ -49,7 +49,7 @@ func (a Endpoint) Raw() []byte { return a.raw[:a.len] } // For some endpoints, the actual comparison may not make sense, however this // ordering does provide useful information for most Endpoint types. // Ordering is based first on endpoint type, then on raw endpoint bytes. -// Endpoint bytes are sorted lexigraphically. +// Endpoint bytes are sorted lexicographically. 
func (a Endpoint) LessThan(b Endpoint) bool { return a.typ < b.typ || (a.typ == b.typ && bytes.Compare(a.raw[:a.len], b.raw[:b.len]) < 0) } diff --git a/vendor/github.com/google/gopacket/go.mod b/vendor/github.com/google/gopacket/go.mod new file mode 100644 index 00000000..99e99f4d --- /dev/null +++ b/vendor/github.com/google/gopacket/go.mod @@ -0,0 +1,8 @@ +module github.com/google/gopacket + +go 1.12 + +require ( + golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 + golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67 +) diff --git a/vendor/github.com/google/gopacket/go.sum b/vendor/github.com/google/gopacket/go.sum new file mode 100644 index 00000000..2b289429 --- /dev/null +++ b/vendor/github.com/google/gopacket/go.sum @@ -0,0 +1,7 @@ +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67 h1:1Fzlr8kkDLQwqMP8GxrhptBLqZG/EDpiATneiZHY998= +golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/google/gopacket/layers/dns.go b/vendor/github.com/google/gopacket/layers/dns.go index eae36dbb..a0b2d721 100644 --- a/vendor/github.com/google/gopacket/layers/dns.go +++ b/vendor/github.com/google/gopacket/layers/dns.go @@ -1,4 +1,4 @@ -// Copyright 2014 Google, Inc. All rights reserved. +// Copyright 2014, 2018 GoPacket Authors. All rights reserved. 
// // Use of this source code is governed by a BSD-style license // that can be found in the LICENSE file in the root of the source @@ -11,6 +11,7 @@ import ( "errors" "fmt" "net" + "strings" "github.com/google/gopacket" ) @@ -69,6 +70,7 @@ const ( DNSTypeTXT DNSType = 16 // text strings DNSTypeAAAA DNSType = 28 // a IPv6 host address [RFC3596] DNSTypeSRV DNSType = 33 // server discovery [RFC2782] [RFC6195] + DNSTypeOPT DNSType = 41 // OPT Pseudo-RR [RFC6891] ) func (dt DNSType) String() string { @@ -111,6 +113,8 @@ func (dt DNSType) String() string { return "AAAA" case DNSTypeSRV: return "SRV" + case DNSTypeOPT: + return "OPT" } } @@ -423,6 +427,12 @@ func recSize(rr *DNSResourceRecord) int { return l case DNSTypeSRV: return 6 + len(rr.SRV.Name) + 2 + case DNSTypeOPT: + l := len(rr.OPT) * 4 + for _, opt := range rr.OPT { + l += len(opt.Data) + } + return l } return 0 @@ -431,7 +441,14 @@ func recSize(rr *DNSResourceRecord) int { func computeSize(recs []DNSResourceRecord) int { sz := 0 for _, rr := range recs { - sz += len(rr.Name) + 12 + v := len(rr.Name) + + if v == 0 { + sz += v + 11 + } else { + sz += v + 12 + } + sz += recSize(&rr) } return sz @@ -592,7 +609,7 @@ loop: } } if len(*buffer) <= start { - return nil, 0, errDNSNameHasNoData + return (*buffer)[start:], index + 1, nil } return (*buffer)[start+1:], index + 1, nil } @@ -619,9 +636,10 @@ func (q *DNSQuestion) decode(data []byte, offset int, df gopacket.DecodeFeedback func (q *DNSQuestion) encode(data []byte, offset int) int { noff := encodeName(q.Name, data, offset) + nSz := noff - offset binary.BigEndian.PutUint16(data[noff:], uint16(q.Type)) binary.BigEndian.PutUint16(data[noff+2:], uint16(q.Class)) - return len(q.Name) + 6 + return nSz + 4 } // DNSResourceRecord @@ -665,6 +683,7 @@ type DNSResourceRecord struct { SOA DNSSOA SRV DNSSRV MX DNSMX + OPT []DNSOPT // See RFC 6891, section 6.1.2 // Undecoded TXT for backward compatibility TXT []byte @@ -707,6 +726,12 @@ func encodeName(name []byte, data []byte, offset int) int { l++ } } + + if len(name) == 0 { + data[offset] = 0x00 // terminal + return offset + 1 + } + // length for final portion data[offset+len(name)-l] = byte(l) data[offset+len(name)+1] = 0x00 // terminal @@ -716,6 +741,7 @@ func encodeName(name []byte, data []byte, offset int) int { func (rr *DNSResourceRecord) encode(data []byte, offset int, opts gopacket.SerializeOptions) (int, error) { noff := encodeName(rr.Name, data, offset) + nSz := noff - offset binary.BigEndian.PutUint16(data[noff:], uint16(rr.Type)) binary.BigEndian.PutUint16(data[noff+2:], uint16(rr.Class)) @@ -755,6 +781,14 @@ func (rr *DNSResourceRecord) encode(data []byte, offset int, opts gopacket.Seria binary.BigEndian.PutUint16(data[noff+12:], rr.SRV.Weight) binary.BigEndian.PutUint16(data[noff+14:], rr.SRV.Port) encodeName(rr.SRV.Name, data, noff+16) + case DNSTypeOPT: + noff2 := noff + 10 + for _, opt := range rr.OPT { + binary.BigEndian.PutUint16(data[noff2:], uint16(opt.Code)) + binary.BigEndian.PutUint16(data[noff2+2:], uint16(len(opt.Data))) + copy(data[noff2+4:], opt.Data) + noff2 += 4 + len(opt.Data) + } default: return 0, fmt.Errorf("serializing resource record of type %v not supported", rr.Type) } @@ -767,11 +801,18 @@ func (rr *DNSResourceRecord) encode(data []byte, offset int, opts gopacket.Seria rr.DataLength = uint16(dSz) } - return len(rr.Name) + 1 + 11 + dSz, nil + return nSz + 10 + dSz, nil } func (rr *DNSResourceRecord) String() string { + if rr.Type == DNSTypeOPT { + opts := make([]string, len(rr.OPT)) + for i, opt := 
range rr.OPT { + opts[i] = opt.String() + } + return "OPT " + strings.Join(opts, ",") + } if rr.Class == DNSClassIN { switch rr.Type { case DNSTypeA, DNSTypeAAAA: @@ -803,6 +844,32 @@ func decodeCharacterStrings(data []byte) ([][]byte, error) { return strings, nil } +func decodeOPTs(data []byte, offset int) ([]DNSOPT, error) { + allOPT := []DNSOPT{} + end := len(data) + + if offset == end { + return allOPT, nil // There is no data to read + } + + if offset+4 > end { + return allOPT, fmt.Errorf("DNSOPT record is of length %d, it should be at least length 4", end-offset) + } + + for i := offset; i < end; { + opt := DNSOPT{} + opt.Code = DNSOptionCode(binary.BigEndian.Uint16(data[i : i+2])) + l := binary.BigEndian.Uint16(data[i+2 : i+4]) + if i+4+int(l) > end { + return allOPT, fmt.Errorf("Malformed DNSOPT record. The length (%d) field implies a packet larger than the one received", l) + } + opt.Data = data[i+4 : i+4+int(l)] + allOPT = append(allOPT, opt) + i += int(l) + 4 + } + return allOPT, nil +} + func (rr *DNSResourceRecord) decodeRData(data []byte, offset int, buffer *[]byte) error { switch rr.Type { case DNSTypeA: @@ -866,6 +933,12 @@ func (rr *DNSResourceRecord) decodeRData(data []byte, offset int, buffer *[]byte return err } rr.SRV.Name = name + case DNSTypeOPT: + allOPT, err := decodeOPTs(data, offset) + if err != nil { + return err + } + rr.OPT = allOPT } return nil } @@ -891,6 +964,72 @@ type DNSMX struct { Name []byte } +// DNSOptionCode represents the code of a DNS Option, see RFC6891, section 6.1.2 +type DNSOptionCode uint16 + +func (doc DNSOptionCode) String() string { + switch doc { + default: + return "Unknown" + case DNSOptionCodeNSID: + return "NSID" + case DNSOptionCodeDAU: + return "DAU" + case DNSOptionCodeDHU: + return "DHU" + case DNSOptionCodeN3U: + return "N3U" + case DNSOptionCodeEDNSClientSubnet: + return "EDNSClientSubnet" + case DNSOptionCodeEDNSExpire: + return "EDNSExpire" + case DNSOptionCodeCookie: + return "Cookie" + case DNSOptionCodeEDNSKeepAlive: + return "EDNSKeepAlive" + case DNSOptionCodePadding: + return "CodePadding" + case DNSOptionCodeChain: + return "CodeChain" + case DNSOptionCodeEDNSKeyTag: + return "CodeEDNSKeyTag" + case DNSOptionCodeEDNSClientTag: + return "EDNSClientTag" + case DNSOptionCodeEDNSServerTag: + return "EDNSServerTag" + case DNSOptionCodeDeviceID: + return "DeviceID" + } +} + +// DNSOptionCode known values. 
See IANA +const ( + DNSOptionCodeNSID DNSOptionCode = 3 + DNSOptionCodeDAU DNSOptionCode = 5 + DNSOptionCodeDHU DNSOptionCode = 6 + DNSOptionCodeN3U DNSOptionCode = 7 + DNSOptionCodeEDNSClientSubnet DNSOptionCode = 8 + DNSOptionCodeEDNSExpire DNSOptionCode = 9 + DNSOptionCodeCookie DNSOptionCode = 10 + DNSOptionCodeEDNSKeepAlive DNSOptionCode = 11 + DNSOptionCodePadding DNSOptionCode = 12 + DNSOptionCodeChain DNSOptionCode = 13 + DNSOptionCodeEDNSKeyTag DNSOptionCode = 14 + DNSOptionCodeEDNSClientTag DNSOptionCode = 16 + DNSOptionCodeEDNSServerTag DNSOptionCode = 17 + DNSOptionCodeDeviceID DNSOptionCode = 26946 +) + +// DNSOPT is a DNS Option, see RFC6891, section 6.1.2 +type DNSOPT struct { + Code DNSOptionCode + Data []byte +} + +func (opt DNSOPT) String() string { + return fmt.Sprintf("%s=%x", opt.Code, opt.Data) +} + var ( errMaxRecursion = errors.New("max DNS recursion level hit") diff --git a/vendor/github.com/google/gopacket/layers/eapol.go b/vendor/github.com/google/gopacket/layers/eapol.go index a2d47986..12aa5ba2 100644 --- a/vendor/github.com/google/gopacket/layers/eapol.go +++ b/vendor/github.com/google/gopacket/layers/eapol.go @@ -161,6 +161,11 @@ func (ek *EAPOLKey) LayerType() gopacket.LayerType { return LayerTypeEAPOLKey } +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (ek *EAPOLKey) CanDecode() gopacket.LayerType { + return LayerTypeEAPOLKey +} + // NextLayerType returns layers.LayerTypeDot11InformationElement if the key // data exists and is unencrypted, otherwise it does not expect a next layer. func (ek *EAPOLKey) NextLayerType() gopacket.LayerType { diff --git a/vendor/github.com/google/gopacket/layers/llc.go b/vendor/github.com/google/gopacket/layers/llc.go index 2f2e4a52..cad68036 100644 --- a/vendor/github.com/google/gopacket/layers/llc.go +++ b/vendor/github.com/google/gopacket/layers/llc.go @@ -96,7 +96,7 @@ func (s *SNAP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { // CanDecode returns the set of layer types that this DecodingLayer can decode. func (s *SNAP) CanDecode() gopacket.LayerClass { - return LayerTypeLLC + return LayerTypeSNAP } // NextLayerType returns the layer type contained by this DecodingLayer. 
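Reviewer note (not part of the diff): the dns.go hunks above add DNSTypeOPT, the DNSOPT type, and the DNSOptionCode constants so EDNS0 OPT pseudo-RRs (RFC 6891) can be decoded and serialized, including the empty root name. A rough sketch of how that support might be exercised, assuming the upstream import path github.com/google/gopacket; the cookie bytes and payload size are illustrative only:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// A query for example.com carrying an EDNS0 OPT pseudo-RR in the
	// additional section, which exercises the new DNSTypeOPT encode path.
	dns := &layers.DNS{
		ID:     0x1234,
		RD:     true,
		OpCode: layers.DNSOpCodeQuery,
		Questions: []layers.DNSQuestion{{
			Name:  []byte("example.com"),
			Type:  layers.DNSTypeA,
			Class: layers.DNSClassIN,
		}},
		Additionals: []layers.DNSResourceRecord{{
			Name:  nil, // root name; encoded as a single zero byte
			Type:  layers.DNSTypeOPT,
			Class: layers.DNSClass(4096), // for OPT, the class field carries the UDP payload size
			OPT: []layers.DNSOPT{{
				Code: layers.DNSOptionCodeCookie,
				Data: []byte{1, 2, 3, 4, 5, 6, 7, 8},
			}},
		}},
	}

	buf := gopacket.NewSerializeBuffer()
	if err := dns.SerializeTo(buf, gopacket.SerializeOptions{FixLengths: true}); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("wire format: % x\n", buf.Bytes())
}
```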
diff --git a/vendor/github.com/google/gopacket/layers/sflow.go b/vendor/github.com/google/gopacket/layers/sflow.go index 9484a8ef..c56fe89d 100644 --- a/vendor/github.com/google/gopacket/layers/sflow.go +++ b/vendor/github.com/google/gopacket/layers/sflow.go @@ -2457,6 +2457,16 @@ type SFlowPORTNAME struct { Str string } +func decodeString(data *[]byte) (len uint32, str string) { + *data, len = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + str = string((*data)[:len]) + if (len % 4) != 0 { + len += 4 - len%4 + } + *data = (*data)[len:] + return +} + func decodePortnameCounters(data *[]byte) (SFlowPORTNAME, error) { pn := SFlowPORTNAME{} var cdf SFlowCounterDataFormat @@ -2464,8 +2474,7 @@ func decodePortnameCounters(data *[]byte) (SFlowPORTNAME, error) { *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4])) pn.EnterpriseID, pn.Format = cdf.decode() *data, pn.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) - *data, pn.Len = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) - *data, pn.Str = (*data)[8:], string(binary.BigEndian.Uint64((*data)[:8])) + pn.Len, pn.Str = decodeString(data) return pn, nil } diff --git a/vendor/github.com/google/gopacket/layers/sip.go b/vendor/github.com/google/gopacket/layers/sip.go index 01ab7907..54036881 100644 --- a/vendor/github.com/google/gopacket/layers/sip.go +++ b/vendor/github.com/google/gopacket/layers/sip.go @@ -233,6 +233,16 @@ func (s *SIP) Payload() []byte { return s.BaseLayer.Payload } +// CanDecode returns the set of layer types that this DecodingLayer can decode +func (s *SIP) CanDecode() gopacket.LayerClass { + return LayerTypeSIP +} + +// NextLayerType returns the layer type contained by this DecodingLayer +func (s *SIP) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + // DecodeFromBytes decodes the slice into the SIP struct. func (s *SIP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { diff --git a/vendor/github.com/google/gopacket/packet.go b/vendor/github.com/google/gopacket/packet.go index e2e9a341..3a7c4b3d 100644 --- a/vendor/github.com/google/gopacket/packet.go +++ b/vendor/github.com/google/gopacket/packet.go @@ -12,6 +12,7 @@ import ( "errors" "fmt" "io" + "net" "os" "reflect" "runtime/debug" @@ -809,17 +810,37 @@ func (p *PacketSource) NextPacket() (Packet, error) { } // packetsToChannel reads in all packets from the packet source and sends them -// to the given channel. When it receives an error, it ignores it. When it -// receives an io.EOF, it closes the channel. +// to the given channel. This routine terminates when a non-temporary error +// is returned by NextPacket(). 
func (p *PacketSource) packetsToChannel() { defer close(p.c) for { packet, err := p.NextPacket() - if err == io.EOF || err == syscall.EBADF { - return - } else if err == nil { + if err == nil { p.c <- packet + continue + } + + // Immediately retry for temporary network errors + if nerr, ok := err.(net.Error); ok && nerr.Temporary() { + continue + } + + // Immediately retry for EAGAIN + if err == syscall.EAGAIN { + continue } + + // Immediately break for known unrecoverable errors + if err == io.EOF || err == io.ErrUnexpectedEOF || + err == io.ErrNoProgress || err == io.ErrClosedPipe || err == io.ErrShortBuffer || + err == syscall.EBADF || + strings.Contains(err.Error(), "use of closed file") { + break + } + + // Sleep briefly and try again + time.Sleep(time.Millisecond * time.Duration(5)) } } diff --git a/vendor/github.com/google/gopacket/pcap/pcap.go b/vendor/github.com/google/gopacket/pcap/pcap.go index 6eeeb653..6a4ef635 100644 --- a/vendor/github.com/google/gopacket/pcap/pcap.go +++ b/vendor/github.com/google/gopacket/pcap/pcap.go @@ -524,13 +524,13 @@ func (p *Handle) NewBPF(expr string) (*BPF, error) { // This allows to match packets obtained from a-non GoPacket capture source // to be matched. // -// buf := make([]byte, MaxFrameSize) -// bpfi, _ := pcap.NewBPF(layers.LinkTypeEthernet, MaxFrameSize, "icmp") -// n, _ := someIO.Read(buf) -// ci := gopacket.CaptureInfo{CaptureLength: n, Length: n} -// if bpfi.Matches(ci, buf) { -// doSomething() -// } +// buf := make([]byte, MaxFrameSize) +// bpfi, _ := pcap.NewBPF(layers.LinkTypeEthernet, MaxFrameSize, "icmp") +// n, _ := someIO.Read(buf) +// ci := gopacket.CaptureInfo{CaptureLength: n, Length: n} +// if bpfi.Matches(ci, buf) { +// doSomething() +// } func NewBPF(linkType layers.LinkType, captureLength int, expr string) (*BPF, error) { h, err := pcapOpenDead(linkType, captureLength) if err != nil { diff --git a/vendor/github.com/google/gopacket/pcap/pcap_windows.go b/vendor/github.com/google/gopacket/pcap/pcap_windows.go index d8e86cd4..0a533380 100644 --- a/vendor/github.com/google/gopacket/pcap/pcap_windows.go +++ b/vendor/github.com/google/gopacket/pcap/pcap_windows.go @@ -21,6 +21,8 @@ import ( "github.com/google/gopacket/layers" ) +var pcapLoaded = false + const npcapPath = "\\Npcap" func initDllPath(kernel32 syscall.Handle) { @@ -152,9 +154,18 @@ var ( ) func init() { + LoadWinPCAP() +} + +// LoadWinPCAP attempts to dynamically load the wpcap DLL and resolve necessary functions +func LoadWinPCAP() error { + if pcapLoaded { + return nil + } + kernel32, err := syscall.LoadLibrary("kernel32.dll") if err != nil { - panic("couldn't load kernel32.dll") + return fmt.Errorf("couldn't load kernel32.dll") } defer syscall.FreeLibrary(kernel32) @@ -162,16 +173,16 @@ func init() { wpcapHandle, err = syscall.LoadLibrary("wpcap.dll") if err != nil { - panic("Couldn't load wpcap.dll") + return fmt.Errorf("couldn't load wpcap.dll") } initLoadedDllPath(kernel32) msvcrtHandle, err = syscall.LoadLibrary("msvcrt.dll") if err != nil { - panic("Couldn't load msvcrt.dll") + return fmt.Errorf("couldn't load msvcrt.dll") } callocPtr, err = syscall.GetProcAddress(msvcrtHandle, "calloc") if err != nil { - panic("Couldn't get calloc function") + return fmt.Errorf("couldn't get calloc function") } pcapStrerrorPtr = mustLoad("pcap_strerror") @@ -223,6 +234,9 @@ func init() { //libpcap <1.5 does not have pcap_set_immediate_mode pcapSetImmediateModePtr = mightLoad("pcap_set_immediate_mode") 
pcapHopenOfflinePtr = mustLoad("pcap_hopen_offline") + + pcapLoaded = true + return nil } func (h *pcapPkthdr) getSec() int64 { @@ -271,6 +285,11 @@ func pcapSetTstampPrecision(cptr pcapTPtr, precision int) error { } func pcapOpenLive(device string, snaplen int, pro int, timeout int) (*Handle, error) { + err := LoadWinPCAP() + if err != nil { + return nil, err + } + buf := make([]byte, errorBufferSize) dev, err := syscall.BytePtrFromString(device) if err != nil { @@ -286,6 +305,11 @@ func pcapOpenLive(device string, snaplen int, pro int, timeout int) (*Handle, er } func openOffline(file string) (handle *Handle, err error) { + err = LoadWinPCAP() + if err != nil { + return nil, err + } + buf := make([]byte, errorBufferSize) f, err := syscall.BytePtrFromString(file) if err != nil { @@ -385,6 +409,11 @@ func pcapBpfProgramFromInstructions(bpfInstructions []BPFInstruction) pcapBpfPro } func pcapLookupnet(device string) (netp, maskp uint32, err error) { + err = LoadWinPCAP() + if err != nil { + return 0, 0, err + } + buf := make([]byte, errorBufferSize) dev, err := syscall.BytePtrFromString(device) if err != nil { @@ -439,6 +468,11 @@ func (p *Handle) pcapListDatalinks() (datalinks []Datalink, err error) { } func pcapOpenDead(linkType layers.LinkType, captureLength int) (*Handle, error) { + err := LoadWinPCAP() + if err != nil { + return nil, err + } + cptr, _, _ := syscall.Syscall(pcapOpenDeadPtr, 2, uintptr(linkType), uintptr(captureLength), 0) if cptr == 0 { return nil, errors.New("error opening dead capture") @@ -473,16 +507,28 @@ func (p *Handle) pcapSetDatalink(dlt layers.LinkType) error { } func pcapDatalinkValToName(dlt int) string { + err := LoadWinPCAP() + if err != nil { + panic(err) + } ret, _, _ := syscall.Syscall(pcapDatalinkValToNamePtr, 1, uintptr(dlt), 0, 0) return bytePtrToString(ret) } func pcapDatalinkValToDescription(dlt int) string { + err := LoadWinPCAP() + if err != nil { + panic(err) + } ret, _, _ := syscall.Syscall(pcapDatalinkValToDescriptionPtr, 1, uintptr(dlt), 0, 0) return bytePtrToString(ret) } func pcapDatalinkNameToVal(name string) int { + err := LoadWinPCAP() + if err != nil { + panic(err) + } cptr, err := syscall.BytePtrFromString(name) if err != nil { return 0 @@ -492,6 +538,10 @@ func pcapDatalinkNameToVal(name string) int { } func pcapLibVersion() string { + err := LoadWinPCAP() + if err != nil { + panic(err) + } ret, _, _ := syscall.Syscall(pcapLibVersionPtr, 0, 0, 0, 0) return bytePtrToString(ret) } @@ -575,8 +625,13 @@ func (p pcapDevices) addresses() pcapAddresses { } func pcapFindAllDevs() (pcapDevices, error) { - buf := make([]byte, errorBufferSize) var alldevsp pcapDevices + err := LoadWinPCAP() + if err != nil { + return alldevsp, err + } + + buf := make([]byte, errorBufferSize) ret, _, _ := syscall.Syscall(pcapFindalldevsPtr, 2, uintptr(unsafe.Pointer(&alldevsp.all)), uintptr(unsafe.Pointer(&buf[0])), 0) @@ -608,6 +663,11 @@ func (p *Handle) pcapSnapshot() int { } func (t TimestampSource) pcapTstampTypeValToName() string { + err := LoadWinPCAP() + if err != nil { + return err.Error() + } + //libpcap <1.2 doesn't have pcap_*_tstamp_* functions if pcapTstampTypeValToNamePtr == 0 { return "pcap timestamp types not supported" @@ -617,6 +677,11 @@ func (t TimestampSource) pcapTstampTypeValToName() string { } func pcapTstampTypeNameToVal(s string) (TimestampSource, error) { + err := LoadWinPCAP() + if err != nil { + return 0, err + } + //libpcap <1.2 doesn't have pcap_*_tstamp_* functions if pcapTstampTypeNameToValPtr == 0 { return 0, 
statusError(pcapCint(pcapError)) @@ -659,6 +724,11 @@ func (p *InactiveHandle) pcapClose() { } func pcapCreate(device string) (*InactiveHandle, error) { + err := LoadWinPCAP() + if err != nil { + return nil, err + } + buf := make([]byte, errorBufferSize) dev, err := syscall.BytePtrFromString(device) if err != nil { @@ -793,6 +863,11 @@ func (p *Handle) waitForPacket() { // openOfflineFile returns contents of input file as a *Handle. func openOfflineFile(file *os.File) (handle *Handle, err error) { + err = LoadWinPCAP() + if err != nil { + return nil, err + } + buf := make([]byte, errorBufferSize) cf := file.Fd() diff --git a/vendor/github.com/hashicorp/go-uuid/.travis.yml b/vendor/github.com/hashicorp/go-uuid/.travis.yml new file mode 100644 index 00000000..76984907 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/.travis.yml @@ -0,0 +1,12 @@ +language: go + +sudo: false + +go: + - 1.4 + - 1.5 + - 1.6 + - tip + +script: + - go test -bench . -benchmem -v ./... diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE new file mode 100644 index 00000000..e87a115e --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md new file mode 100644 index 00000000..fbde8b9a --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/README.md @@ -0,0 +1,8 @@ +# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid) + +Generates UUID-format strings using high quality, _purely random_ bytes. It is **not** intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. It can also parse UUID-format strings into their component bytes. 
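For orientation, here is a minimal usage sketch (editor's illustration, not part of the vendored files) of the go-uuid API added in this diff; the functions it calls (`GenerateUUID`, `ParseUUID`, `FormatUUID`) all appear in `uuid.go` below, and the error handling is illustrative only:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/go-uuid"
)

func main() {
	// Generate a random, UUID-formatted string (purely random bytes, not RFC 4122).
	id, err := uuid.GenerateUUID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id)

	// Round-trip: parse the string back into its 16 raw bytes...
	raw, err := uuid.ParseUUID(id)
	if err != nil {
		log.Fatal(err)
	}

	// ...and format those bytes back into the canonical string form.
	again, err := uuid.FormatUUID(raw)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(again == id) // true
}
```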
+ +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid). diff --git a/vendor/github.com/hashicorp/go-uuid/go.mod b/vendor/github.com/hashicorp/go-uuid/go.mod new file mode 100644 index 00000000..dd57f9d2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-uuid diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go new file mode 100644 index 00000000..0c10c4e9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/uuid.go @@ -0,0 +1,83 @@ +package uuid + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "io" +) + +// GenerateRandomBytes is used to generate random bytes of given size. +func GenerateRandomBytes(size int) ([]byte, error) { + return GenerateRandomBytesWithReader(size, rand.Reader) +} + +// GenerateRandomBytesWithReader is used to generate random bytes of given size read from a given reader. +func GenerateRandomBytesWithReader(size int, reader io.Reader) ([]byte, error) { + if reader == nil { + return nil, fmt.Errorf("provided reader is nil") + } + buf := make([]byte, size) + if _, err := io.ReadFull(reader, buf); err != nil { + return nil, fmt.Errorf("failed to read random bytes: %v", err) + } + return buf, nil +} + + +const uuidLen = 16 + +// GenerateUUID is used to generate a random UUID +func GenerateUUID() (string, error) { + return GenerateUUIDWithReader(rand.Reader) +} + +// GenerateUUIDWithReader is used to generate a random UUID with a given Reader +func GenerateUUIDWithReader(reader io.Reader) (string, error) { + if reader == nil { + return "", fmt.Errorf("provided reader is nil") + } + buf, err := GenerateRandomBytesWithReader(uuidLen, reader) + if err != nil { + return "", err + } + return FormatUUID(buf) +} + +func FormatUUID(buf []byte) (string, error) { + if buflen := len(buf); buflen != uuidLen { + return "", fmt.Errorf("wrong length byte slice (%d)", buflen) + } + + return fmt.Sprintf("%x-%x-%x-%x-%x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]), nil +} + +func ParseUUID(uuid string) ([]byte, error) { + if len(uuid) != 2 * uuidLen + 4 { + return nil, fmt.Errorf("uuid string is wrong length") + } + + if uuid[8] != '-' || + uuid[13] != '-' || + uuid[18] != '-' || + uuid[23] != '-' { + return nil, fmt.Errorf("uuid is improperly formatted") + } + + hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36] + + ret, err := hex.DecodeString(hexStr) + if err != nil { + return nil, err + } + if len(ret) != uuidLen { + return nil, fmt.Errorf("decoded hex is the wrong length") + } + + return ret, nil +} diff --git a/vendor/github.com/jcmturner/gofork/LICENSE b/vendor/github.com/jcmturner/gofork/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md b/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md new file mode 100644 index 00000000..66a2a8cc --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md @@ -0,0 +1,5 @@ +This is a temporary repository that will be removed when the issues below are fixed in the core golang code. + +## Issues +* [encoding/asn1: cannot marshal into a GeneralString](https://github.com/golang/go/issues/18832) +* [encoding/asn1: cannot marshal into slice of strings and pass stringtype parameter tags to members](https://github.com/golang/go/issues/18834) \ No newline at end of file diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go new file mode 100644 index 00000000..f1bb7671 --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go @@ -0,0 +1,1003 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 implements parsing of DER-encoded ASN.1 data structures, +// as defined in ITU-T Rec X.690. +// +// See also ``A Layman's Guide to a Subset of ASN.1, BER, and DER,'' +// http://luca.ntop.org/Teaching/Appunti/asn1.html. +package asn1 + +// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc +// are different encoding formats for those objects. Here, we'll be dealing +// with DER, the Distinguished Encoding Rules. DER is used in X.509 because +// it's fast to parse and, unlike BER, has a unique encoding for every object. +// When calculating hashes over objects, it's important that the resulting +// bytes be the same at both ends and DER removes this margin of error. +// +// ASN.1 is very complex and this package doesn't attempt to implement +// everything by any means. + +import ( + "errors" + "fmt" + "math/big" + "reflect" + "strconv" + "time" + "unicode/utf8" +) + +// A StructuralError suggests that the ASN.1 data is valid, but the Go type +// which is receiving it doesn't match. +type StructuralError struct { + Msg string +} + +func (e StructuralError) Error() string { return "asn1: structure error: " + e.Msg } + +// A SyntaxError suggests that the ASN.1 data is invalid. +type SyntaxError struct { + Msg string +} + +func (e SyntaxError) Error() string { return "asn1: syntax error: " + e.Msg } + +// We start by dealing with each of the primitive types in turn. 
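To make the purpose of this vendored fork concrete, here is a small sketch (editor's illustration, not part of the vendored files) exercising the fork-specific `generalstring` field tag that addresses the issues linked above; it assumes the fork keeps the standard `encoding/asn1` Marshal/Unmarshal API, both of which appear later in this file:

```go
package main

import (
	"fmt"
	"log"

	"github.com/jcmturner/gofork/encoding/asn1"
)

// names is an illustrative structure: a SEQUENCE containing a
// SEQUENCE OF GeneralString, which the upstream encoding/asn1
// package cannot marshal (see the linked issues).
type names struct {
	NameType   int32
	NameString []string `asn1:"generalstring"`
}

func main() {
	in := names{NameType: 1, NameString: []string{"HTTP", "host.example.com"}}

	// Marshal to DER, then decode it back into an equivalent value.
	der, err := asn1.Marshal(in)
	if err != nil {
		log.Fatal(err)
	}

	var out names
	if _, err := asn1.Unmarshal(der, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out) // {NameType:1 NameString:[HTTP host.example.com]}
}
```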
+ +// BOOLEAN + +func parseBool(bytes []byte) (ret bool, err error) { + if len(bytes) != 1 { + err = SyntaxError{"invalid boolean"} + return + } + + // DER demands that "If the encoding represents the boolean value TRUE, + // its single contents octet shall have all eight bits set to one." + // Thus only 0 and 255 are valid encoded values. + switch bytes[0] { + case 0: + ret = false + case 0xff: + ret = true + default: + err = SyntaxError{"invalid boolean"} + } + + return +} + +// INTEGER + +// checkInteger returns nil if the given bytes are a valid DER-encoded +// INTEGER and an error otherwise. +func checkInteger(bytes []byte) error { + if len(bytes) == 0 { + return StructuralError{"empty integer"} + } + if len(bytes) == 1 { + return nil + } + if (bytes[0] == 0 && bytes[1]&0x80 == 0) || (bytes[0] == 0xff && bytes[1]&0x80 == 0x80) { + return StructuralError{"integer not minimally-encoded"} + } + return nil +} + +// parseInt64 treats the given bytes as a big-endian, signed integer and +// returns the result. +func parseInt64(bytes []byte) (ret int64, err error) { + err = checkInteger(bytes) + if err != nil { + return + } + if len(bytes) > 8 { + // We'll overflow an int64 in this case. + err = StructuralError{"integer too large"} + return + } + for bytesRead := 0; bytesRead < len(bytes); bytesRead++ { + ret <<= 8 + ret |= int64(bytes[bytesRead]) + } + + // Shift up and down in order to sign extend the result. + ret <<= 64 - uint8(len(bytes))*8 + ret >>= 64 - uint8(len(bytes))*8 + return +} + +// parseInt treats the given bytes as a big-endian, signed integer and returns +// the result. +func parseInt32(bytes []byte) (int32, error) { + if err := checkInteger(bytes); err != nil { + return 0, err + } + ret64, err := parseInt64(bytes) + if err != nil { + return 0, err + } + if ret64 != int64(int32(ret64)) { + return 0, StructuralError{"integer too large"} + } + return int32(ret64), nil +} + +var bigOne = big.NewInt(1) + +// parseBigInt treats the given bytes as a big-endian, signed integer and returns +// the result. +func parseBigInt(bytes []byte) (*big.Int, error) { + if err := checkInteger(bytes); err != nil { + return nil, err + } + ret := new(big.Int) + if len(bytes) > 0 && bytes[0]&0x80 == 0x80 { + // This is a negative number. + notBytes := make([]byte, len(bytes)) + for i := range notBytes { + notBytes[i] = ^bytes[i] + } + ret.SetBytes(notBytes) + ret.Add(ret, bigOne) + ret.Neg(ret) + return ret, nil + } + ret.SetBytes(bytes) + return ret, nil +} + +// BIT STRING + +// BitString is the structure to use when you want an ASN.1 BIT STRING type. A +// bit string is padded up to the nearest byte in memory and the number of +// valid bits is recorded. Padding bits will be zero. +type BitString struct { + Bytes []byte // bits packed into bytes. + BitLength int // length in bits. +} + +// At returns the bit at the given index. If the index is out of range it +// returns false. +func (b BitString) At(i int) int { + if i < 0 || i >= b.BitLength { + return 0 + } + x := i / 8 + y := 7 - uint(i%8) + return int(b.Bytes[x]>>y) & 1 +} + +// RightAlign returns a slice where the padding bits are at the beginning. The +// slice may share memory with the BitString. 
+func (b BitString) RightAlign() []byte {
+	shift := uint(8 - (b.BitLength % 8))
+	if shift == 8 || len(b.Bytes) == 0 {
+		return b.Bytes
+	}
+
+	a := make([]byte, len(b.Bytes))
+	a[0] = b.Bytes[0] >> shift
+	for i := 1; i < len(b.Bytes); i++ {
+		a[i] = b.Bytes[i-1] << (8 - shift)
+		a[i] |= b.Bytes[i] >> shift
+	}
+
+	return a
+}
+
+// parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
+func parseBitString(bytes []byte) (ret BitString, err error) {
+	if len(bytes) == 0 {
+		err = SyntaxError{"zero length BIT STRING"}
+		return
+	}
+	paddingBits := int(bytes[0])
+	if paddingBits > 7 ||
+		len(bytes) == 1 && paddingBits > 0 ||
+		bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {
+		err = SyntaxError{"invalid padding bits in BIT STRING"}
+		return
+	}
+	ret.BitLength = (len(bytes)-1)*8 - paddingBits
+	ret.Bytes = bytes[1:]
+	return
+}
+
+// OBJECT IDENTIFIER
+
+// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER.
+type ObjectIdentifier []int
+
+// Equal reports whether oi and other represent the same identifier.
+func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
+	if len(oi) != len(other) {
+		return false
+	}
+	for i := 0; i < len(oi); i++ {
+		if oi[i] != other[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (oi ObjectIdentifier) String() string {
+	var s string
+
+	for i, v := range oi {
+		if i > 0 {
+			s += "."
+		}
+		s += strconv.Itoa(v)
+	}
+
+	return s
+}
+
+// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
+// returns it. An object identifier is a sequence of variable length integers
+// that are assigned in a hierarchy.
+func parseObjectIdentifier(bytes []byte) (s []int, err error) {
+	if len(bytes) == 0 {
+		err = SyntaxError{"zero length OBJECT IDENTIFIER"}
+		return
+	}
+
+	// In the worst case, we get two elements from the first byte (which is
+	// encoded differently) and then every varint is a single byte long.
+	s = make([]int, len(bytes)+1)
+
+	// The first varint is 40*value1 + value2:
+	// According to this packing, value1 can take the values 0, 1 and 2 only.
+	// When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
+	// then there are no restrictions on value2.
+	v, offset, err := parseBase128Int(bytes, 0)
+	if err != nil {
+		return
+	}
+	if v < 80 {
+		s[0] = v / 40
+		s[1] = v % 40
+	} else {
+		s[0] = 2
+		s[1] = v - 80
+	}
+
+	i := 2
+	for ; offset < len(bytes); i++ {
+		v, offset, err = parseBase128Int(bytes, offset)
+		if err != nil {
+			return
+		}
+		s[i] = v
+	}
+	s = s[0:i]
+	return
+}
+
+// ENUMERATED
+
+// An Enumerated is represented as a plain int.
+type Enumerated int
+
+// FLAG
+
+// A Flag accepts any data and is set to true if present.
+type Flag bool
+
+// parseBase128Int parses a base-128 encoded int from the given offset in the
+// given byte slice. It returns the value and the new offset.
+func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) {
+	offset = initOffset
+	for shifted := 0; offset < len(bytes); shifted++ {
+		if shifted == 4 {
+			err = StructuralError{"base 128 integer too large"}
+			return
+		}
+		ret <<= 7
+		b := bytes[offset]
+		ret |= int(b & 0x7f)
+		offset++
+		if b&0x80 == 0 {
+			return
+		}
+	}
+	err = SyntaxError{"truncated base 128 integer"}
+	return
+}
+
+// UTCTime
+
+func parseUTCTime(bytes []byte) (ret time.Time, err error) {
+	s := string(bytes)
+
+	formatStr := "0601021504Z0700"
+	ret, err = time.Parse(formatStr, s)
+	if err != nil {
+		formatStr = "060102150405Z0700"
+		ret, err = time.Parse(formatStr, s)
+	}
+	if err != nil {
+		return
+	}
+
+	if serialized := ret.Format(formatStr); serialized != s {
+		err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized)
+		return
+	}
+
+	if ret.Year() >= 2050 {
+		// UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
+		ret = ret.AddDate(-100, 0, 0)
+	}
+
+	return
+}
+
+// parseGeneralizedTime parses the GeneralizedTime from the given byte slice
+// and returns the resulting time.
+func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) { + const formatStr = "20060102150405Z0700" + s := string(bytes) + + if ret, err = time.Parse(formatStr, s); err != nil { + return + } + + if serialized := ret.Format(formatStr); serialized != s { + err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized) + } + + return +} + +// PrintableString + +// parsePrintableString parses a ASN.1 PrintableString from the given byte +// array and returns it. +func parsePrintableString(bytes []byte) (ret string, err error) { + for _, b := range bytes { + if !isPrintable(b) { + err = SyntaxError{"PrintableString contains invalid character"} + return + } + } + ret = string(bytes) + return +} + +// isPrintable reports whether the given b is in the ASN.1 PrintableString set. +func isPrintable(b byte) bool { + return 'a' <= b && b <= 'z' || + 'A' <= b && b <= 'Z' || + '0' <= b && b <= '9' || + '\'' <= b && b <= ')' || + '+' <= b && b <= '/' || + b == ' ' || + b == ':' || + b == '=' || + b == '?' || + // This is technically not allowed in a PrintableString. + // However, x509 certificates with wildcard strings don't + // always use the correct string type so we permit it. + b == '*' +} + +// IA5String + +// parseIA5String parses a ASN.1 IA5String (ASCII string) from the given +// byte slice and returns it. +func parseIA5String(bytes []byte) (ret string, err error) { + for _, b := range bytes { + if b >= utf8.RuneSelf { + err = SyntaxError{"IA5String contains invalid character"} + return + } + } + ret = string(bytes) + return +} + +// T61String + +// parseT61String parses a ASN.1 T61String (8-bit clean string) from the given +// byte slice and returns it. +func parseT61String(bytes []byte) (ret string, err error) { + return string(bytes), nil +} + +// UTF8String + +// parseUTF8String parses a ASN.1 UTF8String (raw UTF-8) from the given byte +// array and returns it. +func parseUTF8String(bytes []byte) (ret string, err error) { + if !utf8.Valid(bytes) { + return "", errors.New("asn1: invalid UTF-8 string") + } + return string(bytes), nil +} + +// A RawValue represents an undecoded ASN.1 object. +type RawValue struct { + Class, Tag int + IsCompound bool + Bytes []byte + FullBytes []byte // includes the tag and length +} + +// RawContent is used to signal that the undecoded, DER data needs to be +// preserved for a struct. To use it, the first field of the struct must have +// this type. It's an error for any of the other fields to have this type. +type RawContent []byte + +// Tagging + +// parseTagAndLength parses an ASN.1 tag and length pair from the given offset +// into a byte slice. It returns the parsed data and the new offset. SET and +// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we +// don't distinguish between ordered and unordered objects in this code. +func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) { + offset = initOffset + // parseTagAndLength should not be called without at least a single + // byte to read. 
Thus this check is for robustness: + if offset >= len(bytes) { + err = errors.New("asn1: internal error in parseTagAndLength") + return + } + b := bytes[offset] + offset++ + ret.class = int(b >> 6) + ret.isCompound = b&0x20 == 0x20 + ret.tag = int(b & 0x1f) + + // If the bottom five bits are set, then the tag number is actually base 128 + // encoded afterwards + if ret.tag == 0x1f { + ret.tag, offset, err = parseBase128Int(bytes, offset) + if err != nil { + return + } + // Tags should be encoded in minimal form. + if ret.tag < 0x1f { + err = SyntaxError{"non-minimal tag"} + return + } + } + if offset >= len(bytes) { + err = SyntaxError{"truncated tag or length"} + return + } + b = bytes[offset] + offset++ + if b&0x80 == 0 { + // The length is encoded in the bottom 7 bits. + ret.length = int(b & 0x7f) + } else { + // Bottom 7 bits give the number of length bytes to follow. + numBytes := int(b & 0x7f) + if numBytes == 0 { + err = SyntaxError{"indefinite length found (not DER)"} + return + } + ret.length = 0 + for i := 0; i < numBytes; i++ { + if offset >= len(bytes) { + err = SyntaxError{"truncated tag or length"} + return + } + b = bytes[offset] + offset++ + if ret.length >= 1<<23 { + // We can't shift ret.length up without + // overflowing. + err = StructuralError{"length too large"} + return + } + ret.length <<= 8 + ret.length |= int(b) + if ret.length == 0 { + // DER requires that lengths be minimal. + err = StructuralError{"superfluous leading zeros in length"} + return + } + } + // Short lengths must be encoded in short form. + if ret.length < 0x80 { + err = StructuralError{"non-minimal length"} + return + } + } + + return +} + +// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse +// a number of ASN.1 values from the given byte slice and returns them as a +// slice of Go values of the given type. +func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) { + expectedTag, compoundType, ok := getUniversalType(elemType) + if !ok { + err = StructuralError{"unknown Go type for slice"} + return + } + + // First we iterate over the input and count the number of elements, + // checking that the types are correct in each case. + numElements := 0 + for offset := 0; offset < len(bytes); { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + switch t.tag { + case TagIA5String, TagGeneralString, TagT61String, TagUTF8String: + // We pretend that various other string types are + // PRINTABLE STRINGs so that a sequence of them can be + // parsed into a []string. + t.tag = TagPrintableString + case TagGeneralizedTime, TagUTCTime: + // Likewise, both time types are treated the same. 
+ t.tag = TagUTCTime + } + + if t.class != ClassUniversal || t.isCompound != compoundType || t.tag != expectedTag { + err = StructuralError{"sequence tag mismatch"} + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"truncated sequence"} + return + } + offset += t.length + numElements++ + } + ret = reflect.MakeSlice(sliceType, numElements, numElements) + params := fieldParameters{} + offset := 0 + for i := 0; i < numElements; i++ { + offset, err = parseField(ret.Index(i), bytes, offset, params) + if err != nil { + return + } + } + return +} + +var ( + bitStringType = reflect.TypeOf(BitString{}) + objectIdentifierType = reflect.TypeOf(ObjectIdentifier{}) + enumeratedType = reflect.TypeOf(Enumerated(0)) + flagType = reflect.TypeOf(Flag(false)) + timeType = reflect.TypeOf(time.Time{}) + rawValueType = reflect.TypeOf(RawValue{}) + rawContentsType = reflect.TypeOf(RawContent(nil)) + bigIntType = reflect.TypeOf(new(big.Int)) +) + +// invalidLength returns true iff offset + length > sliceLength, or if the +// addition would overflow. +func invalidLength(offset, length, sliceLength int) bool { + return offset+length < offset || offset+length > sliceLength +} + +// parseField is the main parsing function. Given a byte slice and an offset +// into the array, it will try to parse a suitable ASN.1 value out and store it +// in the given Value. +func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) { + offset = initOffset + fieldType := v.Type() + + // If we have run out of data, it may be that there are optional elements at the end. + if offset == len(bytes) { + if !setDefaultValue(v, params) { + err = SyntaxError{"sequence truncated"} + } + return + } + + // Deal with raw values. + if fieldType == rawValueType { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated"} + return + } + result := RawValue{t.class, t.tag, t.isCompound, bytes[offset : offset+t.length], bytes[initOffset : offset+t.length]} + offset += t.length + v.Set(reflect.ValueOf(result)) + return + } + + // Deal with the ANY type. + if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated"} + return + } + var result interface{} + if !t.isCompound && t.class == ClassUniversal { + innerBytes := bytes[offset : offset+t.length] + switch t.tag { + case TagPrintableString: + result, err = parsePrintableString(innerBytes) + case TagIA5String: + result, err = parseIA5String(innerBytes) + // jtasn1 addition of following case + case TagGeneralString: + result, err = parseIA5String(innerBytes) + case TagT61String: + result, err = parseT61String(innerBytes) + case TagUTF8String: + result, err = parseUTF8String(innerBytes) + case TagInteger: + result, err = parseInt64(innerBytes) + case TagBitString: + result, err = parseBitString(innerBytes) + case TagOID: + result, err = parseObjectIdentifier(innerBytes) + case TagUTCTime: + result, err = parseUTCTime(innerBytes) + case TagGeneralizedTime: + result, err = parseGeneralizedTime(innerBytes) + case TagOctetString: + result = innerBytes + default: + // If we don't know how to handle the type, we just leave Value as nil. 
+ } + } + offset += t.length + if err != nil { + return + } + if result != nil { + v.Set(reflect.ValueOf(result)) + } + return + } + universalTag, compoundType, ok1 := getUniversalType(fieldType) + if !ok1 { + err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType)} + return + } + + t, offset, err := parseTagAndLength(bytes, offset) + if err != nil { + return + } + if params.explicit { + expectedClass := ClassContextSpecific + if params.application { + expectedClass = ClassApplication + } + if offset == len(bytes) { + err = StructuralError{"explicit tag has no child"} + return + } + if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) { + if t.length > 0 { + t, offset, err = parseTagAndLength(bytes, offset) + if err != nil { + return + } + } else { + if fieldType != flagType { + err = StructuralError{"zero length explicit tag was not an asn1.Flag"} + return + } + v.SetBool(true) + return + } + } else { + // The tags didn't match, it might be an optional element. + ok := setDefaultValue(v, params) + if ok { + offset = initOffset + } else { + err = StructuralError{"explicitly tagged member didn't match"} + } + return + } + } + + // Special case for strings: all the ASN.1 string types map to the Go + // type string. getUniversalType returns the tag for PrintableString + // when it sees a string, so if we see a different string type on the + // wire, we change the universal type to match. + if universalTag == TagPrintableString { + if t.class == ClassUniversal { + switch t.tag { + case TagIA5String, TagGeneralString, TagT61String, TagUTF8String: + universalTag = t.tag + } + } else if params.stringType != 0 { + universalTag = params.stringType + } + } + + // Special case for time: UTCTime and GeneralizedTime both map to the + // Go type time.Time. + if universalTag == TagUTCTime && t.tag == TagGeneralizedTime && t.class == ClassUniversal { + universalTag = TagGeneralizedTime + } + + if params.set { + universalTag = TagSet + } + + expectedClass := ClassUniversal + expectedTag := universalTag + + if !params.explicit && params.tag != nil { + expectedClass = ClassContextSpecific + expectedTag = *params.tag + } + + if !params.explicit && params.application && params.tag != nil { + expectedClass = ClassApplication + expectedTag = *params.tag + } + + // We have unwrapped any explicit tagging at this point. + if t.class != expectedClass || t.tag != expectedTag || t.isCompound != compoundType { + // Tags don't match. Again, it could be an optional element. + ok := setDefaultValue(v, params) + if ok { + offset = initOffset + } else { + err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset)} + } + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated"} + return + } + innerBytes := bytes[offset : offset+t.length] + offset += t.length + + // We deal with the structures defined in this package first. 
+ switch fieldType { + case objectIdentifierType: + newSlice, err1 := parseObjectIdentifier(innerBytes) + v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice))) + if err1 == nil { + reflect.Copy(v, reflect.ValueOf(newSlice)) + } + err = err1 + return + case bitStringType: + bs, err1 := parseBitString(innerBytes) + if err1 == nil { + v.Set(reflect.ValueOf(bs)) + } + err = err1 + return + case timeType: + var time time.Time + var err1 error + if universalTag == TagUTCTime { + time, err1 = parseUTCTime(innerBytes) + } else { + time, err1 = parseGeneralizedTime(innerBytes) + } + if err1 == nil { + v.Set(reflect.ValueOf(time)) + } + err = err1 + return + case enumeratedType: + parsedInt, err1 := parseInt32(innerBytes) + if err1 == nil { + v.SetInt(int64(parsedInt)) + } + err = err1 + return + case flagType: + v.SetBool(true) + return + case bigIntType: + parsedInt, err1 := parseBigInt(innerBytes) + if err1 == nil { + v.Set(reflect.ValueOf(parsedInt)) + } + err = err1 + return + } + switch val := v; val.Kind() { + case reflect.Bool: + parsedBool, err1 := parseBool(innerBytes) + if err1 == nil { + val.SetBool(parsedBool) + } + err = err1 + return + case reflect.Int, reflect.Int32, reflect.Int64: + if val.Type().Size() == 4 { + parsedInt, err1 := parseInt32(innerBytes) + if err1 == nil { + val.SetInt(int64(parsedInt)) + } + err = err1 + } else { + parsedInt, err1 := parseInt64(innerBytes) + if err1 == nil { + val.SetInt(parsedInt) + } + err = err1 + } + return + // TODO(dfc) Add support for the remaining integer types + case reflect.Struct: + structType := fieldType + + if structType.NumField() > 0 && + structType.Field(0).Type == rawContentsType { + bytes := bytes[initOffset:offset] + val.Field(0).Set(reflect.ValueOf(RawContent(bytes))) + } + + innerOffset := 0 + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + if i == 0 && field.Type == rawContentsType { + continue + } + innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, parseFieldParameters(field.Tag.Get("asn1"))) + if err != nil { + return + } + } + // We allow extra bytes at the end of the SEQUENCE because + // adding elements to the end has been used in X.509 as the + // version numbers have increased. + return + case reflect.Slice: + sliceType := fieldType + if sliceType.Elem().Kind() == reflect.Uint8 { + val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes))) + reflect.Copy(val, reflect.ValueOf(innerBytes)) + return + } + newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem()) + if err1 == nil { + val.Set(newSlice) + } + err = err1 + return + case reflect.String: + var v string + switch universalTag { + case TagPrintableString: + v, err = parsePrintableString(innerBytes) + case TagIA5String: + v, err = parseIA5String(innerBytes) + case TagT61String: + v, err = parseT61String(innerBytes) + case TagUTF8String: + v, err = parseUTF8String(innerBytes) + case TagGeneralString: + // GeneralString is specified in ISO-2022/ECMA-35, + // A brief review suggests that it includes structures + // that allow the encoding to change midstring and + // such. We give up and pass it as an 8-bit string. + v, err = parseT61String(innerBytes) + default: + err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)} + } + if err == nil { + val.SetString(v) + } + return + } + err = StructuralError{"unsupported: " + v.Type().String()} + return +} + +// canHaveDefaultValue reports whether k is a Kind that we will set a default +// value for. 
(A signed integer, essentially.) +func canHaveDefaultValue(k reflect.Kind) bool { + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + } + + return false +} + +// setDefaultValue is used to install a default value, from a tag string, into +// a Value. It is successful if the field was optional, even if a default value +// wasn't provided or it failed to install it into the Value. +func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) { + if !params.optional { + return + } + ok = true + if params.defaultValue == nil { + return + } + if canHaveDefaultValue(v.Kind()) { + v.SetInt(*params.defaultValue) + } + return +} + +// Unmarshal parses the DER-encoded ASN.1 data structure b +// and uses the reflect package to fill in an arbitrary value pointed at by val. +// Because Unmarshal uses the reflect package, the structs +// being written to must use upper case field names. +// +// An ASN.1 INTEGER can be written to an int, int32, int64, +// or *big.Int (from the math/big package). +// If the encoded value does not fit in the Go type, +// Unmarshal returns a parse error. +// +// An ASN.1 BIT STRING can be written to a BitString. +// +// An ASN.1 OCTET STRING can be written to a []byte. +// +// An ASN.1 OBJECT IDENTIFIER can be written to an +// ObjectIdentifier. +// +// An ASN.1 ENUMERATED can be written to an Enumerated. +// +// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time. +// +// An ASN.1 PrintableString or IA5String can be written to a string. +// +// Any of the above ASN.1 values can be written to an interface{}. +// The value stored in the interface has the corresponding Go type. +// For integers, that type is int64. +// +// An ASN.1 SEQUENCE OF x or SET OF x can be written +// to a slice if an x can be written to the slice's element type. +// +// An ASN.1 SEQUENCE or SET can be written to a struct +// if each of the elements in the sequence can be +// written to the corresponding element in the struct. +// +// The following tags on struct fields have special meaning to Unmarshal: +// +// application specifies that a APPLICATION tag is used +// default:x sets the default value for optional integer fields +// explicit specifies that an additional, explicit tag wraps the implicit one +// optional marks the field as ASN.1 OPTIONAL +// set causes a SET, rather than a SEQUENCE type to be expected +// tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC +// +// If the type of the first field of a structure is RawContent then the raw +// ASN1 contents of the struct will be stored in it. +// +// If the type name of a slice element ends with "SET" then it's treated as if +// the "set" tag was set on it. This can be used with nested slices where a +// struct tag cannot be given. +// +// Other ASN.1 types are not supported; if it encounters them, +// Unmarshal returns a parse error. +func Unmarshal(b []byte, val interface{}) (rest []byte, err error) { + return UnmarshalWithParams(b, val, "") +} + +// UnmarshalWithParams allows field parameters to be specified for the +// top-level element. The form of the params is the same as the field tags. 
+func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) { + v := reflect.ValueOf(val).Elem() + offset, err := parseField(v, b, 0, parseFieldParameters(params)) + if err != nil { + return nil, err + } + return b[offset:], nil +} diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go new file mode 100644 index 00000000..7a9da49f --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go @@ -0,0 +1,173 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asn1 + +import ( + "reflect" + "strconv" + "strings" +) + +// ASN.1 objects have metadata preceding them: +// the tag: the type of the object +// a flag denoting if this object is compound or not +// the class type: the namespace of the tag +// the length of the object, in bytes + +// Here are some standard tags and classes + +// ASN.1 tags represent the type of the following object. +const ( + TagBoolean = 1 + TagInteger = 2 + TagBitString = 3 + TagOctetString = 4 + TagOID = 6 + TagEnum = 10 + TagUTF8String = 12 + TagSequence = 16 + TagSet = 17 + TagPrintableString = 19 + TagT61String = 20 + TagIA5String = 22 + TagUTCTime = 23 + TagGeneralizedTime = 24 + TagGeneralString = 27 +) + +// ASN.1 class types represent the namespace of the tag. +const ( + ClassUniversal = 0 + ClassApplication = 1 + ClassContextSpecific = 2 + ClassPrivate = 3 +) + +type tagAndLength struct { + class, tag, length int + isCompound bool +} + +// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead +// of" and "in addition to". When not specified, every primitive type has a +// default tag in the UNIVERSAL class. +// +// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1 +// doesn't actually have a UNIVERSAL keyword). However, by saying [IMPLICIT +// CONTEXT-SPECIFIC 42], that means that the tag is replaced by another. +// +// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an +// /additional/ tag would wrap the default tag. This explicit tag will have the +// compound flag set. +// +// (This is used in order to remove ambiguity with optional elements.) +// +// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth, however we +// don't support that here. We support a single layer of EXPLICIT or IMPLICIT +// tagging with tag strings on the fields of a structure. + +// fieldParameters is the parsed representation of tag string from a structure field. +type fieldParameters struct { + optional bool // true iff the field is OPTIONAL + explicit bool // true iff an EXPLICIT tag is in use. + application bool // true iff an APPLICATION tag is in use. + defaultValue *int64 // a default value for INTEGER typed fields (maybe nil). + tag *int // the EXPLICIT or IMPLICIT tag (maybe nil). + stringType int // the string tag to use when marshaling. + timeType int // the time tag to use when marshaling. + set bool // true iff this should be encoded as a SET + omitEmpty bool // true iff this should be omitted if empty when marshaling. + + // Invariants: + // if explicit is set, tag is non-nil. +} + +// Given a tag string with the format specified in the package comment, +// parseFieldParameters will parse it into a fieldParameters structure, +// ignoring unknown parts of the string. 
+func parseFieldParameters(str string) (ret fieldParameters) { + for _, part := range strings.Split(str, ",") { + switch { + case part == "optional": + ret.optional = true + case part == "explicit": + ret.explicit = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "generalized": + ret.timeType = TagGeneralizedTime + case part == "utc": + ret.timeType = TagUTCTime + case part == "ia5": + ret.stringType = TagIA5String + // jtasn1 case below added + case part == "generalstring": + ret.stringType = TagGeneralString + case part == "printable": + ret.stringType = TagPrintableString + case part == "utf8": + ret.stringType = TagUTF8String + case strings.HasPrefix(part, "default:"): + i, err := strconv.ParseInt(part[8:], 10, 64) + if err == nil { + ret.defaultValue = new(int64) + *ret.defaultValue = i + } + case strings.HasPrefix(part, "tag:"): + i, err := strconv.Atoi(part[4:]) + if err == nil { + ret.tag = new(int) + *ret.tag = i + } + case part == "set": + ret.set = true + case part == "application": + ret.application = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "omitempty": + ret.omitEmpty = true + } + } + return +} + +// Given a reflected Go type, getUniversalType returns the default tag number +// and expected compound flag. +func getUniversalType(t reflect.Type) (tagNumber int, isCompound, ok bool) { + switch t { + case objectIdentifierType: + return TagOID, false, true + case bitStringType: + return TagBitString, false, true + case timeType: + return TagUTCTime, false, true + case enumeratedType: + return TagEnum, false, true + case bigIntType: + return TagInteger, false, true + } + switch t.Kind() { + case reflect.Bool: + return TagBoolean, false, true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return TagInteger, false, true + case reflect.Struct: + return TagSequence, true, true + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return TagOctetString, false, true + } + if strings.HasSuffix(t.Name(), "SET") { + return TagSet, true, true + } + return TagSequence, true, true + case reflect.String: + return TagPrintableString, false, true + } + return 0, false, false +} diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go new file mode 100644 index 00000000..f52eee9d --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go @@ -0,0 +1,659 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asn1 + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/big" + "reflect" + "time" + "unicode/utf8" +) + +// A forkableWriter is an in-memory buffer that can be +// 'forked' to create new forkableWriters that bracket the +// original. After +// pre, post := w.fork() +// the overall sequence of bytes represented is logically w+pre+post. 
+type forkableWriter struct { + *bytes.Buffer + pre, post *forkableWriter +} + +func newForkableWriter() *forkableWriter { + return &forkableWriter{new(bytes.Buffer), nil, nil} +} + +func (f *forkableWriter) fork() (pre, post *forkableWriter) { + if f.pre != nil || f.post != nil { + panic("have already forked") + } + f.pre = newForkableWriter() + f.post = newForkableWriter() + return f.pre, f.post +} + +func (f *forkableWriter) Len() (l int) { + l += f.Buffer.Len() + if f.pre != nil { + l += f.pre.Len() + } + if f.post != nil { + l += f.post.Len() + } + return +} + +func (f *forkableWriter) writeTo(out io.Writer) (n int, err error) { + n, err = out.Write(f.Bytes()) + if err != nil { + return + } + + var nn int + + if f.pre != nil { + nn, err = f.pre.writeTo(out) + n += nn + if err != nil { + return + } + } + + if f.post != nil { + nn, err = f.post.writeTo(out) + n += nn + } + return +} + +func marshalBase128Int(out *forkableWriter, n int64) (err error) { + if n == 0 { + err = out.WriteByte(0) + return + } + + l := 0 + for i := n; i > 0; i >>= 7 { + l++ + } + + for i := l - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + err = out.WriteByte(o) + if err != nil { + return + } + } + + return nil +} + +func marshalInt64(out *forkableWriter, i int64) (err error) { + n := int64Length(i) + + for ; n > 0; n-- { + err = out.WriteByte(byte(i >> uint((n-1)*8))) + if err != nil { + return + } + } + + return nil +} + +func int64Length(i int64) (numBytes int) { + numBytes = 1 + + for i > 127 { + numBytes++ + i >>= 8 + } + + for i < -128 { + numBytes++ + i >>= 8 + } + + return +} + +func marshalBigInt(out *forkableWriter, n *big.Int) (err error) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement + // form. So we'll subtract 1 and invert. If the + // most-significant-bit isn't set then we'll need to pad the + // beginning with 0xff in order to keep the number negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + err = out.WriteByte(0xff) + if err != nil { + return + } + } + _, err = out.Write(bytes) + } else if n.Sign() == 0 { + // Zero is written as a single 0 zero rather than no bytes. + err = out.WriteByte(0x00) + } else { + bytes := n.Bytes() + if len(bytes) > 0 && bytes[0]&0x80 != 0 { + // We'll have to pad this with 0x00 in order to stop it + // looking like a negative number. 
+ err = out.WriteByte(0) + if err != nil { + return + } + } + _, err = out.Write(bytes) + } + return +} + +func marshalLength(out *forkableWriter, i int) (err error) { + n := lengthLength(i) + + for ; n > 0; n-- { + err = out.WriteByte(byte(i >> uint((n-1)*8))) + if err != nil { + return + } + } + + return nil +} + +func lengthLength(i int) (numBytes int) { + numBytes = 1 + for i > 255 { + numBytes++ + i >>= 8 + } + return +} + +func marshalTagAndLength(out *forkableWriter, t tagAndLength) (err error) { + b := uint8(t.class) << 6 + if t.isCompound { + b |= 0x20 + } + if t.tag >= 31 { + b |= 0x1f + err = out.WriteByte(b) + if err != nil { + return + } + err = marshalBase128Int(out, int64(t.tag)) + if err != nil { + return + } + } else { + b |= uint8(t.tag) + err = out.WriteByte(b) + if err != nil { + return + } + } + + if t.length >= 128 { + l := lengthLength(t.length) + err = out.WriteByte(0x80 | byte(l)) + if err != nil { + return + } + err = marshalLength(out, t.length) + if err != nil { + return + } + } else { + err = out.WriteByte(byte(t.length)) + if err != nil { + return + } + } + + return nil +} + +func marshalBitString(out *forkableWriter, b BitString) (err error) { + paddingBits := byte((8 - b.BitLength%8) % 8) + err = out.WriteByte(paddingBits) + if err != nil { + return + } + _, err = out.Write(b.Bytes) + return +} + +func marshalObjectIdentifier(out *forkableWriter, oid []int) (err error) { + if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) { + return StructuralError{"invalid object identifier"} + } + + err = marshalBase128Int(out, int64(oid[0]*40+oid[1])) + if err != nil { + return + } + for i := 2; i < len(oid); i++ { + err = marshalBase128Int(out, int64(oid[i])) + if err != nil { + return + } + } + + return +} + +func marshalPrintableString(out *forkableWriter, s string) (err error) { + b := []byte(s) + for _, c := range b { + if !isPrintable(c) { + return StructuralError{"PrintableString contains invalid character"} + } + } + + _, err = out.Write(b) + return +} + +func marshalIA5String(out *forkableWriter, s string) (err error) { + b := []byte(s) + for _, c := range b { + if c > 127 { + return StructuralError{"IA5String contains invalid character"} + } + } + + _, err = out.Write(b) + return +} + +func marshalUTF8String(out *forkableWriter, s string) (err error) { + _, err = out.Write([]byte(s)) + return +} + +func marshalTwoDigits(out *forkableWriter, v int) (err error) { + err = out.WriteByte(byte('0' + (v/10)%10)) + if err != nil { + return + } + return out.WriteByte(byte('0' + v%10)) +} + +func marshalFourDigits(out *forkableWriter, v int) (err error) { + var bytes [4]byte + for i := range bytes { + bytes[3-i] = '0' + byte(v%10) + v /= 10 + } + _, err = out.Write(bytes[:]) + return +} + +func outsideUTCRange(t time.Time) bool { + year := t.Year() + return year < 1950 || year >= 2050 +} + +func marshalUTCTime(out *forkableWriter, t time.Time) (err error) { + year := t.Year() + + switch { + case 1950 <= year && year < 2000: + err = marshalTwoDigits(out, year-1900) + case 2000 <= year && year < 2050: + err = marshalTwoDigits(out, year-2000) + default: + return StructuralError{"cannot represent time as UTCTime"} + } + if err != nil { + return + } + + return marshalTimeCommon(out, t) +} + +func marshalGeneralizedTime(out *forkableWriter, t time.Time) (err error) { + year := t.Year() + if year < 0 || year > 9999 { + return StructuralError{"cannot represent time as GeneralizedTime"} + } + if err = marshalFourDigits(out, year); err != nil { + return + } + + return 
marshalTimeCommon(out, t) +} + +func marshalTimeCommon(out *forkableWriter, t time.Time) (err error) { + _, month, day := t.Date() + + err = marshalTwoDigits(out, int(month)) + if err != nil { + return + } + + err = marshalTwoDigits(out, day) + if err != nil { + return + } + + hour, min, sec := t.Clock() + + err = marshalTwoDigits(out, hour) + if err != nil { + return + } + + err = marshalTwoDigits(out, min) + if err != nil { + return + } + + err = marshalTwoDigits(out, sec) + if err != nil { + return + } + + _, offset := t.Zone() + + switch { + case offset/60 == 0: + err = out.WriteByte('Z') + return + case offset > 0: + err = out.WriteByte('+') + case offset < 0: + err = out.WriteByte('-') + } + + if err != nil { + return + } + + offsetMinutes := offset / 60 + if offsetMinutes < 0 { + offsetMinutes = -offsetMinutes + } + + err = marshalTwoDigits(out, offsetMinutes/60) + if err != nil { + return + } + + err = marshalTwoDigits(out, offsetMinutes%60) + return +} + +func stripTagAndLength(in []byte) []byte { + _, offset, err := parseTagAndLength(in, 0) + if err != nil { + return in + } + return in[offset:] +} + +func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameters) (err error) { + switch value.Type() { + case flagType: + return nil + case timeType: + t := value.Interface().(time.Time) + if params.timeType == TagGeneralizedTime || outsideUTCRange(t) { + return marshalGeneralizedTime(out, t) + } else { + return marshalUTCTime(out, t) + } + case bitStringType: + return marshalBitString(out, value.Interface().(BitString)) + case objectIdentifierType: + return marshalObjectIdentifier(out, value.Interface().(ObjectIdentifier)) + case bigIntType: + return marshalBigInt(out, value.Interface().(*big.Int)) + } + + switch v := value; v.Kind() { + case reflect.Bool: + if v.Bool() { + return out.WriteByte(255) + } else { + return out.WriteByte(0) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return marshalInt64(out, v.Int()) + case reflect.Struct: + t := v.Type() + + startingField := 0 + + // If the first element of the structure is a non-empty + // RawContents, then we don't bother serializing the rest. 
+ if t.NumField() > 0 && t.Field(0).Type == rawContentsType { + s := v.Field(0) + if s.Len() > 0 { + bytes := make([]byte, s.Len()) + for i := 0; i < s.Len(); i++ { + bytes[i] = uint8(s.Index(i).Uint()) + } + /* The RawContents will contain the tag and + * length fields but we'll also be writing + * those ourselves, so we strip them out of + * bytes */ + _, err = out.Write(stripTagAndLength(bytes)) + return + } else { + startingField = 1 + } + } + + for i := startingField; i < t.NumField(); i++ { + var pre *forkableWriter + pre, out = out.fork() + err = marshalField(pre, v.Field(i), parseFieldParameters(t.Field(i).Tag.Get("asn1"))) + if err != nil { + return + } + } + return + case reflect.Slice: + sliceType := v.Type() + if sliceType.Elem().Kind() == reflect.Uint8 { + bytes := make([]byte, v.Len()) + for i := 0; i < v.Len(); i++ { + bytes[i] = uint8(v.Index(i).Uint()) + } + _, err = out.Write(bytes) + return + } + + // jtasn1 Pass on the tags to the members but need to unset explicit switch and implicit value + //var fp fieldParameters + params.explicit = false + params.tag = nil + for i := 0; i < v.Len(); i++ { + var pre *forkableWriter + pre, out = out.fork() + err = marshalField(pre, v.Index(i), params) + if err != nil { + return + } + } + return + case reflect.String: + switch params.stringType { + case TagIA5String: + return marshalIA5String(out, v.String()) + case TagPrintableString: + return marshalPrintableString(out, v.String()) + default: + return marshalUTF8String(out, v.String()) + } + } + + return StructuralError{"unknown Go type"} +} + +func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters) (err error) { + if !v.IsValid() { + return fmt.Errorf("asn1: cannot marshal nil value") + } + // If the field is an interface{} then recurse into it. + if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 { + return marshalField(out, v.Elem(), params) + } + + if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty { + return + } + + if params.optional && params.defaultValue != nil && canHaveDefaultValue(v.Kind()) { + defaultValue := reflect.New(v.Type()).Elem() + defaultValue.SetInt(*params.defaultValue) + + if reflect.DeepEqual(v.Interface(), defaultValue.Interface()) { + return + } + } + + // If no default value is given then the zero value for the type is + // assumed to be the default value. This isn't obviously the correct + // behaviour, but it's what Go has traditionally done. 
+ if params.optional && params.defaultValue == nil { + if reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) { + return + } + } + + if v.Type() == rawValueType { + rv := v.Interface().(RawValue) + if len(rv.FullBytes) != 0 { + _, err = out.Write(rv.FullBytes) + } else { + err = marshalTagAndLength(out, tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound}) + if err != nil { + return + } + _, err = out.Write(rv.Bytes) + } + return + } + + tag, isCompound, ok := getUniversalType(v.Type()) + if !ok { + err = StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type())} + return + } + class := ClassUniversal + + if params.timeType != 0 && tag != TagUTCTime { + return StructuralError{"explicit time type given to non-time member"} + } + + // jtasn1 updated to allow slices of strings + if params.stringType != 0 && !(tag == TagPrintableString || (v.Kind() == reflect.Slice && tag == 16 && v.Type().Elem().Kind() == reflect.String)) { + return StructuralError{"explicit string type given to non-string member"} + } + + switch tag { + case TagPrintableString: + if params.stringType == 0 { + // This is a string without an explicit string type. We'll use + // a PrintableString if the character set in the string is + // sufficiently limited, otherwise we'll use a UTF8String. + for _, r := range v.String() { + if r >= utf8.RuneSelf || !isPrintable(byte(r)) { + if !utf8.ValidString(v.String()) { + return errors.New("asn1: string not valid UTF-8") + } + tag = TagUTF8String + break + } + } + } else { + tag = params.stringType + } + case TagUTCTime: + if params.timeType == TagGeneralizedTime || outsideUTCRange(v.Interface().(time.Time)) { + tag = TagGeneralizedTime + } + } + + if params.set { + if tag != TagSequence { + return StructuralError{"non sequence tagged as set"} + } + tag = TagSet + } + + tags, body := out.fork() + + err = marshalBody(body, v, params) + if err != nil { + return + } + + bodyLen := body.Len() + + var explicitTag *forkableWriter + if params.explicit { + explicitTag, tags = tags.fork() + } + + if !params.explicit && params.tag != nil { + // implicit tag. + tag = *params.tag + class = ClassContextSpecific + } + + err = marshalTagAndLength(tags, tagAndLength{class, tag, bodyLen, isCompound}) + if err != nil { + return + } + + if params.explicit { + err = marshalTagAndLength(explicitTag, tagAndLength{ + class: ClassContextSpecific, + tag: *params.tag, + length: bodyLen + tags.Len(), + isCompound: true, + }) + } + + return err +} + +// Marshal returns the ASN.1 encoding of val. +// +// In addition to the struct tags recognised by Unmarshal, the following can be +// used: +// +// ia5: causes strings to be marshaled as ASN.1, IA5 strings +// omitempty: causes empty slices to be skipped +// printable: causes strings to be marshaled as ASN.1, PrintableString strings. +// utf8: causes strings to be marshaled as ASN.1, UTF8 strings +func Marshal(val interface{}) ([]byte, error) { + var out bytes.Buffer + v := reflect.ValueOf(val) + f := newForkableWriter() + err := marshalField(f, v, fieldParameters{}) + if err != nil { + return nil, err + } + _, err = f.writeTo(&out) + return out.Bytes(), err +} diff --git a/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go b/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 00000000..75d41876 --- /dev/null +++ b/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,98 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. + +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + return Key64(password, salt, int64(iter), int64(keyLen), h) +} + +// Key64 derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. Key64 uses +// int64 for the iteration count and key length to allow larger values. +// The key is derived based on the method described as PBKDF2 with the HMAC +// variant using the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key64(password, salt []byte, iter, keyLen int64, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := int64(prf.Size()) + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := int64(1); block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... 
^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[int64(len(dk))-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := int64(2); n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +} diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml index 1f980775..730c7fa5 100644 --- a/vendor/github.com/jmespath/go-jmespath/.travis.yml +++ b/vendor/github.com/jmespath/go-jmespath/.travis.yml @@ -3,7 +3,15 @@ language: go sudo: false go: - - 1.4 + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x install: go get -v -t ./... script: make test diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md index 187ef676..110ad799 100644 --- a/vendor/github.com/jmespath/go-jmespath/README.md +++ b/vendor/github.com/jmespath/go-jmespath/README.md @@ -4,4 +4,84 @@ -See http://jmespath.org for more info. +go-jmespath is a GO implementation of JMESPath, +which is a query language for JSON. It will take a JSON +document and transform it into another JSON document +through a JMESPath expression. + +Using go-jmespath is really easy. There's a single function +you use, `jmespath.search`: + + +```go +> import "github.com/jmespath/go-jmespath" +> +> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data +> var data interface{} +> err := json.Unmarshal(jsondata, &data) +> result, err := jmespath.Search("foo.bar.baz[2]", data) +result = 2 +``` + +In the example we gave the ``search`` function input data of +`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}` as well as the JMESPath +expression `foo.bar.baz[2]`, and the `search` function evaluated +the expression against the input data to produce the result ``2``. + +The JMESPath language can do a lot more than select an element +from a list. Here are a few more examples: + +```go +> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data +> var data interface{} +> err := json.Unmarshal(jsondata, &data) +> result, err := jmespath.search("foo.bar", data) +result = { "baz": [ 0, 1, 2, 3, 4 ] } + + +> var jsondata = []byte(`{"foo": [{"first": "a", "last": "b"}, + {"first": "c", "last": "d"}]}`) // your data +> var data interface{} +> err := json.Unmarshal(jsondata, &data) +> result, err := jmespath.search({"foo[*].first", data) +result [ 'a', 'c' ] + + +> var jsondata = []byte(`{"foo": [{"age": 20}, {"age": 25}, + {"age": 30}, {"age": 35}, + {"age": 40}]}`) // your data +> var data interface{} +> err := json.Unmarshal(jsondata, &data) +> result, err := jmespath.search("foo[?age > `30`]") +result = [ { age: 35 }, { age: 40 } ] +``` + +You can also pre-compile your query. This is usefull if +you are going to run multiple searches with it: + +```go + > var jsondata = []byte(`{"foo": "bar"}`) + > var data interface{} + > err := json.Unmarshal(jsondata, &data) + > precompiled, err := Compile("foo") + > if err != nil{ + > // ... 
handle the error + > } + > result, err := precompiled.Search(data) + result = "bar" +``` + +## More Resources + +The example above only show a small amount of what +a JMESPath expression can do. If you want to take a +tour of the language, the *best* place to go is the +[JMESPath Tutorial](http://jmespath.org/tutorial.html). + +One of the best things about JMESPath is that it is +implemented in many different programming languages including +python, ruby, php, lua, etc. To see a complete list of libraries, +check out the [JMESPath libraries page](http://jmespath.org/libraries.html). + +And finally, the full JMESPath specification can be found +on the [JMESPath site](http://jmespath.org/specification.html). diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go index 8e26ffee..010efe9b 100644 --- a/vendor/github.com/jmespath/go-jmespath/api.go +++ b/vendor/github.com/jmespath/go-jmespath/api.go @@ -2,7 +2,7 @@ package jmespath import "strconv" -// JMESPath is the epresentation of a compiled JMES path query. A JMESPath is +// JMESPath is the representation of a compiled JMES path query. A JMESPath is // safe for concurrent use by multiple goroutines. type JMESPath struct { ast ASTNode diff --git a/vendor/github.com/jmespath/go-jmespath/go.mod b/vendor/github.com/jmespath/go-jmespath/go.mod new file mode 100644 index 00000000..aa1e3f1c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/go.mod @@ -0,0 +1,5 @@ +module github.com/jmespath/go-jmespath + +go 1.14 + +require github.com/stretchr/testify v1.5.1 diff --git a/vendor/github.com/jmespath/go-jmespath/go.sum b/vendor/github.com/jmespath/go-jmespath/go.sum new file mode 100644 index 00000000..331fa698 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go index 1240a175..4abc303a 100644 --- a/vendor/github.com/jmespath/go-jmespath/parser.go +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -137,7 +137,7 @@ func (p *Parser) Parse(expression string) (ASTNode, error) { } if p.current() != tEOF { return ASTNode{}, p.syntaxError(fmt.Sprintf( - "Unexpected token at the end of the expresssion: %s", p.current())) + "Unexpected token at the end of the expression: %s", p.current())) } return parsed, nil } 
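Taken together, the README snippets above amount to the following compiled-query flow. This is a minimal, self-contained sketch using the exported `Search` and `Compile` entry points from `api.go`; the sample JSON document is purely illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/jmespath/go-jmespath"
)

func main() {
	// Illustrative data; any JSON document works the same way.
	var data interface{}
	if err := json.Unmarshal([]byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`), &data); err != nil {
		log.Fatal(err)
	}

	// One-shot search.
	result, err := jmespath.Search("foo.bar.baz[2]", data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(result) // 2

	// Pre-compiled query, useful when the same expression is evaluated many times.
	expr, err := jmespath.Compile("foo.bar.baz[2]")
	if err != nil {
		log.Fatal(err)
	}
	result, err = expr.Search(data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(result) // 2
}
```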
diff --git a/vendor/github.com/klauspost/crc32/LICENSE b/vendor/github.com/klauspost/compress/LICENSE similarity index 96% rename from vendor/github.com/klauspost/crc32/LICENSE rename to vendor/github.com/klauspost/compress/LICENSE index 4fd5963e..1eb75ef6 100644 --- a/vendor/github.com/klauspost/crc32/LICENSE +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -1,5 +1,5 @@ Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2015 Klaus Post +Copyright (c) 2019 Klaus Post. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md new file mode 100644 index 00000000..ea7324da --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/README.md @@ -0,0 +1,79 @@ +# Finite State Entropy + +This package provides Finite State Entropy encoding and decoding. + +Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) +encoding provides a fast near-optimal symbol encoding/decoding +for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) + +## News + + * Feb 2018: First implementation released. Consider this beta software for now. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. +This means that the caller should keep track of block sizes and also do checksums if needed. + +Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. +You must provide input and will receive the output and maybe an error. + +These error values can be returned: + +| Error | Description | +|---------------------|-----------------------------------------------------------------------------| +| `` | Everything ok, output is returned | +| `ErrIncompressible` | Returned when input is judged to be too hard to compress | +| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | +| `(error)` | An internal error occurred. | + +As can be seen above there are errors that will be returned even under normal operation so it is important to handle these. + +To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object +that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same +object can be used for both. + +Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this +you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. 
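As a minimal sketch of the flow described above, assuming only the `Compress(in []byte, s *Scratch) ([]byte, error)` signature and the error values introduced in `compress.go`/`fse.go` later in this change:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/fse"
)

func main() {
	var s fse.Scratch // re-used across blocks to avoid allocations

	blocks := [][]byte{
		bytes.Repeat([]byte("abbcccdddd"), 100), // skewed distribution
		bytes.Repeat([]byte{'z'}, 100),          // single repeated value
	}
	for _, block := range blocks {
		out, err := fse.Compress(block, &s)
		switch err {
		case nil:
			// out aliases the Scratch buffer: copy it (or set s.Out = nil)
			// if it must survive the next call.
			kept := append([]byte(nil), out...)
			fmt.Printf("compressed %d -> %d bytes\n", len(block), len(kept))
		case fse.ErrIncompressible, fse.ErrUseRLE:
			// Normal outcomes per the table above; store the block raw or as RLE.
			fmt.Println("not entropy coded:", err)
		default:
			fmt.Println("internal error:", err)
		}
	}
}
```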
+ +Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). + +# Performance + +A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. +All compression functions are currently only running on the calling goroutine so only one core will be used per block. + +The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input +is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be +beneficial to transpose all your input values down by 64. + +With moderate block sizes around 64k speed are typically 200MB/s per core for compression and +around 300MB/s decompression speed. + +The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. + +# Plans + +At one point, more internals will be exposed to facilitate more "expert" usage of the components. + +A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go new file mode 100644 index 00000000..f65eb390 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitreader.go @@ -0,0 +1,122 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. 
+func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 00000000..43e46361 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,168 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. 
+func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 00000000..abade2d6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 00000000..b69237c9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,684 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. 
+func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] + return +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + default: + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + return s.bw.close() +} + +// writeCount will write the normalized histogram count to header. 
+// This is read back by readNCount. +func (s *Scratch) writeCount() error { + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + + // Write Table Size + bitStream = uint32(tableLog - minTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + ) + if cap(s.Out) < maxHeaderSize { + s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) + } + outP := uint(0) + out := s.Out[:maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return errors.New("internal error: remaining<1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += (bitCount + 7) / 8 + + if uint16(charnum) > s.symbolLen { + return errors.New("internal error: charnum > s.symbolLen") + } + s.Out = out[:outP] + return nil +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaFindState int32 + deltaNbBits uint32 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *Scratch) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < int(tableSize) { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. 
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = uint32(total >> tableLog) + lowOne = uint32((total * 3) >> (tableLog + 1)) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = uint32((total * 3) / (toDistribute * 2)) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
+func (s *Scratch) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 { + if previous0 { + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + n0 += 24 + if b.off < iend-5 { + b.advance(2) + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 16 + bitCount += 16 + } + } + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + for charnum < n0 { + s.norm[charnum&0xff] = 0 + charnum++ + } + + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*(threshold) - 1) - (remaining) + var count int32 + + if (int32(bitStream) & (threshold - 1)) < max { + count = int32(bitStream) & (threshold - 1) + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + count-- // extra accuracy + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + } + bitStream = b.Uint32() >> (bitCount & 31) + } + s.symbolLen = charnum + + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + return nil +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +type decSymbol struct { + newState uint16 + symbol uint8 + nbBits uint8 +} + +// allocDtable will allocate decoding tables if they are not big enough. 
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < int(tableSize) { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + br.init(s.br.unread()) + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = uint16(in.getBits(tableLog)) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 00000000..535cbadf --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
+ ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") +) + +// Scratch provides temporary storage for compression and decompression. +type Scratch struct { + // Private + count [maxSymbolValue + 1]uint32 + norm [maxSymbolValue + 1]int16 + br byteReader + bits bitReader + bw bitWriter + ct cTable // Compression tables. + decTable []decSymbol // Decompression table. + maxCount int // count of the most probable symbol + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // DecompressLimit limits the maximum decoded size acceptable. + // If > 0 decompression will stop when approximately this many bytes + // has been decoded. + // If 0, maximum size will be 2GB. + DecompressLimit int + + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + TableLog uint8 +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *Scratch) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = 255 + } + if s.TableLog == 0 { + s.TableLog = defaultTablelog + } + if s.TableLog > maxTableLog { + return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + s.br.init(in) + if s.DecompressLimit == 0 { + // Max size 2GB. + s.DecompressLimit = (2 << 30) - 1 + } + + return s, nil +} + +// tableStep returns the next table index. 
+func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore new file mode 100644 index 00000000..b3d26295 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/.gitignore @@ -0,0 +1 @@ +/huff0-fuzz.zip diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md new file mode 100644 index 00000000..e12da4db --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/README.md @@ -0,0 +1,87 @@ +# Huff0 entropy compression + +This package provides Huff0 encoding and decoding as used in zstd. + +[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders), +a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU +(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds. + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0) + +## News + + * Mar 2018: First implementation released. Consider this beta software for now. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. +This means that the caller should keep track of block sizes and also do checksums if needed. + +Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and +[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions. +You must provide input and will receive the output and maybe an error. + +These error values can be returned: + +| Error | Description | +|---------------------|-----------------------------------------------------------------------------| +| `` | Everything ok, output is returned | +| `ErrIncompressible` | Returned when input is judged to be too hard to compress | +| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | +| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 Kib) | +| `(error)` | An internal error occurred. | + + +As can be seen above some of there are errors that will be returned even under normal operation so it is important to handle these. + +To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object +that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same +object can be used for both. + +Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this +you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. + +The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding. 
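A rough caller-side sketch of the compression path just described, assuming a `Compress1X(in []byte, s *Scratch) ([]byte, bool, error)` shape (the returned bool reports table re-use, covered in the next section):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	// Skewed input so the entropy coder has something to work with.
	in := bytes.Repeat([]byte("aaaabbbccd"), 200)

	var s huff0.Scratch // re-used across blocks; set s.Out = nil if keeping the output
	out, reused, err := huff0.Compress1X(in, &s)
	switch err {
	case nil:
		fmt.Printf("compressed %d -> %d bytes (table re-used: %v)\n", len(in), len(out), reused)
	case huff0.ErrIncompressible, huff0.ErrUseRLE, huff0.ErrTooBig:
		// Normal outcomes per the table above; fall back to storing the block.
		fmt.Println("stored raw:", err)
	default:
		fmt.Println("internal error:", err)
	}
}
```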
+
+## Tables and re-use
+
+Huff0 allows reusing tables from the previous block to save space if that is expected to give better/faster results.
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
+that controls this behaviour. See the documentation for details. This can be altered between each block.
+
+Do however note that this information is *not* stored in the output block, and it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the `CompressXX` call.
+
+If you want to store the table separately from the data, you can access them as `OutData` and `OutTable` on the
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
+
+## Decompressing
+
+The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+You can supply the complete block to `ReadTable`, and it will return the data part of the block,
+which can be given to the decompressor.
+
+Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
+or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
+
+For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested,
+which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
+
+You must provide the output from the compression stage at exactly the size you got back. If you receive an error back,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
new file mode 100644
index 00000000..a4979e88
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -0,0 +1,329 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBit32(uint32(v))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) peekBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +func (b *bitReader) advance(n uint8) { + b.bitsRead += n +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderBytes struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderBytes) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. 
+func (b *bitReaderBytes) peekByteFast() uint8 { + got := uint8(b.value >> 56) + return got +} + +func (b *bitReaderBytes) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderBytes) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. +func (b *bitReaderBytes) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderBytes) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderBytes) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderBytes) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReaderShifted reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderShifted struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. 
+ b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderShifted) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 00000000..bda4021e --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,197 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + b.nBits += encA.nBits + encB.nBits +} + +// addBits16ZeroNC will add up to 16 bits. 
+// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + return + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + b.bitContainer >>= 1 << 3 + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + b.bitContainer >>= 2 << 3 + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + b.bitContainer >>= 3 << 3 + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + b.bitContainer >>= 4 << 3 + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + b.bitContainer >>= 5 << 3 + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + b.bitContainer >>= 6 << 3 + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + b.bitContainer >>= 7 << 3 + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + b.bitContainer = 0 + b.nBits = 0 + return + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go new file mode 100644 index 00000000..50bcdf6e --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go @@ -0,0 +1,54 @@ +// Copyright 2018 Klaus Post. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + v3 := int32(b.b[b.off+3]) + v2 := int32(b.b[b.off+2]) + v1 := int32(b.b[b.off+1]) + v0 := int32(b.b[b.off]) + return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + v3 := uint32(b.b[b.off+3]) + v2 := uint32(b.b[b.off+2]) + v1 := uint32(b.b[b.off+1]) + v0 := uint32(b.b[b.off]) + return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go new file mode 100644 index 00000000..0843cb01 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -0,0 +1,651 @@ +package huff0 + +import ( + "fmt" + "runtime" + "sync" +) + +// Compress1X will compress the input. +// The output can be decoded using Decompress1X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + return compress(in, s, s.compress1X) +} + +// Compress4X will compress the input. The input is split into 4 independent blocks +// and compressed similar to Compress1X. +// The output can be decoded using Decompress4X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + if false { + // TODO: compress4Xp only slightly faster. + const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. 
+ maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + + if s.Reuse == ReusePolicyPrefer && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. + s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src) +} + +func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. 
+ for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + err := bw.close() + return bw.out, err +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + var err error + idx := len(s.Out) + s.Out, err = s.compress1xDo(s.Out, toDo) + if err != nil { + return nil, err + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. +func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + var errs [4]error + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + if errs[i] != nil { + return nil, errs[i] + } + o := s.tmpOut[i] + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. 
+func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else { + if s.prevTable[i].nBits == 0 { + reuse = false + } + } + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. +func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.br.remain())) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := int16(startNode) + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. 
+ huffNode0 := s.nodes[0 : huffNodesLen+1] + + for huffNode[nonNullRank].count == 0 { + nonNullRank-- + } + + lowS := int16(nonNullRank) + nodeRoot := nodeNb + lowS - 1 + lowN := nodeNb + huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count + huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) + nodeNb++ + lowS -= 2 + for n := nodeNb; n <= nodeRoot; n++ { + huffNode[n].count = 1 << 30 + } + // fake entry, strong barrier + huffNode0[0].count = 1 << 31 + + // create parents + for nodeNb <= nodeRoot { + var n1, n2 int16 + if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + n1 = lowS + lowS-- + } else { + n1 = lowN + lowN++ + } + if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + n2 = lowS + lowS-- + } else { + n2 = lowN + lowN++ + } + + huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count + huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) + nodeNb++ + } + + // distribute weights (unlimited tree height) + huffNode[nodeRoot].nbBits = 0 + for n := nodeRoot - 1; n >= startNode; n-- { + huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + } + for n := uint16(0); n <= nonNullRank; n++ { + huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + } + s.actualTableLog = s.setMaxHeight(int(nonNullRank)) + maxNbBits := s.actualTableLog + + // fill result into tree (val, nbBits) + if maxNbBits > tableLogMax { + return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) + } + var nbPerRank [tableLogMax + 1]uint16 + var valPerRank [16]uint16 + for _, v := range huffNode[:nonNullRank+1] { + nbPerRank[v.nbBits]++ + } + // determine stating value per rank + { + min := uint16(0) + for n := maxNbBits; n > 0; n-- { + // get starting value within each rank + valPerRank[n] = min + min += nbPerRank[n] + min >>= 1 + } + } + + // push nbBits per symbol, symbol order + for _, v := range huffNode[:nonNullRank+1] { + s.cTable[v.symbol].nBits = v.nbBits + } + + // assign value within rank, symbol order + t := s.cTable[:s.symbolLen] + for n, val := range t { + nbits := val.nBits & 15 + v := valPerRank[nbits] + t[n].val = v + valPerRank[nbits] = v + 1 + } + + return nil +} + +// huffSort will sort symbols, decreasing order. +func (s *Scratch) huffSort() { + type rankPos struct { + base uint32 + current uint32 + } + + // Clear nodes + nodes := s.nodes[:huffNodesLen+1] + s.nodes = nodes + nodes = nodes[1 : huffNodesLen+1] + + // Sort into buckets based on length of symbol count. 
+ var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + } + return +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) + huffNode[n].nbBits = maxNbBits + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := uint8(maxNbBits) + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count + lowTotal := 2 * huffNode[lowPos].count + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].nbBits++ + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits == maxNbBits { + n-- + } + huffNode[n+1].nbBits-- + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].nbBits-- + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +type nodeElt struct { + count uint32 + parent uint16 + symbol byte + nbBits uint8 +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 00000000..a03b2634 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1146 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle + double []dEntryDouble +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// double-symbols decoding +type dEntryDouble struct { + seq uint16 + nBits uint8 + len uint8 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for decoding input using this table. 
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(in) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) <= int(iSize) { + return s, nil, errors.New("input too small for table") + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, err + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + rank := &rankStats[w] + single := s.dt.single[*rank : *rank+length] + for i := range single { + single[i] = d + } + *rank += length + } + + return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. 
+func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) 
+ + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + + shift := (8 - d.actualTableLog) & 7 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. 
+ var buf [256]byte + var off uint8 + + const shift = 0 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
+ const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream] = uint8(v.entry >> 8) + + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v2 := single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream+1] = uint8(v.entry >> 8) + + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v2 = single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream] = uint8(v.entry >> 8) + + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v2 := single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream+1] = uint8(v.entry >> 8) + + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v2 = single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) + off = 0 + out = out[bufoff:] + decoded += 256 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[:off]) + copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + for i := range br { + offset := dstEvery * i + br := &br[i] + bitsLeft := br.off*8 + uint(64-br.bitsRead) + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= len(out) { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (8 - d.actualTableLog) & 7 + + const tlSize = 1 << 8 + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = 
single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + off += 4 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) + off = 0 + out = out[bufoff:] + decoded += 256 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[:off]) + copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + for i := range br { + offset := dstEvery * i + br := &br[i] + bitsLeft := int(br.off*8) + int(64-br.bitsRead) + for bitsLeft > 0 { + if br.finished() { + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= len(out) { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[br.peekByteFast()>>shift].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= int(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. 
+func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 0 + const tlSize = 1 << 8 + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + off += 4 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + 
copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) + off = 0 + out = out[bufoff:] + decoded += 256 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[:off]) + copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + for i := range br { + offset := dstEvery * i + br := &br[i] + bitsLeft := int(br.off*8) + int(64-br.bitsRead) + for bitsLeft > 0 { + if br.finished() { + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= len(out) { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[br.peekByteFast()>>shift].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= int(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// matches will compare a decoding table to a coding table. +// Errors are written to the writer. +// Nothing will be written if table is ok. +func (s *Scratch) matches(ct cTable, w io.Writer) { + if s == nil || len(s.dt.single) == 0 { + return + } + dt := s.dt.single[:1<<s.actualTableLog] + tablelog := s.actualTableLog + ok := 0 + broken := 0 + for sym, enc := range ct { + errs := 0 + broken++ + if enc.nBits == 0 { + for _, dec := range dt { + if uint8(dec.entry>>8) == byte(sym) { + fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym) + errs++ + break + } + } + if errs == 0 { + broken-- + } + continue + } + // Unused bits in input + ub := tablelog - enc.nBits + top := enc.val << ub + // decoder looks at top bits. + dec := dt[top] + if uint8(dec.entry) != enc.nBits { + fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry)) + errs++ + } + if uint8(dec.entry>>8) != uint8(sym) { + fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8)) + errs++ + } + if errs > 0 { + fmt.Fprintf(w, "%d errros in base, stopping\n", errs) + continue + } + // Ensure that all combinations are covered. 
+ for i := uint16(0); i < (1 << ub); i++ { + vval := top | i + dec := dt[vval] + if uint8(dec.entry) != enc.nBits { + fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry)) + errs++ + } + if uint8(dec.entry>>8) != uint8(sym) { + fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8)) + errs++ + } + if errs > 20 { + fmt.Fprintf(w, "%d errros, stopping\n", errs) + break + } + } + if errs == 0 { + ok++ + broken-- + } + } + if broken > 0 { + fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) + } +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go new file mode 100644 index 00000000..177d6c4e --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -0,0 +1,260 @@ +// Package huff0 provides fast huffman encoding as used in zstd. +// +// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. +package huff0 + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/fse" +) + +const ( + maxSymbolValue = 255 + + // zstandard limits tablelog to 11, see: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description + tableLogMax = 11 + tableLogDefault = 11 + minTablelog = 5 + huffNodesLen = 512 + + // BlockSizeMax is maximum input size for a single block uncompressed. + BlockSizeMax = 1<<18 - 1 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. + ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") + + // ErrTooBig is return if input is too large for a single block. + ErrTooBig = errors.New("input too big") + + // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. + ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") +) + +type ReusePolicy uint8 + +const ( + // ReusePolicyAllow will allow reuse if it produces smaller output. + ReusePolicyAllow ReusePolicy = iota + + // ReusePolicyPrefer will re-use aggressively if possible. + // This will not check if a new table will produce smaller output, + // except if the current table is impossible to use or + // compressed output is bigger than input. + ReusePolicyPrefer + + // ReusePolicyNone will disable re-use of tables. + // This is slightly faster than ReusePolicyAllow but may produce larger output. + ReusePolicyNone +) + +type Scratch struct { + count [maxSymbolValue + 1]uint32 + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // OutTable will contain the table data only, if a new table has been generated. + // Slice of the returned data. + OutTable []byte + + // OutData will contain the compressed data. + // Slice of the returned data. + OutData []byte + + // MaxDecodedSize will set the maximum allowed output size. + // This value will automatically be set to BlockSizeMax if not set. 
+ // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. + MaxDecodedSize int + + br byteReader + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + // Must be <= 11 and >= 5. + TableLog uint8 + + // Reuse will specify the reuse policy + Reuse ReusePolicy + + // WantLogLess allows to specify a log 2 reduction that should at least be achieved, + // otherwise the block will be returned as incompressible. + // The reduction should then at least be (input size >> WantLogLess) + // If WantLogLess == 0 any improvement will do. + WantLogLess uint8 + + symbolLen uint16 // Length of active part of the symbol table. + maxCount int // count of the most probable symbol + clearCount bool // clear count + actualTableLog uint8 // Selected tablelog. + prevTableLog uint8 // Tablelog for previous table + prevTable cTable // Table used for previous compression. + cTable cTable // compression table + dt dTable // decompression table + nodes []nodeElt + tmpOut [4][]byte + fse *fse.Scratch + huffWeight [maxSymbolValue + 1]byte +} + +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if len(in) > BlockSizeMax { + return nil, ErrTooBig + } + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = maxSymbolValue + } + if s.TableLog == 0 { + s.TableLog = tableLogDefault + } + if s.TableLog > tableLogMax || s.TableLog < minTablelog { + return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) + } + if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { + s.MaxDecodedSize = BlockSizeMax + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + s.Out = s.Out[:0] + + s.OutTable = nil + s.OutData = nil + if cap(s.nodes) < huffNodesLen+1 { + s.nodes = make([]nodeElt, 0, huffNodesLen+1) + } + s.nodes = s.nodes[:0] + if s.fse == nil { + s.fse = &fse.Scratch{} + } + s.br.init(in) + + return s, nil +} + +type cTable []cTableEntry + +func (c cTable) write(s *Scratch) error { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) 
+ return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. + op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/snappy/.gitignore b/vendor/github.com/klauspost/compress/snappy/.gitignore new file mode 100644 index 00000000..042091d9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/.gitignore @@ -0,0 +1,16 @@ +cmd/snappytool/snappytool +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/klauspost/compress/snappy/AUTHORS b/vendor/github.com/klauspost/compress/snappy/AUTHORS new file mode 100644 index 00000000..bcfa1952 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS new file mode 100644 index 00000000..931ae316 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). 
+# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/LICENSE b/vendor/github.com/klauspost/compress/snappy/LICENSE new file mode 100644 index 00000000..6050c10f --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/snappy/README b/vendor/github.com/klauspost/compress/snappy/README new file mode 100644 index 00000000..cea12879 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. + +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." 
+ +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. -tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/klauspost/compress/snappy/decode.go b/vendor/github.com/klauspost/compress/snappy/decode.go new file mode 100644 index 00000000..72efb035 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. 
+func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.go b/vendor/github.com/klauspost/compress/snappy/decode_amd64.go new file mode 100644 index 00000000..fcd192b8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s new file mode 100644 index 00000000..1c66e372 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s @@ -0,0 +1,482 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. 
The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. 
+ MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + CMPQ SI, R13 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + CMPQ SI, R13 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + CMPQ SI, R13 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + CMPQ SI, R13 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. 
+ // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ CX, $16 + JGT slowForwardCopy + CMPQ DX, $8 + JLT slowForwardCopy + CMPQ R14, $16 + JLT slowForwardCopy + MOVQ 0(R15), AX + MOVQ AX, 0(DI) + MOVQ 8(R15), BX + MOVQ BX, 8(DI) + ADDQ CX, DI + JMP loop + +slowForwardCopy: + // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte <d-offset> and <d> patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from <d-offset> to <d> will repeat the pattern + // once, after which we can move <d> two bytes without moving <d-offset>: + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R14 + CMPQ CX, R14 + JGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. 
+ // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/snappy/decode_other.go b/vendor/github.com/klauspost/compress/snappy/decode_other.go new file mode 100644 index 00000000..94a96c5d --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_other.go @@ -0,0 +1,115 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/snappy/encode.go b/vendor/github.com/klauspost/compress/snappy/encode.go new file mode 100644 index 00000000..8d393e90 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. 
+ d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. 
+ // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. 
+ n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.go b/vendor/github.com/klauspost/compress/snappy/encode_amd64.go new file mode 100644 index 00000000..150d91bc --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. 
+// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.s b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s new file mode 100644 index 00000000..adfd979f --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. 
+ MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. 
+// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. + MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. 
+ // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. + + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. 
+ MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/snappy/encode_other.go b/vendor/github.com/klauspost/compress/snappy/encode_other.go new file mode 100644 index 00000000..dbcae905 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. 
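+	// The tag byte packs three fields: bits 0-1 hold tagCopy1, bits 2-4 hold
+	// length-4 (so lengths 4..11), and bits 5-7 hold bits 8-10 of the offset.
+	// The second byte holds the low 8 bits of the offset.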
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. 
But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/snappy/runbench.cmd b/vendor/github.com/klauspost/compress/snappy/runbench.cmd new file mode 100644 index 00000000..d24eb4b4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/runbench.cmd @@ -0,0 +1,2 @@ +del old.txt +go test -bench=. >>old.txt && go test -bench=. >>old.txt && go test -bench=. >>old.txt && benchstat -delta-test=ttest old.txt new.txt diff --git a/vendor/github.com/klauspost/compress/snappy/snappy.go b/vendor/github.com/klauspost/compress/snappy/snappy.go new file mode 100644 index 00000000..74a36689 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. 
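+//
+// As a rough illustration (assuming the Encode helper and the buffered Writer
+// here mirror github.com/golang/snappy, from which this package is derived;
+// error handling elided):
+//
+//	block := Encode(nil, data)   // block format: size known upfront
+//	w := NewBufferedWriter(dst)  // stream (framing) format
+//	w.Write(data)
+//	w.Close()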
+// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md new file mode 100644 index 00000000..ac3640dc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -0,0 +1,427 @@ +# zstd + +[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. 
+It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. +A high performance compression algorithm is implemented. For now focused on speed. + +This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. +Note that custom dictionaries are only supported for decompression. + +This package is pure Go and without use of "unsafe". + +The `zstd` package is provided as open source software using a Go standard license. + +Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. + +## Installation + +Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. + +Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd + + +## Compressor + +### Status: + +STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively +used by several projects. This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), +kindly supplied by [fuzzit.dev](https://fuzzit.dev/). + +There may still be specific combinations of data types/size/settings that could lead to edge cases, +so as always, testing is recommended. + +For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. + +The "Fastest" compression ratio is roughly equivalent to zstd level 1. +The "Default" compression ratio is roughly equivalent to zstd level 3 (default). + +In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. +The compression ratio compared to stdlib is around level 3, but usually 3x as fast. + +Compared to cgo zstd, the speed is around level 3 (default), but compression slightly worse, between level 1&2. + + +### Usage + +An Encoder can be used for either compressing a stream via the +`io.WriteCloser` interface supported by the Encoder or as multiple independent +tasks via the `EncodeAll` function. +Smaller encodes are encouraged to use the EncodeAll function. +Use `NewWriter` to create a new instance that can be used for both. + +To create a writer with default options, do like this: + +```Go +// Compress input to output. +func Compress(in io.Reader, out io.Writer) error { + w, err := NewWriter(output) + if err != nil { + return err + } + _, err := io.Copy(w, input) + if err != nil { + enc.Close() + return err + } + return enc.Close() +} +``` + +Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called. +Even if your encode fails, you should still call `Close()` to release any resources that may be held up. + +The above is fine for big encodes. However, whenever possible try to *reuse* the writer. + +To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. +This will allow the encoder to reuse all resources and avoid wasteful allocations. + +Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part +of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change +in the future. So if you want to limit concurrency for future updates, specify the concurrency +you would like. + +You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined +compression settings can be specified. 
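+
+A self-contained variant of the helper above that picks an explicit level
+(a sketch; `SpeedDefault` is the pre-defined level that corresponds to the
+zstd level 3 default mentioned above):
+
+```Go
+import (
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// Compress copies in to out, compressing the data with zstd on the way.
+func Compress(in io.Reader, out io.Writer) error {
+	enc, err := zstd.NewWriter(out, zstd.WithEncoderLevel(zstd.SpeedDefault))
+	if err != nil {
+		return err
+	}
+	if _, err = io.Copy(enc, in); err != nil {
+		enc.Close()
+		return err
+	}
+	// Close flushes any buffered data and writes the stream epilogue.
+	return enc.Close()
+}
+```
+
+To reuse the same encoder for another stream, call `enc.Reset(nextOutput)` on it
+instead of allocating a new writer.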
+ +#### Future Compatibility Guarantees + +This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change. + +The goal will be to keep the default efficiency at the default zstd (level 3). +However the encoding should never be assumed to remain the same, +and you should not use hashes of compressed output for similarity checks. + +The Encoder can be assumed to produce the same output from the exact same code version. +However, the may be modes in the future that break this, +although they will not be enabled without an explicit option. + +This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder. + +Also note, that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59), +[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43) +and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames). + +#### Blocks + +For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. + +`EncodeAll` will encode all input in src and append it to dst. +This function can be called concurrently, but each call will only run on a single goroutine. + +Encoded blocks can be concatenated and the result will be the combined input stream. +Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`. + +Especially when encoding blocks you should take special care to reuse the encoder. +This will effectively make it run without allocations after a warmup period. +To make it run completely without allocations, supply a destination buffer with space for all content. + +```Go +import "github.com/klauspost/compress/zstd" + +// Create a writer that caches compressors. +// For this operation type we supply a nil Reader. +var encoder, _ = zstd.NewWriter(nil) + +// Compress a buffer. +// If you have a destination buffer, the allocation in the call can also be eliminated. +func Compress(src []byte) []byte { + return encoder.EncodeAll(src, make([]byte, 0, len(src))) +} +``` + +You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)` +option when creating the writer. + +Using the Encoder for both a stream and individual blocks concurrently is safe. + +### Performance + +I have collected some speed examples to compare speed and compression against other compressors. + +* `file` is the input file. +* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library. +* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default". +* `insize`/`outsize` is the input/output size. +* `millis` is the number of milliseconds used for compression. +* `mb/s` is megabytes (2^20 bytes) per second. 
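+
+As a worked example of how the columns relate: the first Silesia row below
+(`zskp` level 1) turns 211947520 bytes, about 211947520 / 2^20 ≈ 202.1 MB, into
+73101992 bytes in 643 ms, which is roughly 202.1 / 0.643 ≈ 314 MB/s and matches
+the reported 313.87 up to rounding.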
+ +``` +Silesia Corpus: +http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip + +This package: +file out level insize outsize millis mb/s +silesia.tar zskp 1 211947520 73101992 643 313.87 +silesia.tar zskp 2 211947520 67504318 969 208.38 +silesia.tar zskp 3 211947520 65177448 1899 106.44 + +cgo zstd: +silesia.tar zstd 1 211947520 73605392 543 371.56 +silesia.tar zstd 3 211947520 66793289 864 233.68 +silesia.tar zstd 6 211947520 62916450 1913 105.66 + +gzip, stdlib/this package: +silesia.tar gzstd 1 211947520 80007735 1654 122.21 +silesia.tar gzkp 1 211947520 80369488 1168 173.06 + +GOB stream of binary data. Highly compressible. +https://files.klauspost.com/compress/gob-stream.7z + +file out level insize outsize millis mb/s +gob-stream zskp 1 1911399616 235022249 3088 590.30 +gob-stream zskp 2 1911399616 205669791 3786 481.34 +gob-stream zskp 3 1911399616 185792019 9324 195.48 +gob-stream zstd 1 1911399616 249810424 2637 691.26 +gob-stream zstd 3 1911399616 208192146 3490 522.31 +gob-stream zstd 6 1911399616 193632038 6687 272.56 +gob-stream gzstd 1 1911399616 357382641 10251 177.82 +gob-stream gzkp 1 1911399616 362156523 5695 320.08 + +The test data for the Large Text Compression Benchmark is the first +10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. +http://mattmahoney.net/dc/textdata.html + +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343848582 3609 264.18 +enwik9 zskp 2 1000000000 317276632 5746 165.97 +enwik9 zskp 3 1000000000 294540704 11725 81.34 +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 gzstd 1 1000000000 382578136 9604 99.30 +enwik9 gzkp 1 1000000000 383825945 6544 145.73 + +Highly compressible JSON file. 
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst + +file out level insize outsize millis mb/s +github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40 +github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96 +github-june-2days-2019.json zskp 3 6273951764 537511906 29252 204.54 +github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 +github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 +github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 +github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79 +github-june-2days-2019.json gzkp 1 6273951764 1128755542 19236 311.03 + +VM Image, Linux mint with a few installed applications: +https://files.klauspost.com/compress/rawstudio-mint14.7z + +file out level insize outsize millis mb/s +rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84 +rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07 +rawstudio-mint14.tar zskp 3 8558382592 3224594213 71751 113.75 +rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 +rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 +rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 +rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40 +rawstudio-mint14.tar gzkp 1 8558382592 3970463184 41749 195.49 + +CSV data: +https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst + +file out level insize outsize millis mb/s +nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35 +nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44 +nyc-taxi-data-10M.csv zskp 3 3325605752 538490114 19880 159.53 +nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 +nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 +nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 +nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83 +nyc-taxi-data-10M.csv gzkp 1 3325605752 924718719 16388 193.53 +``` + +### Converters + +As part of the development process a *Snappy* -> *Zstandard* converter was also built. + +This can convert a *framed* [Snappy Stream](https://godoc.org/github.com/golang/snappy#Writer) to a zstd stream. +Note that a single block is not framed. + +Conversion is done by converting the stream directly from Snappy without intermediate full decoding. +Therefore the compression ratio is much less than what can be done by a full decompression +and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +any errors being generated. +No CRC value is being generated and not all CRC values of the Snappy stream are checked. +However, it provides really fast re-compression of Snappy streams. + + +``` +BenchmarkSnappy_ConvertSilesia-8 1 1156001600 ns/op 183.35 MB/s +Snappy len 103008711 -> zstd len 82687318 + +BenchmarkSnappy_Enwik9-8 1 6472998400 ns/op 154.49 MB/s +Snappy len 508028601 -> zstd len 390921079 +``` + + +```Go + s := zstd.SnappyConverter{} + n, err = s.Convert(input, output) + if err != nil { + fmt.Println("Re-compressed stream to", n, "bytes") + } +``` + +The converter `s` can be reused to avoid allocations, even after errors. + + +## Decompressor + +Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. + +This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), +kindly supplied by [fuzzit.dev](https://fuzzit.dev/). 
+The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, +or run it past its limits with ANY input provided. + +### Usage + +The package has been designed for two main usages, big streams of data and smaller in-memory buffers. +There are two main usages of the package for these. Both of them are accessed by creating a `Decoder`. + +For streaming use a simple setup could look like this: + +```Go +import "github.com/klauspost/compress/zstd" + +func Decompress(in io.Reader, out io.Writer) error { + d, err := zstd.NewReader(input) + if err != nil { + return err + } + defer d.Close() + + // Copy content... + _, err := io.Copy(out, d) + return err +} +``` + +It is important to use the "Close" function when you no longer need the Reader to stop running goroutines. +See "Allocation-less operation" below. + +For decoding buffers, it could look something like this: + +```Go +import "github.com/klauspost/compress/zstd" + +// Create a reader that caches decompressors. +// For this operation type we supply a nil Reader. +var decoder, _ = zstd.NewReader(nil) + +// Decompress a buffer. We don't supply a destination buffer, +// so it will be allocated by the decoder. +func Decompress(src []byte) ([]byte, error) { + return decoder.DecodeAll(src, nil) +} +``` + +Both of these cases should provide the functionality needed. +The decoder can be used for *concurrent* decompression of multiple buffers. +It will only allow a certain number of concurrent operations to run. +To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. + +### Dictionaries + +Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed. + +Dictionaries are added individually to Decoders. +Dictionaries are generated by the `zstd --train` command and contains an initial state for the decoder. +To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data. +Several dictionaries can be added at once. + +The dictionary will be used automatically for the data that specifies them. +A re-used Decoder will still contain the dictionaries registered. + +When registering multiple dictionaries with the same ID, the last one will be used. + +### Allocation-less operation + +The decoder has been designed to operate without allocations after a warmup. + +This means that you should *store* the decoder for best performance. +To re-use a stream decoder, use the `Reset(r io.Reader) error` to switch to another stream. +A decoder can safely be re-used even if the previous stream failed. + +To release the resources, you must call the `Close()` function on a decoder. +After this it can *no longer be reused*, but all running goroutines will be stopped. +So you *must* use this if you will no longer need the Reader. + +For decompressing smaller buffers a single decoder can be used. +When decoding buffers, you can supply a destination slice with length 0 and your expected capacity. +In this case no unneeded allocations should be made. + +### Concurrency + +The buffer decoder does everything on the same goroutine and does nothing concurrently. +It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. + +The stream decoder operates on + +* One goroutine reads input and splits the input to several block decoders. +* A number of decoders will decode blocks. 
+* A goroutine coordinates these blocks and sends history from one to the next. + +So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. + +Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. + +In practice this means that concurrency is often limited to utilizing about 2 cores effectively. + + +### Benchmarks + +These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd). + +The first two are streaming decodes and the last are smaller inputs. + +``` +BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op +BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op + +BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op +BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op + +Concurrent performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op + +BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 
ns/op 2581.71 MB/s 0 B/op 0 allocs/op +``` + +This reflects the performance around May 2020, but this may be out of date. + +# Contributions + +Contributions are always welcome. +For new features/fixes, remember to add tests and for performance enhancements include benchmarks. + +For sending files for reproducing errors use a service like [goobox](https://goobox.io/#/upload) or similar to share your files. + +For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). + +This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go new file mode 100644 index 00000000..85445853 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -0,0 +1,136 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "io" + "math/bits" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 // Maybe use [16]byte, but shifting is awkward. + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) int { + if n == 0 /*|| b.bitsRead >= 64 */ { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) int { + const regMask = 64 - 1 + v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return int(v) +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. 
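+// For example, with bitsRead == 40 and off == 10 it shifts value left by 32,
+// loads in[6:10] (little-endian) into the low 32 bits and leaves bitsRead == 8
+// and off == 6. With fewer than 4 bytes left it falls back to reading the
+// remaining bytes one at a time.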
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off >= 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return b.off*8 + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 00000000..303ae90f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,169 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 32 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. 
+// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 00000000..c8ec6e33 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,739 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type blockType uint8 + +//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex + +const ( + blockTypeRaw blockType = iota + blockTypeRLE + blockTypeCompressed + blockTypeReserved +) + +type literalsBlockType uint8 + +const ( + literalsBlockRaw literalsBlockType = iota + literalsBlockRLE + literalsBlockCompressed + literalsBlockTreeless +) + +const ( + // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) + maxCompressedBlockSize = 128 << 10 + + // Maximum possible block size (all Raw+Uncompressed). + maxBlockSize = (1 << 21) - 1 + + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header + maxCompressedLiteralSize = 1 << 18 + maxRLELiteralSize = 1 << 20 + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff + + // We support slightly less than the reference decoder to be able to + // use ints on 32 bit archs. + maxOffsetBits = 30 +) + +var ( + huffDecoderPool = sync.Pool{New: func() interface{} { + return &huff0.Scratch{} + }} + + fseDecoderPool = sync.Pool{New: func() interface{} { + return &fseDecoder{} + }} +) + +type blockDec struct { + // Raw source data of the block. + data []byte + dataStorage []byte + + // Destination of the decoded data. + dst []byte + + // Buffer for literals data. + literalBuf []byte + + // Window size of the block. + WindowSize uint64 + + history chan *history + input chan struct{} + result chan decodeOutput + sequenceBuf []seq + err error + decWG sync.WaitGroup + + // Frame to use for singlethreaded decoding. + // Should not be used by the decoder itself since parent may be another frame. + localFrame *frameDec + + // Block is RLE, this is the size. + RLESize uint32 + tmp [4]byte + + Type blockType + + // Is this the last block of a frame? + Last bool + + // Use less memory + lowMem bool +} + +func (b *blockDec) String() string { + if b == nil { + return "" + } + return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) +} + +func newBlockDec(lowMem bool) *blockDec { + b := blockDec{ + lowMem: lowMem, + result: make(chan decodeOutput, 1), + input: make(chan struct{}, 1), + history: make(chan *history, 1), + } + b.decWG.Add(1) + go b.startDecoder() + return &b +} + +// reset will reset the block. +// Input must be a start of a block and will be at the end of the block when returned. +func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { + b.WindowSize = windowSize + tmp := br.readSmall(3) + if tmp == nil { + if debug { + println("Reading block header:", io.ErrUnexpectedEOF) + } + return io.ErrUnexpectedEOF + } + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + b.Last = bh&1 != 0 + b.Type = blockType((bh >> 1) & 3) + // find size. 
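+	// For example, header bytes {0x21, 0x00, 0x01} give bh == 0x010021:
+	// Last == true, Type == blockTypeRaw and a block size of 0x2004 (8196) bytes.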
+ cSize := int(bh >> 3) + maxSize := maxBlockSize + switch b.Type { + case blockTypeReserved: + return ErrReservedBlockType + case blockTypeRLE: + b.RLESize = uint32(cSize) + if b.lowMem { + maxSize = cSize + } + cSize = 1 + case blockTypeCompressed: + if debug { + println("Data size on stream:", cSize) + } + b.RLESize = 0 + maxSize = maxCompressedBlockSize + if windowSize < maxCompressedBlockSize && b.lowMem { + maxSize = int(windowSize) + } + if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { + if debug { + printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrCompressedSizeTooBig + } + case blockTypeRaw: + b.RLESize = 0 + // We do not need a destination for raw blocks. + maxSize = -1 + default: + panic("Invalid block type") + } + + // Read block data. + if cap(b.dataStorage) < cSize { + if b.lowMem { + b.dataStorage = make([]byte, 0, cSize) + } else { + b.dataStorage = make([]byte, 0, maxBlockSize) + } + } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } + var err error + b.data, err = br.readBig(cSize, b.dataStorage) + if err != nil { + if debug { + println("Reading block:", err, "(", cSize, ")", len(b.data)) + printf("%T", br) + } + return err + } + return nil +} + +// sendEOF will make the decoder send EOF on this frame. +func (b *blockDec) sendErr(err error) { + b.Last = true + b.Type = blockTypeReserved + b.err = err + b.input <- struct{}{} +} + +// Close will release resources. +// Closed blockDec cannot be reset. +func (b *blockDec) Close() { + close(b.input) + close(b.history) + close(b.result) + b.decWG.Wait() +} + +// decodeAsync will prepare decoding the block when it receives input. +// This will separate output and history. +func (b *blockDec) startDecoder() { + defer b.decWG.Done() + for range b.input { + //println("blockDec: Got block input") + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxBlockSize) + } + } + o := decodeOutput{ + d: b, + b: b.dst[:b.RLESize], + err: nil, + } + v := b.data[0] + for i := range o.b { + o.b[i] = v + } + hist := <-b.history + hist.append(o.b) + b.result <- o + case blockTypeRaw: + o := decodeOutput{ + d: b, + b: b.data, + err: nil, + } + hist := <-b.history + hist.append(o.b) + b.result <- o + case blockTypeCompressed: + b.dst = b.dst[:0] + err := b.decodeCompressed(nil) + o := decodeOutput{ + d: b, + b: b.dst, + err: err, + } + if debug { + println("Decompressed to", len(b.dst), "bytes, error:", err) + } + b.result <- o + case blockTypeReserved: + // Used for returning errors. + <-b.history + b.result <- decodeOutput{ + d: b, + b: nil, + err: b.err, + } + default: + panic("Invalid block type") + } + if debug { + println("blockDec: Finished block") + } + } +} + +// decodeAsync will prepare decoding the block when it receives the history. +// If history is provided, it will not fetch it from the channel. 
+func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + b.dst = hist.b + hist.b = nil + err := b.decodeCompressed(hist) + if debug { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + hist.b = b.dst + b.dst = saved + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +// decodeCompressed will start decompressing a block. +// If no history is supplied the decoder will decodeAsync as much as possible +// before fetching from blockDec.history +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + delayedHistory := hist == nil + + if delayedHistory { + // We must always grab history. + defer func() { + if hist == nil { + <-b.history + } + }() + } + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return ErrBlockTooSmall + } + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debug { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + var literals []byte + var huff *huff0.Scratch + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize) + } else { + if litRegenSize > maxCompressedLiteralSize { + // Exceptional + b.literalBuf = make([]byte, litRegenSize) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) + + } + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debug { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debug { + printf("Found %d compressed literals\n", litCompSize) + } + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + huff = huffDecoderPool.Get().(*huff0.Scratch) + var err error + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize) + } else { + b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + } + } + if huff == nil { + huff = &huff0.Scratch{} + } + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return err + } + // Use our out buffer. 
+ if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + if debug { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + seqHeader := in[0] + nSeqs := 0 + switch { + case seqHeader == 0: + in = in[1:] + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + // Allocate sequences + if cap(b.sequenceBuf) < nSeqs { + if b.lowMem { + b.sequenceBuf = make([]seq, nSeqs) + } else { + // Allocate max + b.sequenceBuf = make([]seq, nSeqs, maxSequences) + } + } else { + // Reuse buffer + b.sequenceBuf = b.sequenceBuf[:nSeqs] + } + var seqs = &sequenceDecs{} + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debug { + printf("Compression modes: 0b%b", compMode) + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debug { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + dec := fseDecoderPool.Get().(*fseDecoder) + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + dec.setRLE(symb) + seq.fse = dec + if debug { + printf("RLE set to %+v, code: %v", symb, v) + } + case compModeFSE: + println("Reading table for", tableIndex(i)) + dec := fseDecoderPool.Get().(*fseDecoder) + err := dec.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debug { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + seq.fse = dec + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + + // Wait for history. + // All time spent after this is critical since it is strictly sequential. + if hist == nil { + hist = <-b.history + if hist.error { + return ErrDecoderClosed + } + } + + // Decode treeless literal block. + if litType == literalsBlockTreeless { + // TODO: We could send the history early WITHOUT the stream history. + // This would allow decoding treeless literials before the byte history is available. 
+ // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless. + // So not much obvious gain here. + + if hist.huffTree == nil { + return errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize) + } else { + b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + } + } + var err error + // Use our out buffer. + huff = hist.huffTree + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return err + } + if len(literals) != litRegenSize { + return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + } else { + if hist.huffTree != nil && huff != nil { + if hist.dict == nil || hist.dict.litDec != hist.huffTree { + huffDecoderPool.Put(hist.huffTree) + } + hist.huffTree = nil + } + } + if huff != nil { + hist.huffTree = huff + } + if debug { + println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") + } + + if nSeqs == 0 { + // Decompressed content is defined entirely as Literals Section content. + b.dst = append(b.dst, literals...) + if delayedHistory { + hist.append(literals) + } + return nil + } + + seqs, err := seqs.mergeHistory(&hist.decoders) + if err != nil { + return err + } + if debug { + println("History merged ok") + } + br := &bitReader{} + if err := br.init(in); err != nil { + return err + } + + // TODO: Investigate if sending history without decoders are faster. + // This would allow the sequences to be decoded async and only have to construct stream history. + // If only recent offsets were not transferred, this would be an obvious win. + // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. + + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history any more. + if hist.dict != nil { + hist.dict.content = nil + } + } + + if err := seqs.initialize(br, hist, literals, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + + err = seqs.decode(nSeqs, br, hbytes) + if err != nil { + return err + } + if !br.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", br.remain()) + } + + err = br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. + b.dst = seqs.out + seqs.out, seqs.literals, seqs.hist = nil, nil, nil + + if !delayedHistory { + // If we don't have delayed history, no need to update. + hist.recentOffsets = seqs.prevOffset + return nil + } + if b.Last { + // if last block we don't care about history. 
+ println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } + hist.append(b.dst) + hist.recentOffsets = seqs.prevOffset + if debug { + println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") + } + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 00000000..c584f6aa --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,837 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + wr bitWriter + + extraLits int + last bool + + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. +func (b *blockEnc) init() { + if cap(b.literals) < maxCompressedLiteralSize { + b.literals = make([]byte, 0, maxCompressedLiteralSize) + } + const defSeqs = 200 + b.literals = b.literals[:0] + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. +func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. 
+func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. +type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. +func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debug { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debug { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. +func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. 
+func (h literalsHeader) size() int { + return int(h >> 60) +} + +func (h literalsHeader) String() string { + return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) pushOffsets() { + b.prevRecentOffsets = b.recentOffsets +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) popOffsets() { + b.recentOffsets = b.prevRecentOffsets +} + +// matchOffset will adjust recent offsets and return the adjusted one, +// if it matches a previous offset. +func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. + if true { + if lits > 0 { + switch offset { + case b.recentOffsets[0]: + offset = 1 + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } else { + switch offset { + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 1 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[0] - 1: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } + } else { + offset += 3 + } + return offset +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRaw(a []byte) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(a))) + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output[:0]) + b.output = append(b.output, a...) + if debug { + println("Adding RAW block, length", len(a)) + } +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(src))) + bh.setType(blockTypeRaw) + dst = bh.appendTo(dst) + dst = append(dst, src...) + if debug { + println("Adding RAW block, length", len(src)) + } + return dst +} + +// encodeLits can be used if the block is only litLen. +func (b *blockEnc) encodeLits(raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(b.literals))) + + // Don't compress extremely small blocks + if len(b.literals) < 32 || raw { + if debug { + println("Adding RAW block, length", len(b.literals)) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, b.literals...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if len(b.literals) >= 1024 { + // Use 4 Streams. 
+ out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 32 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + if debug { + println("Adding RAW block, length", len(b.literals)) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, b.literals...) + return nil + case huff0.ErrUseRLE: + if debug { + println("Adding RLE block, length", len(b.literals)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + return nil + default: + return err + case nil: + } + // Compressed... + // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debug { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debug { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(b.literals), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram()[:256] + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +func (b *blockEnc) encode(raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(rawAllLits) + } + // We want some difference + if len(b.literals) > (b.size - (b.size >> 5)) { + return errIncompressible + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. + bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 32 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) 
+ if debug { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debug { + println("Adding literals RLE") + } + default: + if debug { + println("Adding literals ERROR:", err) + } + return err + case nil: + // Compressed litLen... + if reUsed { + if debug { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debug { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debug { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debug { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debug { + println("Adding literals compressed") + } + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debug { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). + nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debug { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debug { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debug { + println("Using new, predef", predefSize>>3, ". 
previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debug { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debug { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debug { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debug { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 { + // No need to flush (common) + for seq >= 0 { + s = b.sequences[seq] + wr.flush32() + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + // tabelog max is 8 for all. 
+ of.encode(ofB) + ml.encode(mlB) + ll.encode(llB) + wr.flush32() + + // We checked that all can stay within 32 bits + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.addBits32NC(s.offset, ofB.outBits) + + if debugSequences { + println("Encoded seq", seq, s) + } + + seq-- + } + } else { + for seq >= 0 { + s = b.sequences[seq] + wr.flush32() + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + // tabelog max is below 8 for each. + of.encode(ofB) + ml.encode(mlB) + ll.encode(llB) + wr.flush32() + + // ml+ll = max 32 bits total + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + + if debugSequences { + println("Encoded seq", seq, s) + } + + seq-- + } + } + ml.flush(mlEnc.actualTableLog) + of.flush(ofEnc.actualTableLog) + ll.flush(llEnc.actualTableLog) + err = wr.close() + if err != nil { + return err + } + b.output = wr.out + + if len(b.output)-3-bhOffset >= b.size { + // Maybe even add a bigger margin. + b.litEnc.Reuse = huff0.ReusePolicyNone + return errIncompressible + } + + // Size is output minus block header. + bh.setSize(uint32(len(b.output)-bhOffset) - 3) + if debug { + println("Rewriting block header", bh) + } + _ = bh.appendTo(b.output[bhOffset:bhOffset]) + b.coders.setPrev(llEnc, mlEnc, ofEnc) + return nil +} + +var errIncompressible = errors.New("incompressible") + +func (b *blockEnc) genCodes() { + if len(b.sequences) == 0 { + // nothing to do + return + } + + if len(b.sequences) > math.MaxUint16 { + panic("can only encode up to 64K sequences") + } + // No bounds checks after here: + llH := b.coders.llEnc.Histogram()[:256] + ofH := b.coders.ofEnc.Histogram()[:256] + mlH := b.coders.mlEnc.Histogram()[:256] + for i := range llH { + llH[i] = 0 + } + for i := range ofH { + ofH[i] = 0 + } + for i := range mlH { + mlH[i] = 0 + } + + var llMax, ofMax, mlMax uint8 + for i, seq := range b.sequences { + v := llCode(seq.litLen) + seq.llCode = v + llH[v]++ + if v > llMax { + llMax = v + } + + v = ofCode(seq.offset) + seq.ofCode = v + ofH[v]++ + if v > ofMax { + ofMax = v + } + + v = mlCode(seq.matchLen) + seq.mlCode = v + mlH[v]++ + if v > mlMax { + mlMax = v + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) + } + } + b.sequences[i] = seq + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) + } + if debugAsserts && ofMax > maxOffsetBits { + panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) + } + if debugAsserts && llMax > maxLiteralLengthSymbol { + panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) + } + + b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) + b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) + b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) +} diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go new file mode 100644 index 00000000..01a01e48 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go @@ -0,0 +1,85 @@ +// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. 
+ +package zstd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 00000000..658ef783 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,127 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" + "io/ioutil" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns nil if no more input is available. + readSmall(n int) []byte + + // Read >8 bytes. + // MAY use the destination slice. + readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. 
+ readByte() (byte, error) + + // Skip n bytes. + skipN(n int) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) []byte { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil + } + r := bb[:n] + *b = bb[n:] + return r +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) remain() []byte { + return *b +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, nil + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int) error { + bb := *b + if len(bb) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) []byte { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. + if n2 != n { + if debug { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil + } + return r.tmp[:n] +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := r.r.Read(r.tmp[:1]) + if err != nil { + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int) error { + n2, err := io.CopyN(ioutil.Discard, r.r, int64(n)) + if n2 != int64(n) { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 00000000..2c4fca17 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,88 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. 
+func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go new file mode 100644 index 00000000..66b51bf2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -0,0 +1,540 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "errors" + "io" + "sync" +) + +// Decoder provides decoding of zstandard streams. +// The decoder has been designed to operate without allocations after a warmup. +// This means that you should store the decoder for best performance. +// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. +// A decoder can safely be re-used even if the previous stream failed. +// To release the resources, you must call the Close() function on a decoder. +type Decoder struct { + o decoderOptions + + // Unreferenced decoders, ready for use. + decoders chan *blockDec + + // Streams ready to be decoded. + stream chan decodeStream + + // Current read position used for Reader functionality. + current decoderState + + // Custom dictionaries. + // Always uses copies. + dicts map[uint32]dict + + // streamWg is the waitgroup for all streams + streamWg sync.WaitGroup +} + +// decoderState is used for maintaining state when the decoder +// is used for streaming. +type decoderState struct { + // current block being written to stream. + decodeOutput + + // output in order to be written to stream. + output chan decodeOutput + + // cancel remaining output. + cancel chan struct{} + + flushed bool +} + +var ( + // Check the interfaces we want to support. + _ = io.WriterTo(&Decoder{}) + _ = io.Reader(&Decoder{}) +) + +// NewReader creates a new decoder. +// A nil Reader can be provided in which case Reset can be used to start a decode. +// +// A Decoder can be used in two modes: +// +// 1) As a stream, or +// 2) For stateless decoding using DecodeAll. +// +// Only a single stream can be decoded concurrently, but the same decoder +// can run multiple concurrent stateless decodes. It is even possible to +// use stateless decodes while a stream is being decoded. +// +// The Reset function can be used to initiate a new stream, which is will considerably +// reduce the allocations normally caused by NewReader. 
+func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { + initPredefined() + var d Decoder + d.o.setDefault() + for _, o := range opts { + err := o(&d.o) + if err != nil { + return nil, err + } + } + d.current.output = make(chan decodeOutput, d.o.concurrent) + d.current.flushed = true + + // Transfer option dicts. + d.dicts = make(map[uint32]dict, len(d.o.dicts)) + for _, dc := range d.o.dicts { + d.dicts[dc.id] = dc + } + d.o.dicts = nil + + // Create decoders + d.decoders = make(chan *blockDec, d.o.concurrent) + for i := 0; i < d.o.concurrent; i++ { + dec := newBlockDec(d.o.lowMem) + dec.localFrame = newFrameDec(d.o) + d.decoders <- dec + } + + if r == nil { + return &d, nil + } + return &d, d.Reset(r) +} + +// Read bytes from the decompressed stream into p. +// Returns the number of bytes written and any error that occurred. +// When the stream is done, io.EOF will be returned. +func (d *Decoder) Read(p []byte) (int, error) { + if d.stream == nil { + return 0, errors.New("no input has been initialized") + } + var n int + for { + if len(d.current.b) > 0 { + filled := copy(p, d.current.b) + p = p[filled:] + d.current.b = d.current.b[filled:] + n += filled + } + if len(p) == 0 { + break + } + if len(d.current.b) == 0 { + // We have an error and no more data + if d.current.err != nil { + break + } + if !d.nextBlock(n == 0) { + return n, nil + } + } + } + if len(d.current.b) > 0 { + if debug { + println("returning", n, "still bytes left:", len(d.current.b)) + } + // Only return error at end of block + return n, nil + } + if d.current.err != nil { + d.drainOutput() + } + if debug { + println("returning", n, d.current.err, len(d.decoders)) + } + return n, d.current.err +} + +// Reset will reset the decoder the supplied stream after the current has finished processing. +// Note that this functionality cannot be used after Close has been called. +func (d *Decoder) Reset(r io.Reader) error { + if d.current.err == ErrDecoderClosed { + return d.current.err + } + if r == nil { + return errors.New("nil Reader sent as input") + } + + if d.stream == nil { + d.stream = make(chan decodeStream, 1) + d.streamWg.Add(1) + go d.startStreamDecoder(d.stream) + } + + d.drainOutput() + + // If bytes buffer and < 1MB, do sync decoding anyway. + if bb, ok := r.(*bytes.Buffer); ok && bb.Len() < 1<<20 { + if debug { + println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) + } + b := bb.Bytes() + var dst []byte + if cap(d.current.b) > 0 { + dst = d.current.b + } + + dst, err := d.DecodeAll(b, dst[:0]) + if err == nil { + err = io.EOF + } + d.current.b = dst + d.current.err = err + d.current.flushed = true + if debug { + println("sync decode to ", len(dst), "bytes, err:", err) + } + return nil + } + + // Remove current block. + d.current.decodeOutput = decodeOutput{} + d.current.err = nil + d.current.cancel = make(chan struct{}) + d.current.flushed = false + d.current.d = nil + + d.stream <- decodeStream{ + r: r, + output: d.current.output, + cancel: d.current.cancel, + } + return nil +} + +// drainOutput will drain the output until errEndOfStream is sent. 
+func (d *Decoder) drainOutput() { + if d.current.cancel != nil { + println("cancelling current") + close(d.current.cancel) + d.current.cancel = nil + } + if d.current.d != nil { + if debug { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } + d.decoders <- d.current.d + d.current.d = nil + d.current.b = nil + } + if d.current.output == nil || d.current.flushed { + println("current already flushed") + return + } + for { + select { + case v := <-d.current.output: + if v.d != nil { + if debug { + printf("re-adding decoder %p", v.d) + } + d.decoders <- v.d + } + if v.err == errEndOfStream { + println("current flushed") + d.current.flushed = true + return + } + } + } +} + +// WriteTo writes data to w until there's no more data to write or when an error occurs. +// The return value n is the number of bytes written. +// Any error encountered during the write is also returned. +func (d *Decoder) WriteTo(w io.Writer) (int64, error) { + if d.stream == nil { + return 0, errors.New("no input has been initialized") + } + var n int64 + for { + if len(d.current.b) > 0 { + n2, err2 := w.Write(d.current.b) + n += int64(n2) + if err2 != nil && d.current.err == nil { + d.current.err = err2 + break + } + } + if d.current.err != nil { + break + } + d.nextBlock(true) + } + err := d.current.err + if err != nil { + d.drainOutput() + } + if err == io.EOF { + err = nil + } + return n, err +} + +// DecodeAll allows stateless decoding of a blob of bytes. +// Output will be appended to dst, so if the destination size is known +// you can pre-allocate the destination slice to avoid allocations. +// DecodeAll can be used concurrently. +// The Decoder concurrency limits will be respected. +func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { + if d.current.err == ErrDecoderClosed { + return dst, ErrDecoderClosed + } + + // Grab a block decoder and frame decoder. + block := <-d.decoders + frame := block.localFrame + defer func() { + if debug { + printf("re-adding decoder: %p", block) + } + frame.rawInput = nil + frame.bBuf = nil + d.decoders <- block + }() + frame.bBuf = input + + for { + frame.history.reset() + err := frame.reset(&frame.bBuf) + if err == io.EOF { + return dst, nil + } + if frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + return nil, ErrUnknownDictionary + } + frame.history.setDict(&dict) + } + if err != nil { + return dst, err + } + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { + return dst, ErrDecoderSizeExceeded + } + if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { + // Never preallocate moe than 1 GB up front. + if uint64(cap(dst)) < frame.FrameContentSize { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) + copy(dst2, dst) + dst = dst2 + } + } + if cap(dst) == 0 { + // Allocate window size * 2 by default if nothing is provided and we didn't get frame content size. + size := frame.WindowSize * 2 + // Cap to 1 MB. + if size > 1<<20 { + size = 1 << 20 + } + dst = make([]byte, 0, size) + } + + dst, err = frame.runDecoder(dst, block) + if err != nil { + return dst, err + } + if len(frame.bBuf) == 0 { + break + } + } + return dst, nil +} + +// nextBlock returns the next block. +// If an error occurs d.err will be set. +// Optionally the function can block for new output. +// If non-blocking mode is used the returned boolean will be false +// if no data was available without blocking. 
+func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.d != nil { + if debug { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } + if d.current.err != nil { + // Keep error state. + return blocking + } + + if blocking { + d.current.decodeOutput = <-d.current.output + } else { + select { + case d.current.decodeOutput = <-d.current.output: + default: + return false + } + } + if debug { + println("got", len(d.current.b), "bytes, error:", d.current.err) + } + return true +} + +// Close will release all resources. +// It is NOT possible to reuse the decoder after this. +func (d *Decoder) Close() { + if d.current.err == ErrDecoderClosed { + return + } + d.drainOutput() + if d.stream != nil { + close(d.stream) + d.streamWg.Wait() + d.stream = nil + } + if d.decoders != nil { + close(d.decoders) + for dec := range d.decoders { + dec.Close() + } + d.decoders = nil + } + if d.current.d != nil { + d.current.d.Close() + d.current.d = nil + } + d.current.err = ErrDecoderClosed +} + +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. +// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. +func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + +type decodeOutput struct { + d *blockDec + b []byte + err error +} + +type decodeStream struct { + r io.Reader + + // Blocks ready to be written to output. + output chan decodeOutput + + // cancel reading from the input + cancel chan struct{} +} + +// errEndOfStream indicates that everything from the stream was read. +var errEndOfStream = errors.New("end-of-stream") + +// Create Decoder: +// Spawn n block decoders. These accept tasks to decode a block. +// Create goroutine that handles stream processing, this will send history to decoders as they are available. +// Decoders update the history as they decode. +// When a block is returned: +// a) history is sent to the next decoder, +// b) content written to CRC. +// c) return data to WRITER. +// d) wait for next block to return data. +// Once WRITTEN, the decoders reused by the writer frame decoder for re-use. +func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { + defer d.streamWg.Done() + frame := newFrameDec(d.o) + for stream := range inStream { + if debug { + println("got new stream") + } + br := readerWrapper{r: stream.r} + decodeStream: + for { + frame.history.reset() + err := frame.reset(&br) + if debug && err != nil { + println("Frame decoder returned", err) + } + if err == nil && frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + err = ErrUnknownDictionary + } else { + frame.history.setDict(&dict) + } + } + if err != nil { + stream.output <- decodeOutput{ + err: err, + } + break + } + if debug { + println("starting frame decoder") + } + + // This goroutine will forward history between frames. 
+ frame.frameDone.Add(1) + frame.initAsync() + + go frame.startDecoder(stream.output) + decodeFrame: + // Go through all blocks of the frame. + for { + dec := <-d.decoders + select { + case <-stream.cancel: + if !frame.sendErr(dec, io.EOF) { + // To not let the decoder dangle, send it back. + stream.output <- decodeOutput{d: dec} + } + break decodeStream + default: + } + err := frame.next(dec) + switch err { + case io.EOF: + // End of current frame, no error + println("EOF on next block") + break decodeFrame + case nil: + continue + default: + println("block decoder returned", err) + break decodeStream + } + } + // All blocks have started decoding, check if there are more frames. + println("waiting for done") + frame.frameDone.Wait() + println("done waiting...") + } + frame.frameDone.Wait() + println("Sending EOS") + stream.output <- decodeOutput{err: errEndOfStream} + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 00000000..284d3844 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,84 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + dicts []dict +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + } + o.maxDecodedSize = 1 << 63 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency will set the concurrency, +// meaning the maximum number of decoders to run concurrently. +// The value supplied must be at least 1. +// By default this will be set to GOMAXPROCS. +func WithDecoderConcurrency(n int) DOption { + return func(o *decoderOptions) error { + if n <= 0 { + return fmt.Errorf("Concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory +// non-streaming operations or maximum window size for streaming operations. +// This can be used to control memory usage of potentially hostile content. +// For streaming operations, the maximum window size is capped at 1<<30 bytes. +// Maximum and default is 1 << 63 bytes. +func WithDecoderMaxMemory(n uint64) DOption { + return func(o *decoderOptions) error { + if n == 0 { + return errors.New("WithDecoderMaxMemory must be at least 1") + } + if n > 1<<63 { + return fmt.Errorf("WithDecoderMaxmemory must be less than 1 << 63") + } + o.maxDecodedSize = n + return nil + } +} + +// WithDecoderDicts allows to register one or more dictionaries for the decoder. +// If several dictionaries with the same ID is provided the last one will be used. 
+func WithDecoderDicts(dicts ...[]byte) DOption { + return func(o *decoderOptions) error { + for _, b := range dicts { + d, err := loadDict(b) + if err != nil { + return err + } + o.dicts = append(o.dicts, *d) + } + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go new file mode 100644 index 00000000..8eb6f6ba --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -0,0 +1,104 @@ +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/huff0" +) + +type dict struct { + id uint32 + + litDec *huff0.Scratch + llDec, ofDec, mlDec sequenceDec + offsets [3]int + content []byte +} + +var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec} + +// Load a dictionary as described in +// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format +func loadDict(b []byte) (*dict, error) { + // Check static field size. + if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if !bytes.Equal(b[:4], dictMagic[:]) { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litDec, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, err + } + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debug { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. + dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 00000000..c120d905 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,518 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
+ s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... 
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, betterLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. 
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.Encode(blk, src) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go new file mode 100644 index 00000000..50276bcd --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -0,0 +1,678 @@ +// Copyright 2019+ Klaus Post. All rights reserved. 
+// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + dFastLongTableBits = 17 // Bits used in the long match table + dFastLongTableSize = 1 << dFastLongTableBits // Size of the table + dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + + dFastShortTableBits = tableBits // Bits used in the short match table + dFastShortTableSize = 1 << dFastShortTableBits // Size of the table + dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. +) + +type doubleFastEncoder struct { + fastEncoder + longTable [dFastLongTableSize]tableEntry +} + +// Encode mimmics functionality in zstd_dfast.c +func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. 
+ var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, dFastLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. 
+ l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hash8(cv0, dFastLongTableBits)] = te0 + e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hash5(cv0, dFastShortTableBits)] = te0 + e.table[hash5(cv1, dFastShortTableBits)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. 
+ if e.cur >= bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. 
+ if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, dFastLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hash8(cv0, dFastLongTableBits)] = te0 + e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hash5(cv0, dFastShortTableBits)] = te0 + e.table[hash5(cv1, dFastShortTableBits)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv1>>8, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. 
We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < bufferReset { + e.cur += int32(len(src)) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go new file mode 100644 index 00000000..4104b456 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -0,0 +1,755 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + maxMatchLength = 131074 +) + +type tableEntry struct { + val uint32 + offset int32 +} + +type fastBase struct { + // cur is the offset at the start of hist + cur int32 + // maximum offset. Should be at least 2x block size. + maxMatchOff int32 + hist []byte + crc *xxhash.Digest + tmp [8]byte + blk *blockEnc +} + +type fastEncoder struct { + fastBase + table [tableSize]tableEntry +} + +// CRC returns the underlying CRC writer. +func (e *fastBase) CRC() *xxhash.Digest { + return e.crc +} + +// AppendCRC will append the CRC to the destination slice and return it. +func (e *fastBase) AppendCRC(dst []byte) []byte { + crc := e.crc.Sum(e.tmp[:0]) + dst = append(dst, crc[7], crc[6], crc[5], crc[4]) + return dst +} + +// WindowSize returns the window size of the encoder, +// or a window size small enough to contain the input size, if > 0. +func (e *fastBase) WindowSize(size int) int32 { + if size > 0 && size < int(e.maxMatchOff) { + b := int32(1) << uint(bits.Len(uint(size))) + // Keep minimum window. + if b < 1024 { + b = 1024 + } + return b + } + return e.maxMatchOff +} + +// Block returns the current block. +func (e *fastBase) Block() *blockEnc { + return e.blk +} + +// Encode mimmics functionality in zstd_fast.c +func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hash6(cv, hashLog) + nextHash2 := hash6(cv>>8, hashLog) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + var length int32 + // length = 4 + e.matchlen(s+6, repIndex+4, src) + { + a := src[s+6:] + b := src[repIndex+4:] + endI := len(a) & (math.MaxInt32 - 7) + length = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Store this, since we have it. + nextHash := hash6(cv, hashLog) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. 
+ seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debug { + if len(src) > maxBlockSize { + panic("src too big") + } + } + + // Protect against e.cur wraparound. + if e.cur >= bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + + for { + nextHash := hash6(cv, hashLog) + nextHash2 := hash6(cv>>8, hashLog) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + // length := 4 + e.matchlen(s+6, repIndex+4, src) + // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:])) + var length int32 + { + a := src[s+6:] + b := src[repIndex+4:] + endI := len(a) & (math.MaxInt32 - 7) + length = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + //l := e.matchlenNoHist(s+4, t+4, src) + 4 + // l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlenNoHist(s+4, o2+4, src) + // l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Store this, since we have it. 
+ nextHash := hash6(cv, hashLog) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < bufferReset { + e.cur += int32(len(src)) + } +} + +func (e *fastBase) addBlock(src []byte) int32 { + if debugAsserts && e.cur > bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset)) + } + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + l := e.maxMatchOff * 2 + // Make it at least 1MB. + if l < 1<<20 { + l = 1 << 20 + } + e.hist = make([]byte, 0, l) + } else { + if cap(e.hist) < int(e.maxMatchOff*2) { + panic("unexpected buffer size") + } + // Move down + offset := int32(len(e.hist)) - e.maxMatchOff + copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:e.maxMatchOff] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +// useBlock will replace the block with the provided one, +// but transfer recent offsets from the previous. +func (e *fastBase) UseBlock(enc *blockEnc) { + enc.reset(e.blk) + e.blk = enc +} + +func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 { + // Extend the match to be as long as possible. + return int32(matchLen(src[s:], src[t:])) +} + +func (e *fastBase) matchlen(s, t int32, src []byte) int32 { + if debugAsserts { + if s < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if t < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if s-t > e.maxMatchOff { + err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) + panic(err) + } + if len(src)-int(s) > maxCompressedBlockSize { + panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) + } + } + + // Extend the match to be as long as possible. + return int32(matchLen(src[s:], src[t:])) +} + +// Reset the encoding table. +func (e *fastBase) Reset(singleBlock bool) { + if e.blk == nil { + e.blk = &blockEnc{} + e.blk.init() + } else { + e.blk.reset(nil) + } + e.blk.initNewEncode() + if e.crc == nil { + e.crc = xxhash.New() + } else { + e.crc.Reset() + } + if !singleBlock && cap(e.hist) < int(e.maxMatchOff*2) { + l := e.maxMatchOff * 2 + // Make it at least 1MB. + if l < 1<<20 { + l = 1 << 20 + } + e.hist = make([]byte, 0, l) + } + // We offset current position so everything will be out of reach. + // If above reset line, history will be purged. 
+ if e.cur < bufferReset { + e.cur += e.maxMatchOff + int32(len(e.hist)) + } + e.hist = e.hist[:0] +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_params.go b/vendor/github.com/klauspost/compress/zstd/enc_params.go new file mode 100644 index 00000000..d874116f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_params.go @@ -0,0 +1,157 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +/* +// encParams are not really used, just here for reference. +type encParams struct { + // largest match distance : larger == more compression, more memory needed during decompression + windowLog uint8 + + // fully searched segment : larger == more compression, slower, more memory (useless for fast) + chainLog uint8 + + // dispatch table : larger == faster, more memory + hashLog uint8 + + // < nb of searches : larger == more compression, slower + searchLog uint8 + + // < match length searched : larger == faster decompression, sometimes less compression + minMatch uint8 + + // acceptable match size for optimal parser (only) : larger == more compression, slower + targetLength uint32 + + // see ZSTD_strategy definition above + strategy strategy +} + +// strategy defines the algorithm to use when generating sequences. +type strategy uint8 + +const ( + // Compression strategies, listed from fastest to strongest + strategyFast strategy = iota + 1 + strategyDfast + strategyGreedy + strategyLazy + strategyLazy2 + strategyBtlazy2 + strategyBtopt + strategyBtultra + strategyBtultra2 + // note : new strategies _might_ be added in the future. + // Only the order (from fast to strong) is guaranteed + +) + +var defEncParams = [4][]encParams{ + { // "default" - for any srcSize > 256 KB + // W, C, H, S, L, TL, strat + {19, 12, 13, 1, 6, 1, strategyFast}, // base for negative levels + {19, 13, 14, 1, 7, 0, strategyFast}, // level 1 + {20, 15, 16, 1, 6, 0, strategyFast}, // level 2 + {21, 16, 17, 1, 5, 1, strategyDfast}, // level 3 + {21, 18, 18, 1, 5, 1, strategyDfast}, // level 4 + {21, 18, 19, 2, 5, 2, strategyGreedy}, // level 5 + {21, 19, 19, 3, 5, 4, strategyGreedy}, // level 6 + {21, 19, 19, 3, 5, 8, strategyLazy}, // level 7 + {21, 19, 19, 3, 5, 16, strategyLazy2}, // level 8 + {21, 19, 20, 4, 5, 16, strategyLazy2}, // level 9 + {22, 20, 21, 4, 5, 16, strategyLazy2}, // level 10 + {22, 21, 22, 4, 5, 16, strategyLazy2}, // level 11 + {22, 21, 22, 5, 5, 16, strategyLazy2}, // level 12 + {22, 21, 22, 5, 5, 32, strategyBtlazy2}, // level 13 + {22, 22, 23, 5, 5, 32, strategyBtlazy2}, // level 14 + {22, 23, 23, 6, 5, 32, strategyBtlazy2}, // level 15 + {22, 22, 22, 5, 5, 48, strategyBtopt}, // level 16 + {23, 23, 22, 5, 4, 64, strategyBtopt}, // level 17 + {23, 23, 22, 6, 3, 64, strategyBtultra}, // level 18 + {23, 24, 22, 7, 3, 256, strategyBtultra2}, // level 19 + {25, 25, 23, 7, 3, 256, strategyBtultra2}, // level 20 + {26, 26, 24, 7, 3, 512, strategyBtultra2}, // level 21 + {27, 27, 25, 9, 3, 999, strategyBtultra2}, // level 22 + }, + { // for srcSize <= 256 KB + // W, C, H, S, L, T, strat + {18, 12, 13, 1, 5, 1, strategyFast}, // base for negative levels + {18, 13, 14, 1, 6, 0, strategyFast}, // level 1 + {18, 14, 14, 1, 5, 1, strategyDfast}, // level 2 + {18, 16, 16, 1, 4, 1, strategyDfast}, // level 3 + {18, 16, 17, 2, 5, 2, strategyGreedy}, // level 4. + {18, 18, 18, 3, 5, 2, strategyGreedy}, // level 5. 
+ {18, 18, 19, 3, 5, 4, strategyLazy}, // level 6. + {18, 18, 19, 4, 4, 4, strategyLazy}, // level 7 + {18, 18, 19, 4, 4, 8, strategyLazy2}, // level 8 + {18, 18, 19, 5, 4, 8, strategyLazy2}, // level 9 + {18, 18, 19, 6, 4, 8, strategyLazy2}, // level 10 + {18, 18, 19, 5, 4, 12, strategyBtlazy2}, // level 11. + {18, 19, 19, 7, 4, 12, strategyBtlazy2}, // level 12. + {18, 18, 19, 4, 4, 16, strategyBtopt}, // level 13 + {18, 18, 19, 4, 3, 32, strategyBtopt}, // level 14. + {18, 18, 19, 6, 3, 128, strategyBtopt}, // level 15. + {18, 19, 19, 6, 3, 128, strategyBtultra}, // level 16. + {18, 19, 19, 8, 3, 256, strategyBtultra}, // level 17. + {18, 19, 19, 6, 3, 128, strategyBtultra2}, // level 18. + {18, 19, 19, 8, 3, 256, strategyBtultra2}, // level 19. + {18, 19, 19, 10, 3, 512, strategyBtultra2}, // level 20. + {18, 19, 19, 12, 3, 512, strategyBtultra2}, // level 21. + {18, 19, 19, 13, 3, 999, strategyBtultra2}, // level 22. + }, + { // for srcSize <= 128 KB + // W, C, H, S, L, T, strat + {17, 12, 12, 1, 5, 1, strategyFast}, // base for negative levels + {17, 12, 13, 1, 6, 0, strategyFast}, // level 1 + {17, 13, 15, 1, 5, 0, strategyFast}, // level 2 + {17, 15, 16, 2, 5, 1, strategyDfast}, // level 3 + {17, 17, 17, 2, 4, 1, strategyDfast}, // level 4 + {17, 16, 17, 3, 4, 2, strategyGreedy}, // level 5 + {17, 17, 17, 3, 4, 4, strategyLazy}, // level 6 + {17, 17, 17, 3, 4, 8, strategyLazy2}, // level 7 + {17, 17, 17, 4, 4, 8, strategyLazy2}, // level 8 + {17, 17, 17, 5, 4, 8, strategyLazy2}, // level 9 + {17, 17, 17, 6, 4, 8, strategyLazy2}, // level 10 + {17, 17, 17, 5, 4, 8, strategyBtlazy2}, // level 11 + {17, 18, 17, 7, 4, 12, strategyBtlazy2}, // level 12 + {17, 18, 17, 3, 4, 12, strategyBtopt}, // level 13. + {17, 18, 17, 4, 3, 32, strategyBtopt}, // level 14. + {17, 18, 17, 6, 3, 256, strategyBtopt}, // level 15. + {17, 18, 17, 6, 3, 128, strategyBtultra}, // level 16. + {17, 18, 17, 8, 3, 256, strategyBtultra}, // level 17. + {17, 18, 17, 10, 3, 512, strategyBtultra}, // level 18. + {17, 18, 17, 5, 3, 256, strategyBtultra2}, // level 19. + {17, 18, 17, 7, 3, 512, strategyBtultra2}, // level 20. + {17, 18, 17, 9, 3, 512, strategyBtultra2}, // level 21. + {17, 18, 17, 11, 3, 999, strategyBtultra2}, // level 22. + }, + { // for srcSize <= 16 KB + // W, C, H, S, L, T, strat + {14, 12, 13, 1, 5, 1, strategyFast}, // base for negative levels + {14, 14, 15, 1, 5, 0, strategyFast}, // level 1 + {14, 14, 15, 1, 4, 0, strategyFast}, // level 2 + {14, 14, 15, 2, 4, 1, strategyDfast}, // level 3 + {14, 14, 14, 4, 4, 2, strategyGreedy}, // level 4 + {14, 14, 14, 3, 4, 4, strategyLazy}, // level 5. + {14, 14, 14, 4, 4, 8, strategyLazy2}, // level 6 + {14, 14, 14, 6, 4, 8, strategyLazy2}, // level 7 + {14, 14, 14, 8, 4, 8, strategyLazy2}, // level 8. + {14, 15, 14, 5, 4, 8, strategyBtlazy2}, // level 9. + {14, 15, 14, 9, 4, 8, strategyBtlazy2}, // level 10. + {14, 15, 14, 3, 4, 12, strategyBtopt}, // level 11. + {14, 15, 14, 4, 3, 24, strategyBtopt}, // level 12. + {14, 15, 14, 5, 3, 32, strategyBtultra}, // level 13. + {14, 15, 15, 6, 3, 64, strategyBtultra}, // level 14. + {14, 15, 15, 7, 3, 256, strategyBtultra}, // level 15. + {14, 15, 15, 5, 3, 48, strategyBtultra2}, // level 16. + {14, 15, 15, 6, 3, 128, strategyBtultra2}, // level 17. + {14, 15, 15, 7, 3, 256, strategyBtultra2}, // level 18. + {14, 15, 15, 8, 3, 256, strategyBtultra2}, // level 19. + {14, 15, 15, 8, 3, 512, strategyBtultra2}, // level 20. + {14, 15, 15, 9, 3, 512, strategyBtultra2}, // level 21. 
+ {14, 15, 15, 10, 3, 999, strategyBtultra2}, // level 22. + }, +} +*/ diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 00000000..c56d2241 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,560 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "fmt" + "io" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. +type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int) int32 + UseBlock(*blockEnc) + Reset(singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + // If not single block, history will be allocated on first use. + enc.Reset(true) + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + if s.writing == nil { + s.writing = &blockEnc{} + s.writing.init() + } + s.writing.initNewEncode() + s.filling = s.filling[:0] + s.current = s.current[:0] + s.previous = s.previous[:0] + s.encoder.Reset(false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.writeErr = nil +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. 
+// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) + p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. + s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) > 0 { + s.current = e.EncodeAll(s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: 0, + WindowSize: uint32(s.encoder.WindowSize(0)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: 0, + } + dst, err := fh.appendTo(tmp[:0]) + if err != nil { + return err + } + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. + if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // Move blocks forward. + s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.wg.Add(1) + go func(src []byte) { + if debug { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. + enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. 
+ if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debug { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.writeErr = err + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debug { + println("Using ReadFrom") + } + // Maybe handle stuff queued? + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debug { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, e.nextBlock(true) + default: + if debug { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + case nil: + } + if len(src) > 0 { + if debug { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. +// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + return err + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. +// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. 
+// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst, _ = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + // Release encoder reference to last block. + // If a non-single block is needed the encoder will reset again. + enc.Reset(true) + e.encoders <- enc + }() + // Use single segments when above minimum window and below 1MB. + single := len(src) < 1<<20 && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(len(src))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: 0, + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 { + dst = make([]byte, 0, len(src)) + } + dst, err := fh.appendTo(dst) + if err != nil { + panic(err) + } + + // If we can do everything in one block, prefer that. + if len(src) <= maxCompressedBlockSize { + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + enc.EncodeNoHist(blk, src) + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + err := errIncompressible + oldout := blk.output + if len(blk.literals) != len(src) || len(src) != e.o.blockSize { + // Output directly to dst + blk.output = dst + err = blk.encode(e.o.noEntropy, !e.o.allLitEntropy) + } + + switch err { + case errIncompressible: + if debug { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, src) + case nil: + dst = blk.output + default: + panic(err) + } + blk.output = oldout + } else { + enc.Reset(false) + blk := enc.Block() + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.reset(nil) + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { + err = blk.encode(e.o.noEntropy, !e.o.allLitEntropy) + } + + switch err { + case errIncompressible: + if debug { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, todo) + blk.popOffsets() + case nil: + dst = append(dst, blk.output...) 
+ default: + panic(err) + } + } + } + if e.o.crc { + dst = enc.AppendCRC(dst) + } + // Add padding with content from crypto/rand.Reader + if e.o.pad > 0 { + add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + dst, err = skippableFrame(dst, add, rand.Reader) + if err != nil { + panic(err) + } + } + return dst +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go new file mode 100644 index 00000000..dfac14dd --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -0,0 +1,267 @@ +package zstd + +import ( + "errors" + "fmt" + "runtime" + "strings" +) + +// EOption is an option for creating a encoder. +type EOption func(*encoderOptions) error + +// options retains accumulated state of multiple options. +type encoderOptions struct { + concurrent int + level EncoderLevel + single *bool + pad int + blockSize int + windowSize int + crc bool + fullZero bool + noEntropy bool + allLitEntropy bool + customWindow bool + customALEntropy bool +} + +func (o *encoderOptions) setDefault() { + *o = encoderOptions{ + // use less ram: true for now, but may change. + concurrent: runtime.GOMAXPROCS(0), + crc: true, + single: nil, + blockSize: 1 << 16, + windowSize: 8 << 20, + level: SpeedDefault, + } +} + +// encoder returns an encoder with the selected options. +func (o encoderOptions) encoder() encoder { + switch o.level { + case SpeedDefault: + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}}} + case SpeedBetterCompression: + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}} + case SpeedFastest: + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize)}} + } + panic("unknown compression level") +} + +// WithEncoderCRC will add CRC value to output. +// Output will be 4 bytes larger. +func WithEncoderCRC(b bool) EOption { + return func(o *encoderOptions) error { o.crc = b; return nil } +} + +// WithEncoderConcurrency will set the concurrency, +// meaning the maximum number of decoders to run concurrently. +// The value supplied must be at least 1. +// By default this will be set to GOMAXPROCS. +func WithEncoderConcurrency(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithWindowSize will set the maximum allowed back-reference distance. +// The value must be a power of two between MinWindowSize and MaxWindowSize. +// A larger value will enable better compression but allocate more memory and, +// for above-default values, take considerably longer. +// The default value is determined by the compression level. +func WithWindowSize(n int) EOption { + return func(o *encoderOptions) error { + switch { + case n < MinWindowSize: + return fmt.Errorf("window size must be at least %d", MinWindowSize) + case n > MaxWindowSize: + return fmt.Errorf("window size must be at most %d", MaxWindowSize) + case (n & (n - 1)) != 0: + return errors.New("window size must be a power of 2") + } + + o.windowSize = n + o.customWindow = true + if o.blockSize > o.windowSize { + o.blockSize = o.windowSize + } + return nil + } +} + +// WithEncoderPadding will add padding to all output so the size will be a multiple of n. +// This can be used to obfuscate the exact output size or make blocks of a certain size. 
+// The contents will be a skippable frame, so it will be invisible by the decoder. +// n must be > 0 and <= 1GB, 1<<30 bytes. +// The padded area will be filled with data from crypto/rand.Reader. +// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. +func WithEncoderPadding(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("padding must be at least 1") + } + // No need to waste our time. + if n == 1 { + o.pad = 0 + } + if n > 1<<30 { + return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") + } + o.pad = n + return nil + } +} + +// EncoderLevel predefines encoder compression levels. +// Only use the constants made available, since the actual mapping +// of these values are very likely to change and your compression could change +// unpredictably when upgrading the library. +type EncoderLevel int + +const ( + speedNotSet EncoderLevel = iota + + // SpeedFastest will choose the fastest reasonable compression. + // This is roughly equivalent to the fastest Zstandard mode. + SpeedFastest + + // SpeedDefault is the default "pretty fast" compression option. + // This is roughly equivalent to the default Zstandard mode (level 3). + SpeedDefault + + // SpeedBetterCompression will yield better compression than the default. + // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. + // By using this, notice that CPU usage may go up in the future. + SpeedBetterCompression + + // speedLast should be kept as the last actual compression option. + // The is not for external usage, but is used to keep track of the valid options. + speedLast + + // SpeedBestCompression will choose the best available compression option. + // For now this is not implemented. + SpeedBestCompression = SpeedBetterCompression +) + +// EncoderLevelFromString will convert a string representation of an encoding level back +// to a compression level. The compare is not case sensitive. +// If the string wasn't recognized, (false, SpeedDefault) will be returned. +func EncoderLevelFromString(s string) (bool, EncoderLevel) { + for l := EncoderLevel(speedNotSet + 1); l < speedLast; l++ { + if strings.EqualFold(s, l.String()) { + return true, l + } + } + return false, SpeedDefault +} + +// EncoderLevelFromZstd will return an encoder level that closest matches the compression +// ratio of a specific zstd compression level. +// Many input values will provide the same compression level. +func EncoderLevelFromZstd(level int) EncoderLevel { + switch { + case level < 3: + return SpeedFastest + case level >= 3 && level < 6: + return SpeedDefault + case level > 5: + return SpeedBetterCompression + } + return SpeedDefault +} + +// String provides a string representation of the compression level. +func (e EncoderLevel) String() string { + switch e { + case SpeedFastest: + return "fastest" + case SpeedDefault: + return "default" + case SpeedBetterCompression: + return "better" + default: + return "invalid" + } +} + +// WithEncoderLevel specifies a predefined compression level. 
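// Editorial usage sketch, not part of the vendored diff: it only wires together the
// option constructors declared in encoder_options.go above, assuming the package is
// imported from "github.com/klauspost/compress/zstd".
package main

import (
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Map a numeric zstd level to one of this package's coarse levels,
	// then let a string (e.g. from a CLI flag) override it if recognized.
	level := zstd.EncoderLevelFromZstd(3) // SpeedDefault
	if ok, l := zstd.EncoderLevelFromString("better"); ok {
		level = l // SpeedBetterCompression
	}

	out, err := os.Create("out.zst")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	enc, err := zstd.NewWriter(out,
		zstd.WithEncoderLevel(level),
		zstd.WithEncoderCRC(true),      // append the 4-byte checksum on Close
		zstd.WithEncoderConcurrency(2), // at most two blocks encoded concurrently
		zstd.WithEncoderPadding(4096),  // pad output to a 4 KiB multiple via a skippable frame
	)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := enc.Write([]byte("hello zstd")); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
}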
+func WithEncoderLevel(l EncoderLevel) EOption { + return func(o *encoderOptions) error { + switch { + case l <= speedNotSet || l >= speedLast: + return fmt.Errorf("unknown encoder level") + } + o.level = l + if !o.customWindow { + switch o.level { + case SpeedFastest: + o.windowSize = 4 << 20 + case SpeedDefault: + o.windowSize = 8 << 20 + case SpeedBetterCompression: + o.windowSize = 16 << 20 + } + } + if !o.customALEntropy { + o.allLitEntropy = l > SpeedFastest + } + + return nil + } +} + +// WithZeroFrames will encode 0 length input as full frames. +// This can be needed for compatibility with zstandard usage, +// but is not needed for this package. +func WithZeroFrames(b bool) EOption { + return func(o *encoderOptions) error { + o.fullZero = b + return nil + } +} + +// WithAllLitEntropyCompression will apply entropy compression if no matches are found. +// Disabling this will skip incompressible data faster, but in cases with no matches but +// skewed character distribution compression is lost. +// Default value depends on the compression level selected. +func WithAllLitEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.customALEntropy = true + o.allLitEntropy = b + return nil + } +} + +// WithNoEntropyCompression will always skip entropy compression of literals. +// This can be useful if content has matches, but unlikely to benefit from entropy +// compression. Usually the slight speed improvement is not worth enabling this. +func WithNoEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.noEntropy = b + return nil + } +} + +// WithSingleSegment will set the "single segment" flag when EncodeAll is used. +// If this flag is set, data must be regenerated within a single continuous memory segment. +// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. +// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. +// In order to preserve the decoder from unreasonable memory requirements, +// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. +// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. +// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. +// If this is not specified, block encodes will automatically choose this based on the input size. +// This setting has no effect on streamed encodes. +func WithSingleSegment(b bool) EOption { + return func(o *encoderOptions) error { + o.single = &b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go new file mode 100644 index 00000000..fc4a566d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -0,0 +1,494 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "encoding/hex" + "errors" + "hash" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type frameDec struct { + o decoderOptions + crc hash.Hash64 + offset int64 + + WindowSize uint64 + + // maxWindowSize is the maximum windows size to support. + // should never be bigger than max-int. 
+ maxWindowSize uint64 + + // In order queue of blocks being decoded. + decoding chan *blockDec + + // Frame history passed between blocks + history history + + rawInput byteBuffer + + // Byte buffer that can be reused for small input blocks. + bBuf byteBuf + + FrameContentSize uint64 + frameDone sync.WaitGroup + + DictionaryID *uint32 + HasCheckSum bool + SingleSegment bool + + // asyncRunning indicates whether the async routine processes input on 'decoding'. + asyncRunningMu sync.Mutex + asyncRunning bool +} + +const ( + // The minimum Window_Size is 1 KB. + MinWindowSize = 1 << 10 + MaxWindowSize = 1 << 29 +) + +var ( + frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} + skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} +) + +func newFrameDec(o decoderOptions) *frameDec { + d := frameDec{ + o: o, + maxWindowSize: MaxWindowSize, + } + if d.maxWindowSize > o.maxDecodedSize { + d.maxWindowSize = o.maxDecodedSize + } + return &d +} + +// reset will read the frame header and prepare for block decoding. +// If nothing can be read from the input, io.EOF will be returned. +// Any other error indicated that the stream contained data, but +// there was a problem. +func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var b []byte + for { + b = br.readSmall(4) + if b == nil { + return io.EOF + } + if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { + if debug { + println("Not skippable", hex.EncodeToString(b), hex.EncodeToString(skippableFrameMagic)) + } + // Break if not skippable frame. + break + } + // Read size to skip + b = br.readSmall(4) + if b == nil { + println("Reading Frame Size EOF") + return io.ErrUnexpectedEOF + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err := br.skipN(int(n)) + if err != nil { + if debug { + println("Reading discarded frame", err) + } + return err + } + } + if !bytes.Equal(b, frameMagic) { + println("Got magic numbers: ", b, "want:", frameMagic) + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + println("Reading Frame_Header_Descriptor", err) + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("Reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + println("Reading Window_Descriptor", err) + return err + } + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = nil + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + b = br.readSmall(int(size)) + if b == nil { + if debug { + println("Reading Dictionary_ID", io.ErrUnexpectedEOF) + } + return io.ErrUnexpectedEOF + } + var id uint32 + switch size { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debug { + println("Dict size", size, "ID:", id) + } + if id > 0 { + // ID 0 means "sorry, no dictionary 
anyway". + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format + d.DictionaryID = &id + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = 0 + if fcsSize > 0 { + b := br.readSmall(fcsSize) + if b == nil { + println("Reading Frame content", io.ErrUnexpectedEOF) + return io.ErrUnexpectedEOF + } + switch fcsSize { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debug { + println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) + } + } + // Move this to shared. + d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. + d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + } + + if d.WindowSize > d.maxWindowSize { + printf("window size %d > max %d\n", d.WindowSize, d.maxWindowSize) + return ErrWindowSizeExceeded + } + // The minimum Window_Size is 1 KB. + if d.WindowSize < MinWindowSize { + println("got window size: ", d.WindowSize) + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if d.o.lowMem && d.history.windowSize < maxBlockSize { + d.history.maxSize = d.history.windowSize * 2 + } else { + d.history.maxSize = d.history.windowSize + maxBlockSize + } + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debug { + printf("decoding new block %p:%p", block, block.data) + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + d.sendErr(block, err) + return err + } + block.input <- struct{}{} + if debug { + println("next block:", block) + } + d.asyncRunningMu.Lock() + defer d.asyncRunningMu.Unlock() + if !d.asyncRunning { + return nil + } + if block.Last { + // We indicate the frame is done by sending io.EOF + d.decoding <- block + return io.EOF + } + d.decoding <- block + return nil +} + +// sendEOF will queue an error block on the frame. +// This will cause the frame decoder to return when it encounters the block. +// Returns true if the decoder was added. +func (d *frameDec) sendErr(block *blockDec, err error) bool { + d.asyncRunningMu.Lock() + defer d.asyncRunningMu.Unlock() + if !d.asyncRunning { + return false + } + + println("sending error", err.Error()) + block.sendErr(err) + d.decoding <- block + return true +} + +// checkCRC will check the checksum if the frame has one. 
+// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + if !d.HasCheckSum { + return nil + } + var tmp [4]byte + got := d.crc.Sum64() + // Flip to match file order. + tmp[0] = byte(got >> 0) + tmp[1] = byte(got >> 8) + tmp[2] = byte(got >> 16) + tmp[3] = byte(got >> 24) + + // We can overwrite upper tmp now + want := d.rawInput.readSmall(4) + if want == nil { + println("CRC missing?") + return io.ErrUnexpectedEOF + } + + if !bytes.Equal(tmp[:], want) { + if debug { + println("CRC Check Failed:", tmp[:], "!=", want) + } + return ErrCRCMismatch + } + if debug { + println("CRC ok", tmp[:]) + } + return nil +} + +func (d *frameDec) initAsync() { + if !d.o.lowMem && !d.SingleSegment { + // set max extra size history to 10MB. + d.history.maxSize = d.history.windowSize + maxBlockSize*5 + } + // re-alloc if more than one extra block size. + if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { + d.history.b = make([]byte, 0, d.history.maxSize) + } + if cap(d.history.b) < d.history.maxSize { + d.history.b = make([]byte, 0, d.history.maxSize) + } + if cap(d.decoding) < d.o.concurrent { + d.decoding = make(chan *blockDec, d.o.concurrent) + } + if debug { + h := d.history + printf("history init. len: %d, cap: %d", len(h.b), cap(h.b)) + } + d.asyncRunningMu.Lock() + d.asyncRunning = true + d.asyncRunningMu.Unlock() +} + +// startDecoder will start decoding blocks and write them to the writer. +// The decoder will stop as soon as an error occurs or at end of frame. +// When the frame has finished decoding the *bufio.Reader +// containing the remaining input will be sent on frameDec.frameDone. +func (d *frameDec) startDecoder(output chan decodeOutput) { + written := int64(0) + + defer func() { + d.asyncRunningMu.Lock() + d.asyncRunning = false + d.asyncRunningMu.Unlock() + + // Drain the currently decoding. + d.history.error = true + flushdone: + for { + select { + case b := <-d.decoding: + b.history <- &d.history + output <- <-b.result + default: + break flushdone + } + } + println("frame decoder done, signalling done") + d.frameDone.Done() + }() + // Get decoder for first block. + block := <-d.decoding + block.history <- &d.history + for { + var next *blockDec + // Get result + r := <-block.result + if r.err != nil { + println("Result contained error", r.err) + output <- r + return + } + if debug { + println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) + d.offset += int64(len(r.b)) + } + if !block.Last { + // Send history to next block + select { + case next = <-d.decoding: + if debug { + println("Sending ", len(d.history.b), "bytes as history") + } + next.history <- &d.history + default: + // Wait until we have sent the block, so + // other decoders can potentially get the decoder. + next = nil + } + } + + // Add checksum, async to decoding. + if d.HasCheckSum { + n, err := d.crc.Write(r.b) + if err != nil { + r.err = err + if n != len(r.b) { + r.err = io.ErrShortWrite + } + output <- r + return + } + } + written += int64(len(r.b)) + if d.SingleSegment && uint64(written) > d.FrameContentSize { + println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize) + r.err = ErrFrameSizeExceeded + output <- r + return + } + if block.Last { + r.err = d.checkCRC() + output <- r + return + } + output <- r + if next == nil { + // There was no decoder available, we wait for one now that we have sent to the writer. 
+ if debug { + println("Sending ", len(d.history.b), " bytes as history") + } + next = <-d.decoding + next.history <- &d.history + } + block = next + } +} + +// runDecoder will create a sync decoder that will decode a block of data. +func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { + saved := d.history.b + + // We use the history for output to avoid copying it. + d.history.b = dst + // Store input length, so we only check new data. + crcStart := len(dst) + var err error + for { + err = dec.reset(d.rawInput, d.WindowSize) + if err != nil { + break + } + if debug { + println("next block:", dec) + } + err = dec.decodeBuf(&d.history) + if err != nil || dec.Last { + break + } + if uint64(len(d.history.b)) > d.o.maxDecodedSize { + err = ErrDecoderSizeExceeded + break + } + if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize { + println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize) + err = ErrFrameSizeExceeded + break + } + } + dst = d.history.b + if err == nil { + if d.HasCheckSum { + var n int + n, err = d.crc.Write(dst[crcStart:]) + if err == nil { + if n != len(dst)-crcStart { + err = io.ErrShortWrite + } else { + err = d.checkCRC() + } + } + } + } + d.history.b = saved + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go new file mode 100644 index 00000000..4479cfe1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -0,0 +1,115 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" + "math" + "math/bits" +) + +type frameHeader struct { + ContentSize uint64 + WindowSize uint32 + SingleSegment bool + Checksum bool + DictID uint32 // Not stored. +} + +const maxHeaderSize = 14 + +func (f frameHeader) appendTo(dst []byte) ([]byte, error) { + dst = append(dst, frameMagic...) + var fhd uint8 + if f.Checksum { + fhd |= 1 << 2 + } + if f.SingleSegment { + fhd |= 1 << 5 + } + var fcs uint8 + if f.ContentSize >= 256 { + fcs++ + } + if f.ContentSize >= 65536+256 { + fcs++ + } + if f.ContentSize >= 0xffffffff { + fcs++ + } + fhd |= fcs << 6 + + dst = append(dst, fhd) + if !f.SingleSegment { + const winLogMin = 10 + windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 + dst = append(dst, uint8(windowLog)) + } + + switch fcs { + case 0: + if f.SingleSegment { + dst = append(dst, uint8(f.ContentSize)) + } + // Unless SingleSegment is set, framessizes < 256 are nto stored. + case 1: + f.ContentSize -= 256 + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) + case 2: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) + case 3: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), + uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) + default: + panic("invalid fcs") + } + return dst, nil +} + +const skippableFrameHeader = 4 + 4 + +// calcSkippableFrame will return a total size to be added for written +// to be divisible by multiple. +// The value will always be > skippableFrameHeader. +// The function will panic if written < 0 or wantMultiple <= 0. 
+func calcSkippableFrame(written, wantMultiple int64) int { + if wantMultiple <= 0 { + panic("wantMultiple <= 0") + } + if written < 0 { + panic("written < 0") + } + leftOver := written % wantMultiple + if leftOver == 0 { + return 0 + } + toAdd := wantMultiple - leftOver + for toAdd < skippableFrameHeader { + toAdd += wantMultiple + } + return int(toAdd) +} + +// skippableFrame will add a skippable frame with a total size of bytes. +// total should be >= skippableFrameHeader and < math.MaxUint32. +func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { + if total == 0 { + return dst, nil + } + if total < skippableFrameHeader { + return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) + } + if int64(total) > math.MaxUint32 { + return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) + } + dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) + f := uint32(total - skippableFrameHeader) + dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) + start := len(dst) + dst = append(dst, make([]byte, f)...) + _, err := io.ReadFull(r, dst[start:]) + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go new file mode 100644 index 00000000..e6d3d49b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -0,0 +1,385 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. 
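// Editorial worked example, not part of the vendored diff: the padding arithmetic of
// calcSkippableFrame above, restated as a standalone helper (paddedSize is a
// hypothetical name) so the numbers are easy to follow.
package main

import "fmt"

func paddedSize(written, wantMultiple int64) int64 {
	const skippableFrameHeader = 8 // 4-byte magic + 4-byte frame size
	leftOver := written % wantMultiple
	if leftOver == 0 {
		return 0 // already aligned, no skippable frame needed
	}
	toAdd := wantMultiple - leftOver
	for toAdd < skippableFrameHeader {
		// Too little room for the header: skip ahead one more multiple.
		toAdd += wantMultiple
	}
	return toAdd
}

func main() {
	fmt.Println(paddedSize(1234, 1024)) // 814  -> 1234+814  = 2048
	fmt.Println(paddedSize(2048, 1024)) // 0    -> already a multiple
	fmt.Println(paddedSize(1020, 1024)) // 1028 -> 4 would not fit the 8-byte header
}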
+func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { + var ( + charnum uint16 + previous0 bool + ) + if b.remain() < 4 { + return errors.New("input too small") + } + bitStream := b.Uint32NC() + nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog + if nbBits > tablelogAbsoluteMax { + println("Invalid tablelog:", nbBits) + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 && charnum <= maxSymbol { + if previous0 { + //println("prev0") + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + //println("24 x 0") + n0 += 24 + if r := b.remain(); r > 5 { + b.advance(2) + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + // end of bit stream + bitStream >>= 16 + bitCount += 16 + } + } + //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) + for charnum < n0 { + s.norm[uint8(charnum)] = 0 + charnum++ + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*threshold - 1) - remaining + var count int32 + + if int32(bitStream)&(threshold-1) < max { + count = int32(bitStream) & (threshold - 1) + if debugAsserts && nbBits < 1 { + panic("nbBits underflow") + } + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + // extra accuracy + count-- + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> (bitCount & 31) + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + bitStream = b.Uint32() >> (bitCount & 31) + } + } + s.symbolLen = charnum + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + // println(s.norm[:s.symbolLen], s.symbolLen) + return s.buildDtable() +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +// Using a composite uint64 is faster than a struct with separate members. 
+type decSymbol uint64 + +func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { + return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) +} + +func (d decSymbol) nbBits() uint8 { + return uint8(d) +} + +func (d decSymbol) addBits() uint8 { + return uint8(d >> 8) +} + +func (d decSymbol) newState() uint16 { + return uint16(d >> 16) +} + +func (d decSymbol) baseline() uint32 { + return uint32(d >> 32) +} + +func (d decSymbol) baselineInt() int { + return int(d >> 32) +} + +func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) { + *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) +} + +func (d *decSymbol) setNBits(nBits uint8) { + const mask = 0xffffffffffffff00 + *d = (*d & mask) | decSymbol(nBits) +} + +func (d *decSymbol) setAddBits(addBits uint8) { + const mask = 0xffffffffffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) +} + +func (d *decSymbol) setNewState(state uint16) { + const mask = 0xffffffff0000ffff + *d = (*d & mask) | decSymbol(state)<<16 +} + +func (d *decSymbol) setBaseline(baseline uint32) { + const mask = 0xffffffff + *d = (*d & mask) | decSymbol(baseline)<<32 +} + +func (d *decSymbol) setExt(addBits uint8, baseline uint32) { + const mask = 0xffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) +} + +// decSymbolValue returns the transformed decSymbol for the given symbol. +func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { + if int(symb) >= len(t) { + return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) + } + lu := t[symb] + return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil +} + +// setRLE will set the decoder til RLE mode. +func (s *fseDecoder) setRLE(symbol decSymbol) { + s.actualTableLog = 0 + s.maxBits = symbol.addBits() + s.dt[0] = symbol +} + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + symbolNext[i] = 1 + } else { + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. 
+ return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// next returns the current symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (s *fseState) next(br *bitReader) { + lowBits := uint16(br.getBits(s.state.nbBits())) + s.state = s.dt[s.state.newState()+lowBits] +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (s *fseState) finished(br *bitReader) bool { + return br.finished() && s.state.nbBits() > 0 +} + +// final returns the current state symbol without decoding the next. +func (s *fseState) final() (int, uint8) { + return s.state.baselineInt(), s.state.addBits() +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (s *fseState) nextFast(br *bitReader) (uint32, uint8) { + lowBits := uint16(br.getBitsFast(s.state.nbBits())) + s.state = s.dt[s.state.newState()+lowBits] + return s.state.baseline(), s.state.addBits() +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go new file mode 100644 index 00000000..aa9eba88 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -0,0 +1,726 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" +) + +const ( + // For encoding we only support up to + maxEncTableLog = 8 + maxEncTablesize = 1 << maxTableLog + maxEncTableMask = (1 << maxTableLog) - 1 + minEncTablelog = 5 + maxEncSymbolValue = maxMatchLengthSymbol +) + +// Scratch provides temporary storage for compression and decompression. +type fseEncoder struct { + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + ct cTable // Compression tables. + maxCount int // count of the most probable symbol + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + useRLE bool // This encoder is for RLE + preDefined bool // This encoder is predefined. 
+ reUsed bool // Set to know when the encoder has been reused. + rleVal uint8 // RLE Symbol + maxBits uint8 // Maximum output bits after transform. + + // TODO: Technically zstd should be fine with 64 bytes. + count [256]uint32 + norm [256]int16 +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaNbBits uint32 + deltaFindState int16 + outBits uint8 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *fseEncoder) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. +func (s *fseEncoder) prepare() (*fseEncoder, error) { + if s == nil { + s = &fseEncoder{} + } + s.useRLE = false + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + return s, nil +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *fseEncoder) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < int(tableSize) { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. 
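// Editorial sketch, not part of the vendored diff: how the Histogram /
// HistogramFinished pair above is intended to be used from inside this package.
// fillHistogram is a hypothetical helper; a caller that already iterates its
// symbols can hand the counts over so normalizeCount does not have to re-count.
func fillHistogram(enc *fseEncoder, symbols []byte) {
	hist := enc.Histogram() // always a 256-entry slice backed by enc.count
	for i := range hist {
		hist[i] = 0
	}
	var maxSym uint8
	var maxCount uint32
	for _, sym := range symbols {
		hist[sym]++
		if sym > maxSym {
			maxSym = sym
		}
		if hist[sym] > maxCount {
			maxCount = hist[sym]
		}
	}
	enc.HistogramFinished(maxSym, int(maxCount))
}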
+func (s *fseEncoder) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [256]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int16(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int16(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debug { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. +// if nil is provided, the number of bits is equal to the index. 
+func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. +func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. 
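+// Low-count symbols are assigned a weight of -1 or 1 up front and the
+// remaining table slots are distributed proportionally over the rest.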
+func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = uint32(total >> tableLog) + lowOne = uint32((total * 3) >> (tableLog + 1)) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = uint32((total * 3) / (toDistribute * 2)) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *fseEncoder) optimalTableLog(length int) { + tableLog := uint8(maxEncTableLog) + minBitsSrc := highBit(uint32(length)) + 1 + minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 + minBits := uint8(minBitsSymbols) + if minBitsSrc < minBitsSymbols { + minBits = uint8(minBitsSrc) + } + + maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minEncTablelog { + tableLog = minEncTablelog + } + if tableLog > maxEncTableLog { + tableLog = maxEncTableLog + } + s.actualTableLog = tableLog +} + +// validateNorm validates the normalized histogram table. 
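+// The absolute values of the normalized counts must sum to 1 << actualTableLog.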
+func (s *fseEncoder) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	return nil
+}
+
+// writeCount will write the normalized histogram count to header.
+// This is read back by readNCount.
+func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
+	if s.useRLE {
+		return append(out, s.rleVal), nil
+	}
+	if s.preDefined || s.reUsed {
+		// Never write predefined.
+		return out, nil
+	}
+
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minEncTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+		outP      = len(out)
+	)
+	if cap(out) < outP+maxHeaderSize {
+		out = append(out, make([]byte, maxHeaderSize*3)...)
+		out = out[:len(out)-maxHeaderSize*3]
+	}
+	out = out[:outP+maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...)
[threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return nil, errors.New("internal error: remaining < 1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + if outP+2 > len(out) { + return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) + } + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += int((bitCount + 7) / 8) + + if charnum > s.symbolLen { + return nil, errors.New("internal error: charnum > s.symbolLen") + } + return out[:outP], nil +} + +// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) +// note 1 : assume symbolValue is valid (<= maxSymbolValue) +// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * +func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { + minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 + threshold := (minNbBits + 1) << 16 + if debugAsserts { + if !(s.actualTableLog < 16) { + panic("!s.actualTableLog < 16") + } + // ensure enough room for renormalization double shift + if !(uint8(accuracyLog) < 31-s.actualTableLog) { + panic("!uint8(accuracyLog) < 31-s.actualTableLog") + } + } + tableSize := uint32(1) << s.actualTableLog + deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) + // linear interpolation (very approximate) + normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog + bitMultiplier := uint32(1) << accuracyLog + if debugAsserts { + if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { + panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") + } + if normalizedDeltaFromThreshold > bitMultiplier { + panic("normalizedDeltaFromThreshold > bitMultiplier") + } + } + return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold +} + +// Returns the cost in bits of encoding the distribution in count using ctable. +// Histogram should only be up to the last non-zero symbol. +// Returns an -1 if ctable cannot represent all the symbols in count. +func (s *fseEncoder) approxSize(hist []uint32) uint32 { + if int(s.symbolLen) < len(hist) { + // More symbols than we have. + return math.MaxUint32 + } + if s.useRLE { + // We will never reuse RLE encoders. + return math.MaxUint32 + } + const kAccuracyLog = 8 + badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog + var cost uint32 + for i, v := range hist { + if v == 0 { + continue + } + if s.norm[i] == 0 { + return math.MaxUint32 + } + bitCost := s.bitCost(uint8(i), kAccuracyLog) + if bitCost > badCost { + return math.MaxUint32 + } + cost += v * bitCost + } + return cost >> kAccuracyLog +} + +// maxHeaderSize returns the maximum header size in bits. +// This is not exact size, but we want a penalty for new tables anyway. +func (s *fseEncoder) maxHeaderSize() uint32 { + if s.preDefined { + return 0 + } + if s.useRLE { + return 8 + } + return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 +} + +// cState contains the compression state of a stream. 
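+// It tracks the current FSE state and writes the state transitions to the bit writer.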
+type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] + return +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState) + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 00000000..6c17dc17 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
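+// Each entry stores the running base value, which advances by 1 << bits for
+// every symbol.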
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic(fmt.Sprintf("invalid decoding table, base overflows int32")) + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 00000000..4a752067 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 
+1,77 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +// hashLen returns a hash of the lowest l bytes of u for a size size of h bytes. +// l must be >=4 and <=8. Any other value will return hash for 4 bytes. +// h should always be <32. +// Preferably h and l should be a constant. +// FIXME: This does NOT get resolved, if 'mls' is constant, +// so this cannot be used. +func hashLen(u uint64, hashLog, mls uint8) uint32 { + switch mls { + case 5: + return hash5(u, hashLog) + case 6: + return hash6(u, hashLog) + case 7: + return hash7(u, hashLog) + case 8: + return hash8(u, hashLog) + default: + return hash4x64(u, hashLog) + } +} + +// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash3(u uint32, h uint8) uint32 { + return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) +} + +// hash4 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4(u uint32, h uint8) uint32 { + return (u * prime4bytes) >> ((32 - h) & 31) +} + +// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4x64(u uint64, h uint8) uint32 { + return (uint32(u) * prime4bytes) >> ((32 - h) & 31) +} + +// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash5(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63)) +} + +// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash6(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) +} + +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash7(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) +} + +// hash8 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash8(u uint64, h uint8) uint32 { + return uint32((u * prime8bytes) >> ((64 - h) & 63)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go new file mode 100644 index 00000000..f418f50f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -0,0 +1,89 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "github.com/klauspost/compress/huff0" +) + +// history contains the information transferred between blocks. 
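+// It holds the decoded window, the recent offsets and the entropy decoders
+// that may be reused by following blocks.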
+type history struct { + b []byte + huffTree *huff0.Scratch + recentOffsets [3]int + decoders sequenceDecs + windowSize int + maxSize int + error bool + dict *dict +} + +// reset will reset the history to initial state of a frame. +// The history must already have been initialized to the desired size. +func (h *history) reset() { + h.b = h.b[:0] + h.error = false + h.recentOffsets = [3]int{1, 4, 8} + if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + } + if f := h.decoders.offsets.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + } + if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + } + h.decoders = sequenceDecs{} + if h.huffTree != nil { + if h.dict == nil || h.dict.litDec != h.huffTree { + huffDecoderPool.Put(h.huffTree) + } + } + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.recentOffsets = dict.offsets + h.huffTree = dict.litDec +} + +// append bytes to history. +// This function will make sure there is space for it, +// if the buffer has been allocated with enough extra space. +func (h *history) append(b []byte) { + if len(b) >= h.windowSize { + // Discard all history by simply overwriting + h.b = h.b[:h.windowSize] + copy(h.b, b[len(b)-h.windowSize:]) + return + } + + // If there is space, append it. + if len(b) < cap(h.b)-len(h.b) { + h.b = append(h.b, b...) + return + } + + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 00000000..24b53065 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
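As a rough illustration of the sliding-window behaviour implemented by the `append` method of `history` above, the standalone sketch below mirrors the same three cases (input larger than the window, input that fits spare capacity, input that pushes out old bytes) on a plain byte slice. It is not part of the vendored package; `appendWindow` and `windowSize` are made up for illustration, and the buffer is assumed to have been allocated with at least `windowSize` capacity, as the decoder does for its history buffer.

```
package main

import "fmt"

// appendWindow mirrors history.append: it keeps at most windowSize bytes of
// the most recent data in buf. buf must have capacity >= windowSize.
func appendWindow(buf, b []byte, windowSize int) []byte {
	if len(b) >= windowSize {
		// The new data alone fills the window: keep only its tail.
		buf = buf[:windowSize]
		copy(buf, b[len(b)-windowSize:])
		return buf
	}
	if len(b) < cap(buf)-len(buf) {
		// Enough spare capacity: just append.
		return append(buf, b...)
	}
	// Discard just enough old bytes so that the old data plus b fits the window.
	discard := len(b) + len(buf) - windowSize
	copy(buf, buf[discard:])
	buf = buf[:windowSize]
	copy(buf[windowSize-len(b):], b)
	return buf
}

func main() {
	const windowSize = 8
	buf := make([]byte, 0, windowSize)
	for _, chunk := range []string{"abcd", "efg", "hij"} {
		buf = appendWindow(buf, []byte(chunk), windowSize)
		fmt.Printf("%q\n", buf)
	}
	// Prints "abcd", "abcdefg", then "cdefghij": once the window is full,
	// the oldest bytes are dropped.
}
```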
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 00000000..69aa3bb5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,58 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) +[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 00000000..426b9cac --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,238 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). 
+var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
+func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + b = b[len(d.mem):] + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go new file mode 100644 index 00000000..35318d7c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go @@ -0,0 +1,13 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(*Digest, []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 00000000..2c9c5357 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,215 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// CX pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// R15 prime4v + +// round reads from and advances the buffer pointer in CX. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (CX), R12 \ + ADDQ $8, CX \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ R15, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. 
+ MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), R15 + + // Load slice. + MOVQ b_base+0(FP), CX + MOVQ b_len+8(FP), DX + LEAQ (CX)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until CX > BX. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + ADDQ $24, BX + + CMPQ CX, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (CX), R8 + ADDQ $8, CX + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ R15, AX + + CMPQ CX, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ CX, BX + JG singles + + MOVL (CX), R8 + ADDQ $4, CX + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ CX, BX + JGE finalize + +singlesLoop: + MOVBQZX (CX), R12 + ADDQ $1, CX + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ CX, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ arg1_base+8(FP), CX + MOVQ arg1_len+16(FP), DX + LEAQ (CX)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. + MOVQ arg+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is CX minus the old base pointer. + SUBQ arg1_base+8(FP), CX + MOVQ CX, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 00000000..4a5a8216 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,76 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. 
+ + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 00000000..6f3b0cb1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 00000000..7ff87040 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,485 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. + llCode, mlCode, ofCode uint8 +} + +func (s seq) String() string { + if s.offset <= 3 { + if s.offset == 0 { + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") +} + +type seqCompMode uint8 + +const ( + compModePredefined seqCompMode = iota + compModeRLE + compModeFSE + compModeRepeat +) + +type sequenceDec struct { + // decoder keeps track of the current state and updates it from the bitstream. + fse *fseDecoder + state fseState + repeat bool +} + +// init the state of the decoder with input from stream. 
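+// The initial state is read from the bitstream using tableLog bits.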
+func (s *sequenceDec) init(br *bitReader) error {
+	if s.fse == nil {
+		return errors.New("sequence decoder not defined")
+	}
+	s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog])
+	return nil
+}
+
+// sequenceDecs contains all 3 decoders and their state.
+type sequenceDecs struct {
+	litLengths   sequenceDec
+	offsets      sequenceDec
+	matchLengths sequenceDec
+	prevOffset   [3]int
+	hist         []byte
+	dict         []byte
+	literals     []byte
+	out          []byte
+	windowSize   int
+	maxBits      uint8
+}
+
+// initialize all 3 decoders from the stream input.
+func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error {
+	if err := s.litLengths.init(br); err != nil {
+		return errors.New("litLengths:" + err.Error())
+	}
+	if err := s.offsets.init(br); err != nil {
+		return errors.New("offsets:" + err.Error())
+	}
+	if err := s.matchLengths.init(br); err != nil {
+		return errors.New("matchLengths:" + err.Error())
+	}
+	s.literals = literals
+	s.hist = hist.b
+	s.prevOffset = hist.recentOffsets
+	s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
+	s.windowSize = hist.windowSize
+	s.out = out
+	s.dict = nil
+	if hist.dict != nil {
+		s.dict = hist.dict.content
+	}
+	return nil
+}
+
+// decode sequences from the stream with the provided history.
+func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
+	startSize := len(s.out)
+	// Grab full sizes tables, to avoid bounds checks.
+	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
+	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+
+	for i := seqs - 1; i >= 0; i-- {
+		if br.overread() {
+			printf("reading sequence %d, exceeded available data\n", seqs-i)
+			return io.ErrUnexpectedEOF
+		}
+		var ll, mo, ml int
+		if br.off > 4+((maxOffsetBits+16+16)>>3) {
+			// inlined function:
+			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
+
+			// Final will not read from stream.
+			var llB, mlB, moB uint8
+			ll, llB = llState.final()
+			ml, mlB = mlState.final()
+			mo, moB = ofState.final()
+
+			// extra bits are stored in reverse order.
+			br.fillFast()
+			mo += br.getBits(moB)
+			if s.maxBits > 32 {
+				br.fillFast()
+			}
+			ml += br.getBits(mlB)
+			ll += br.getBits(llB)
+
+			if moB > 1 {
+				s.prevOffset[2] = s.prevOffset[1]
+				s.prevOffset[1] = s.prevOffset[0]
+				s.prevOffset[0] = mo
+			} else {
+				// mo = s.adjustOffset(mo, ll, moB)
+				// Inlined for rather big speedup
+				if ll == 0 {
+					// There is an exception though, when current sequence's literals_length = 0.
+					// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+					// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+					mo++
+				}
+
+				if mo == 0 {
+					mo = s.prevOffset[0]
+				} else {
+					var temp int
+					if mo == 3 {
+						temp = s.prevOffset[0] - 1
+					} else {
+						temp = s.prevOffset[mo]
+					}
+
+					if temp == 0 {
+						// 0 is not valid; input is corrupted; force offset to 1
+						println("temp was 0")
+						temp = 1
+					}
+
+					if mo != 1 {
+						s.prevOffset[2] = s.prevOffset[1]
+					}
+					s.prevOffset[1] = s.prevOffset[0]
+					s.prevOffset[0] = temp
+					mo = temp
+				}
+			}
+			br.fillFast()
+		} else {
+			ll, mo, ml = s.next(br, llState, mlState, ofState)
+			br.fill()
+		}
+
+		if debugSequences {
+			println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
+		}
+
+		if ll > len(s.literals) {
+			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
+		}
+		size := ll + ml + len(s.out)
+		if size-startSize > maxBlockSize {
+			return fmt.Errorf("output (%d) bigger than max block size", size)
+		}
+		if size > cap(s.out) {
+			// Not enough size, will be extremely rarely triggered,
+			// but could be if destination slice is too small for sync operations.
+			// We add maxBlockSize to the capacity.
+			s.out = append(s.out, make([]byte, maxBlockSize)...)
+			s.out = s.out[:len(s.out)-maxBlockSize]
+		}
+		if ml > maxMatchLen {
+			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+		}
+
+		// Add literals
+		s.out = append(s.out, s.literals[:ll]...)
+		s.literals = s.literals[ll:]
+		out := s.out
+
+		if mo > len(s.out)+len(hist) || mo > s.windowSize {
+			if len(s.dict) == 0 {
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+			}
+
+			// we may be in dictionary.
+			dictO := len(s.dict) - (mo - (len(s.out) + len(hist)))
+			if dictO < 0 || dictO >= len(s.dict) {
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+			}
+			end := dictO + ml
+			if end > len(s.dict) {
+				out = append(out, s.dict[dictO:]...)
+				mo -= len(s.dict) - dictO
+				ml -= len(s.dict) - dictO
+			} else {
+				out = append(out, s.dict[dictO:end]...)
+				mo = 0
+				ml = 0
+			}
+		}
+
+		if mo == 0 && ml > 0 {
+			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+		}
+
+		// Copy from history.
+		// TODO: Blocks without history could be made to ignore this completely.
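+		// For example, with len(s.out) == 100, mo == 120 and ml == 30, the first
+		// 20 bytes of the match come from the last 20 bytes of the history buffer
+		// and the remaining 10 bytes from the current output.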
+ if v := mo - len(s.out); v > 0 { + // v is the start position in history from end. + start := len(s.hist) - v + if ml > v { + // Some goes into current block. + // Copy remainder of history + out = append(out, s.hist[start:]...) + mo -= v + ml -= v + } else { + out = append(out, s.hist[start:start+ml]...) + ml = 0 + } + } + // We must be in current buffer now + if ml > 0 { + start := len(s.out) - mo + if ml <= len(s.out)-start { + // No overlap + out = append(out, s.out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. + dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + s.out = out + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.getBitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + // Add final literals + s.out = append(s.out, s.literals...) + return nil +} + +// update states, at least 27 bits must be available. +func (s *sequenceDecs) update(br *bitReader) { + // Max 8 bits + s.litLengths.state.next(br) + // Max 9 bits + s.matchLengths.state.next(br) + // Max 8 bits + s.offsets.state.next(br) +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +// update states, at least 27 bits must be available. +func (s *sequenceDecs) updateAlt(br *bitReader) { + // Update all 3 states at once. Approx 20% faster. + a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + + nBits := a.nbBits() + b.nbBits() + c.nbBits() + if nBits == 0 { + s.litLengths.state.state = s.litLengths.state.dt[a.newState()] + s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] + s.offsets.state.state = s.offsets.state.dt[c.newState()] + return + } + bits := br.getBitsFast(nBits) + lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) + s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] + + lowBits = uint16(bits >> (c.nbBits() & 31)) + lowBits &= bitMask[b.nbBits()&15] + s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] + + lowBits = uint16(bits) & bitMask[c.nbBits()&15] + s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] +} + +// nextFast will return new states when there are at least 4 unused bytes left on the stream when done. +func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. 
+ br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + return + } + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + return + } + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + return +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. + br.fill() + if s.maxBits <= 32 { + mo += br.getBits(moB) + ml += br.getBits(mlB) + ll += br.getBits(llB) + } else { + mo += br.getBits(moB) + br.fill() + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + + } + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} + +// mergeHistory will merge history. 
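+// For each of the three sequence decoders the freshly read table replaces the
+// one in the history, unless "repeat" was signalled, in which case the table
+// already in the history is kept.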
+func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) { + for i := uint(0); i < 3; i++ { + var sNew, sHist *sequenceDec + switch i { + default: + // same as "case 0": + sNew = &s.litLengths + sHist = &hist.litLengths + case 1: + sNew = &s.offsets + sHist = &hist.offsets + case 2: + sNew = &s.matchLengths + sHist = &hist.matchLengths + } + if sNew.repeat { + if sHist.fse == nil { + return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i) + } + continue + } + if sNew.fse == nil { + return nil, fmt.Errorf("sequence stream %d, no fse found", i) + } + if sHist.fse != nil && !sHist.fse.preDefined { + fseDecoderPool.Put(sHist.fse) + } + sHist.fse = sNew.fse + } + return hist, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 00000000..36bcc3cc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,115 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. +func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, more current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + return + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested. 
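+// Lengths up to 63 use the lookup table; larger values are derived from the
+// position of their highest set bit.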
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 00000000..690428cd --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,436 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/snappy" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. 
+ ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") + // ErrSnappyUnsupported reports that the input isn't supported. + ErrSnappyUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. +// Conversion is done by converting the stream directly from Snappy without intermediate +// full decoding. +// Therefore the compression ratio is much less than what can be done by a full decompression +// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +// any errors being generated. +// No CRC value is being generated and not all CRC values of the Snappy stream are checked. +// However, it provides really fast recompression of Snappy streams. +// The converter can be reused to avoid allocations, even after errors. +type SnappyConverter struct { + r io.Reader + err error + buf []byte + block *blockEnc +} + +// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. +// If any error is detected on the Snappy stream it is returned. +// The number of bytes written is returned. +func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { + initPredefined() + r.err = nil + r.r = in + if r.block == nil { + r.block = &blockEnc{} + r.block.init() + } + r.block.initNewEncode() + if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { + r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) + } + r.block.litEnc.Reuse = huff0.ReusePolicyNone + var written int64 + var readHeader bool + { + var header []byte + var n int + header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + + n, r.err = w.Write(header) + if r.err != nil { + return written, r.err + } + written += int64(n) + } + + for { + if !r.readFull(r.buf[:4], true) { + // Add empty last block + r.block.reset(nil) + r.block.last = true + err := r.block.encodeLits(false) + if err != nil { + return written, err + } + n, err := w.Write(r.block.output) + if err != nil { + return written, err + } + written += int64(n) + + return written, r.err + } + chunkType := r.buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + println("chunkType != chunkTypeStreamIdentifier", chunkType) + r.err = ErrSnappyCorrupt + return written, r.err + } + readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + println("chunkLen > len(r.buf)", chunkType) + r.err = ErrSnappyUnsupported + return written, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + println("snappy.Decode:", err) + return written, err + } + err = r.block.encodeLits(false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debug { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debug { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
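For context, a minimal sketch of how the converter defined in this file might be driven from application code. Only the zstd.SnappyConverter type and its Convert method come from the code above; the file names and the surrounding program are assumptions for illustration.

```go
// Repack a Snappy framing-format stream as Zstandard without a full
// decompress/recompress cycle, using SnappyConverter.Convert.
package main

import (
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	in, err := os.Open("payload.sz") // Snappy framing-format input (assumed name)
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	out, err := os.Create("payload.zst") // zstd output (assumed name)
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// Convert the Snappy frames into a Zstandard stream.
	var conv zstd.SnappyConverter
	written, err := conv.Convert(in, out)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %d bytes of zstd output", written)
}
```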
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 00000000..0807719c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,144 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "errors" + "log" + "math" + "math/bits" +) + +// enable debug printing +const debug = false + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// Reset the buffer offset when reaching this. +const bufferReset = math.MaxInt32 - MaxWindowSize + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. + ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. 
+ ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + // For the time being dictionaries are not supported. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") +) + +func println(a ...interface{}) { + if debug { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug { + log.Printf(format, a...) + } +} + +// matchLenFast does matching, but will not match the last up to 7 bytes. +func matchLenFast(a, b []byte) int { + endI := len(a) & (math.MaxInt32 - 7) + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + return i + bits.TrailingZeros64(diff)>>3 + } + } + return endI +} + +// matchLen returns the maximum length. +// a must be the shortest of the two. +// The function also returns whether all bytes matched. +func matchLen(a, b []byte) int { + b = b[:len(a)] + for i := 0; i < len(a)-7; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + return i + (bits.TrailingZeros64(diff) >> 3) + } + } + + checked := (len(a) >> 3) << 3 + a = a[checked:] + b = b[checked:] + for i := range a { + if a[i] != b[i] { + return i + checked + } + } + return len(a) + checked +} + +func load3232(b []byte, i int32) uint32 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:4] + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6432(b []byte, i int32) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func load64(b []byte, i int) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. 
+ b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} diff --git a/vendor/github.com/klauspost/crc32/.gitignore b/vendor/github.com/klauspost/crc32/.gitignore deleted file mode 100644 index daf913b1..00000000 --- a/vendor/github.com/klauspost/crc32/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/klauspost/crc32/.travis.yml b/vendor/github.com/klauspost/crc32/.travis.yml deleted file mode 100644 index de64ae49..00000000 --- a/vendor/github.com/klauspost/crc32/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - tip - -script: - - go test -v . - - go test -v -race . diff --git a/vendor/github.com/klauspost/crc32/README.md b/vendor/github.com/klauspost/crc32/README.md deleted file mode 100644 index 029625d3..00000000 --- a/vendor/github.com/klauspost/crc32/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# crc32 -CRC32 hash with x64 optimizations - -This package is a drop-in replacement for the standard library `hash/crc32` package, that features SSE 4.2 optimizations on x64 platforms, for a 10x speedup. - -[![Build Status](https://travis-ci.org/klauspost/crc32.svg?branch=master)](https://travis-ci.org/klauspost/crc32) - -# usage - -Install using `go get github.com/klauspost/crc32`. This library is based on Go 1.5 code and requires Go 1.3 or newer. - -Replace `import "hash/crc32"` with `import "github.com/klauspost/crc32"` and you are good to go. - -# changes -* Oct 20, 2016: Changes have been merged to upstream Go. Package updated to match. -* Dec 4, 2015: Uses the "slice-by-8" trick more extensively, which gives a 1.5 to 2.5x speedup if assembler is unavailable. - - -# performance - -For *Go 1.7* performance is equivalent to the standard library. So if you use this package for Go 1.7 you can switch back. - - -For IEEE tables (the most common), there is approximately a factor 10 speedup with "CLMUL" (Carryless multiplication) instruction: -``` -benchmark old ns/op new ns/op delta -BenchmarkCrc32KB 99955 10258 -89.74% - -benchmark old MB/s new MB/s speedup -BenchmarkCrc32KB 327.83 3194.20 9.74x -``` - -For other tables and "CLMUL" capable machines the performance is the same as the standard library. - -Here are some detailed benchmarks, comparing to go 1.5 standard library with and without assembler enabled. - -``` -Std: Standard Go 1.5 library -Crc: Indicates IEEE type CRC. -40B: Size of each slice encoded. -NoAsm: Assembler was disabled (ie. not an AMD64 or SSE 4.2+ capable machine). -Castagnoli: Castagnoli CRC type. 
- -BenchmarkStdCrc40B-4 10000000 158 ns/op 252.88 MB/s -BenchmarkCrc40BNoAsm-4 20000000 105 ns/op 377.38 MB/s (slice8) -BenchmarkCrc40B-4 20000000 105 ns/op 378.77 MB/s (slice8) - -BenchmarkStdCrc1KB-4 500000 3604 ns/op 284.10 MB/s -BenchmarkCrc1KBNoAsm-4 1000000 1463 ns/op 699.79 MB/s (slice8) -BenchmarkCrc1KB-4 3000000 396 ns/op 2583.69 MB/s (asm) - -BenchmarkStdCrc8KB-4 200000 11417 ns/op 717.48 MB/s (slice8) -BenchmarkCrc8KBNoAsm-4 200000 11317 ns/op 723.85 MB/s (slice8) -BenchmarkCrc8KB-4 500000 2919 ns/op 2805.73 MB/s (asm) - -BenchmarkStdCrc32KB-4 30000 45749 ns/op 716.24 MB/s (slice8) -BenchmarkCrc32KBNoAsm-4 30000 45109 ns/op 726.42 MB/s (slice8) -BenchmarkCrc32KB-4 100000 11497 ns/op 2850.09 MB/s (asm) - -BenchmarkStdNoAsmCastagnol40B-4 10000000 161 ns/op 246.94 MB/s -BenchmarkStdCastagnoli40B-4 50000000 28.4 ns/op 1410.69 MB/s (asm) -BenchmarkCastagnoli40BNoAsm-4 20000000 100 ns/op 398.01 MB/s (slice8) -BenchmarkCastagnoli40B-4 50000000 28.2 ns/op 1419.54 MB/s (asm) - -BenchmarkStdNoAsmCastagnoli1KB-4 500000 3622 ns/op 282.67 MB/s -BenchmarkStdCastagnoli1KB-4 10000000 144 ns/op 7099.78 MB/s (asm) -BenchmarkCastagnoli1KBNoAsm-4 1000000 1475 ns/op 694.14 MB/s (slice8) -BenchmarkCastagnoli1KB-4 10000000 146 ns/op 6993.35 MB/s (asm) - -BenchmarkStdNoAsmCastagnoli8KB-4 50000 28781 ns/op 284.63 MB/s -BenchmarkStdCastagnoli8KB-4 1000000 1029 ns/op 7957.89 MB/s (asm) -BenchmarkCastagnoli8KBNoAsm-4 200000 11410 ns/op 717.94 MB/s (slice8) -BenchmarkCastagnoli8KB-4 1000000 1000 ns/op 8188.71 MB/s (asm) - -BenchmarkStdNoAsmCastagnoli32KB-4 10000 115426 ns/op 283.89 MB/s -BenchmarkStdCastagnoli32KB-4 300000 4065 ns/op 8059.13 MB/s (asm) -BenchmarkCastagnoli32KBNoAsm-4 30000 45171 ns/op 725.41 MB/s (slice8) -BenchmarkCastagnoli32KB-4 500000 4077 ns/op 8035.89 MB/s (asm) -``` - -The IEEE assembler optimizations has been submitted and will be part of the Go 1.6 standard library. - -However, the improved use of slice-by-8 has not, but will probably be submitted for Go 1.7. - -# license - -Standard Go license. Changes are Copyright (c) 2015 Klaus Post under same conditions. diff --git a/vendor/github.com/klauspost/crc32/crc32.go b/vendor/github.com/klauspost/crc32/crc32.go deleted file mode 100644 index 8aa91b17..00000000 --- a/vendor/github.com/klauspost/crc32/crc32.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package crc32 implements the 32-bit cyclic redundancy check, or CRC-32, -// checksum. See http://en.wikipedia.org/wiki/Cyclic_redundancy_check for -// information. -// -// Polynomials are represented in LSB-first form also known as reversed representation. -// -// See http://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks#Reversed_representations_and_reciprocal_polynomials -// for information. -package crc32 - -import ( - "hash" - "sync" -) - -// The size of a CRC-32 checksum in bytes. -const Size = 4 - -// Predefined polynomials. -const ( - // IEEE is by far and away the most common CRC-32 polynomial. - // Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ... - IEEE = 0xedb88320 - - // Castagnoli's polynomial, used in iSCSI. - // Has better error detection characteristics than IEEE. - // http://dx.doi.org/10.1109/26.231911 - Castagnoli = 0x82f63b78 - - // Koopman's polynomial. - // Also has better error detection characteristics than IEEE. 
- // http://dx.doi.org/10.1109/DSN.2002.1028931 - Koopman = 0xeb31d82e -) - -// Table is a 256-word table representing the polynomial for efficient processing. -type Table [256]uint32 - -// This file makes use of functions implemented in architecture-specific files. -// The interface that they implement is as follows: -// -// // archAvailableIEEE reports whether an architecture-specific CRC32-IEEE -// // algorithm is available. -// archAvailableIEEE() bool -// -// // archInitIEEE initializes the architecture-specific CRC3-IEEE algorithm. -// // It can only be called if archAvailableIEEE() returns true. -// archInitIEEE() -// -// // archUpdateIEEE updates the given CRC32-IEEE. It can only be called if -// // archInitIEEE() was previously called. -// archUpdateIEEE(crc uint32, p []byte) uint32 -// -// // archAvailableCastagnoli reports whether an architecture-specific -// // CRC32-C algorithm is available. -// archAvailableCastagnoli() bool -// -// // archInitCastagnoli initializes the architecture-specific CRC32-C -// // algorithm. It can only be called if archAvailableCastagnoli() returns -// // true. -// archInitCastagnoli() -// -// // archUpdateCastagnoli updates the given CRC32-C. It can only be called -// // if archInitCastagnoli() was previously called. -// archUpdateCastagnoli(crc uint32, p []byte) uint32 - -// castagnoliTable points to a lazily initialized Table for the Castagnoli -// polynomial. MakeTable will always return this value when asked to make a -// Castagnoli table so we can compare against it to find when the caller is -// using this polynomial. -var castagnoliTable *Table -var castagnoliTable8 *slicing8Table -var castagnoliArchImpl bool -var updateCastagnoli func(crc uint32, p []byte) uint32 -var castagnoliOnce sync.Once - -func castagnoliInit() { - castagnoliTable = simpleMakeTable(Castagnoli) - castagnoliArchImpl = archAvailableCastagnoli() - - if castagnoliArchImpl { - archInitCastagnoli() - updateCastagnoli = archUpdateCastagnoli - } else { - // Initialize the slicing-by-8 table. - castagnoliTable8 = slicingMakeTable(Castagnoli) - updateCastagnoli = func(crc uint32, p []byte) uint32 { - return slicingUpdate(crc, castagnoliTable8, p) - } - } -} - -// IEEETable is the table for the IEEE polynomial. -var IEEETable = simpleMakeTable(IEEE) - -// ieeeTable8 is the slicing8Table for IEEE -var ieeeTable8 *slicing8Table -var ieeeArchImpl bool -var updateIEEE func(crc uint32, p []byte) uint32 -var ieeeOnce sync.Once - -func ieeeInit() { - ieeeArchImpl = archAvailableIEEE() - - if ieeeArchImpl { - archInitIEEE() - updateIEEE = archUpdateIEEE - } else { - // Initialize the slicing-by-8 table. - ieeeTable8 = slicingMakeTable(IEEE) - updateIEEE = func(crc uint32, p []byte) uint32 { - return slicingUpdate(crc, ieeeTable8, p) - } - } -} - -// MakeTable returns a Table constructed from the specified polynomial. -// The contents of this Table must not be modified. -func MakeTable(poly uint32) *Table { - switch poly { - case IEEE: - ieeeOnce.Do(ieeeInit) - return IEEETable - case Castagnoli: - castagnoliOnce.Do(castagnoliInit) - return castagnoliTable - } - return simpleMakeTable(poly) -} - -// digest represents the partial evaluation of a checksum. -type digest struct { - crc uint32 - tab *Table -} - -// New creates a new hash.Hash32 computing the CRC-32 checksum -// using the polynomial represented by the Table. -// Its Sum method will lay the value out in big-endian byte order. 
-func New(tab *Table) hash.Hash32 { - if tab == IEEETable { - ieeeOnce.Do(ieeeInit) - } - return &digest{0, tab} -} - -// NewIEEE creates a new hash.Hash32 computing the CRC-32 checksum -// using the IEEE polynomial. -// Its Sum method will lay the value out in big-endian byte order. -func NewIEEE() hash.Hash32 { return New(IEEETable) } - -func (d *digest) Size() int { return Size } - -func (d *digest) BlockSize() int { return 1 } - -func (d *digest) Reset() { d.crc = 0 } - -// Update returns the result of adding the bytes in p to the crc. -func Update(crc uint32, tab *Table, p []byte) uint32 { - switch tab { - case castagnoliTable: - return updateCastagnoli(crc, p) - case IEEETable: - // Unfortunately, because IEEETable is exported, IEEE may be used without a - // call to MakeTable. We have to make sure it gets initialized in that case. - ieeeOnce.Do(ieeeInit) - return updateIEEE(crc, p) - default: - return simpleUpdate(crc, tab, p) - } -} - -func (d *digest) Write(p []byte) (n int, err error) { - switch d.tab { - case castagnoliTable: - d.crc = updateCastagnoli(d.crc, p) - case IEEETable: - // We only create digest objects through New() which takes care of - // initialization in this case. - d.crc = updateIEEE(d.crc, p) - default: - d.crc = simpleUpdate(d.crc, d.tab, p) - } - return len(p), nil -} - -func (d *digest) Sum32() uint32 { return d.crc } - -func (d *digest) Sum(in []byte) []byte { - s := d.Sum32() - return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) -} - -// Checksum returns the CRC-32 checksum of data -// using the polynomial represented by the Table. -func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) } - -// ChecksumIEEE returns the CRC-32 checksum of data -// using the IEEE polynomial. -func ChecksumIEEE(data []byte) uint32 { - ieeeOnce.Do(ieeeInit) - return updateIEEE(0, data) -} diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64.go b/vendor/github.com/klauspost/crc32/crc32_amd64.go deleted file mode 100644 index af2a0b84..00000000 --- a/vendor/github.com/klauspost/crc32/crc32_amd64.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine,!gccgo - -// AMD64-specific hardware-assisted CRC32 algorithms. See crc32.go for a -// description of the interface that each architecture-specific file -// implements. - -package crc32 - -import "unsafe" - -// This file contains the code to call the SSE 4.2 version of the Castagnoli -// and IEEE CRC. - -// haveSSE41/haveSSE42/haveCLMUL are defined in crc_amd64.s and use -// CPUID to test for SSE 4.1, 4.2 and CLMUL support. -func haveSSE41() bool -func haveSSE42() bool -func haveCLMUL() bool - -// castagnoliSSE42 is defined in crc32_amd64.s and uses the SSE4.2 CRC32 -// instruction. -//go:noescape -func castagnoliSSE42(crc uint32, p []byte) uint32 - -// castagnoliSSE42Triple is defined in crc32_amd64.s and uses the SSE4.2 CRC32 -// instruction. -//go:noescape -func castagnoliSSE42Triple( - crcA, crcB, crcC uint32, - a, b, c []byte, - rounds uint32, -) (retA uint32, retB uint32, retC uint32) - -// ieeeCLMUL is defined in crc_amd64.s and uses the PCLMULQDQ -// instruction as well as SSE 4.1. 
-//go:noescape -func ieeeCLMUL(crc uint32, p []byte) uint32 - -var sse42 = haveSSE42() -var useFastIEEE = haveCLMUL() && haveSSE41() - -const castagnoliK1 = 168 -const castagnoliK2 = 1344 - -type sse42Table [4]Table - -var castagnoliSSE42TableK1 *sse42Table -var castagnoliSSE42TableK2 *sse42Table - -func archAvailableCastagnoli() bool { - return sse42 -} - -func archInitCastagnoli() { - if !sse42 { - panic("arch-specific Castagnoli not available") - } - castagnoliSSE42TableK1 = new(sse42Table) - castagnoliSSE42TableK2 = new(sse42Table) - // See description in updateCastagnoli. - // t[0][i] = CRC(i000, O) - // t[1][i] = CRC(0i00, O) - // t[2][i] = CRC(00i0, O) - // t[3][i] = CRC(000i, O) - // where O is a sequence of K zeros. - var tmp [castagnoliK2]byte - for b := 0; b < 4; b++ { - for i := 0; i < 256; i++ { - val := uint32(i) << uint32(b*8) - castagnoliSSE42TableK1[b][i] = castagnoliSSE42(val, tmp[:castagnoliK1]) - castagnoliSSE42TableK2[b][i] = castagnoliSSE42(val, tmp[:]) - } - } -} - -// castagnoliShift computes the CRC32-C of K1 or K2 zeroes (depending on the -// table given) with the given initial crc value. This corresponds to -// CRC(crc, O) in the description in updateCastagnoli. -func castagnoliShift(table *sse42Table, crc uint32) uint32 { - return table[3][crc>>24] ^ - table[2][(crc>>16)&0xFF] ^ - table[1][(crc>>8)&0xFF] ^ - table[0][crc&0xFF] -} - -func archUpdateCastagnoli(crc uint32, p []byte) uint32 { - if !sse42 { - panic("not available") - } - - // This method is inspired from the algorithm in Intel's white paper: - // "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction" - // The same strategy of splitting the buffer in three is used but the - // combining calculation is different; the complete derivation is explained - // below. - // - // -- The basic idea -- - // - // The CRC32 instruction (available in SSE4.2) can process 8 bytes at a - // time. In recent Intel architectures the instruction takes 3 cycles; - // however the processor can pipeline up to three instructions if they - // don't depend on each other. - // - // Roughly this means that we can process three buffers in about the same - // time we can process one buffer. - // - // The idea is then to split the buffer in three, CRC the three pieces - // separately and then combine the results. - // - // Combining the results requires precomputed tables, so we must choose a - // fixed buffer length to optimize. The longer the length, the faster; but - // only buffers longer than this length will use the optimization. We choose - // two cutoffs and compute tables for both: - // - one around 512: 168*3=504 - // - one around 4KB: 1344*3=4032 - // - // -- The nitty gritty -- - // - // Let CRC(I, X) be the non-inverted CRC32-C of the sequence X (with - // initial non-inverted CRC I). This function has the following properties: - // (a) CRC(I, AB) = CRC(CRC(I, A), B) - // (b) CRC(I, A xor B) = CRC(I, A) xor CRC(0, B) - // - // Say we want to compute CRC(I, ABC) where A, B, C are three sequences of - // K bytes each, where K is a fixed constant. Let O be the sequence of K zero - // bytes. - // - // CRC(I, ABC) = CRC(I, ABO xor C) - // = CRC(I, ABO) xor CRC(0, C) - // = CRC(CRC(I, AB), O) xor CRC(0, C) - // = CRC(CRC(I, AO xor B), O) xor CRC(0, C) - // = CRC(CRC(I, AO) xor CRC(0, B), O) xor CRC(0, C) - // = CRC(CRC(CRC(I, A), O) xor CRC(0, B), O) xor CRC(0, C) - // - // The castagnoliSSE42Triple function can compute CRC(I, A), CRC(0, B), - // and CRC(0, C) efficiently. 
We just need to find a way to quickly compute - // CRC(uvwx, O) given a 4-byte initial value uvwx. We can precompute these - // values; since we can't have a 32-bit table, we break it up into four - // 8-bit tables: - // - // CRC(uvwx, O) = CRC(u000, O) xor - // CRC(0v00, O) xor - // CRC(00w0, O) xor - // CRC(000x, O) - // - // We can compute tables corresponding to the four terms for all 8-bit - // values. - - crc = ^crc - - // If a buffer is long enough to use the optimization, process the first few - // bytes to align the buffer to an 8 byte boundary (if necessary). - if len(p) >= castagnoliK1*3 { - delta := int(uintptr(unsafe.Pointer(&p[0])) & 7) - if delta != 0 { - delta = 8 - delta - crc = castagnoliSSE42(crc, p[:delta]) - p = p[delta:] - } - } - - // Process 3*K2 at a time. - for len(p) >= castagnoliK2*3 { - // Compute CRC(I, A), CRC(0, B), and CRC(0, C). - crcA, crcB, crcC := castagnoliSSE42Triple( - crc, 0, 0, - p, p[castagnoliK2:], p[castagnoliK2*2:], - castagnoliK2/24) - - // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B) - crcAB := castagnoliShift(castagnoliSSE42TableK2, crcA) ^ crcB - // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C) - crc = castagnoliShift(castagnoliSSE42TableK2, crcAB) ^ crcC - p = p[castagnoliK2*3:] - } - - // Process 3*K1 at a time. - for len(p) >= castagnoliK1*3 { - // Compute CRC(I, A), CRC(0, B), and CRC(0, C). - crcA, crcB, crcC := castagnoliSSE42Triple( - crc, 0, 0, - p, p[castagnoliK1:], p[castagnoliK1*2:], - castagnoliK1/24) - - // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B) - crcAB := castagnoliShift(castagnoliSSE42TableK1, crcA) ^ crcB - // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C) - crc = castagnoliShift(castagnoliSSE42TableK1, crcAB) ^ crcC - p = p[castagnoliK1*3:] - } - - // Use the simple implementation for what's left. - crc = castagnoliSSE42(crc, p) - return ^crc -} - -func archAvailableIEEE() bool { - return useFastIEEE -} - -var archIeeeTable8 *slicing8Table - -func archInitIEEE() { - if !useFastIEEE { - panic("not available") - } - // We still use slicing-by-8 for small buffers. - archIeeeTable8 = slicingMakeTable(IEEE) -} - -func archUpdateIEEE(crc uint32, p []byte) uint32 { - if !useFastIEEE { - panic("not available") - } - - if len(p) >= 64 { - left := len(p) & 15 - do := len(p) - left - crc = ^ieeeCLMUL(^crc, p[:do]) - p = p[do:] - } - if len(p) == 0 { - return crc - } - return slicingUpdate(crc, archIeeeTable8, p) -} diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64.s b/vendor/github.com/klauspost/crc32/crc32_amd64.s deleted file mode 100644 index e8a7941c..00000000 --- a/vendor/github.com/klauspost/crc32/crc32_amd64.s +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#define NOSPLIT 4 -#define RODATA 8 - -// castagnoliSSE42 updates the (non-inverted) crc with the given buffer. -// -// func castagnoliSSE42(crc uint32, p []byte) uint32 -TEXT ·castagnoliSSE42(SB), NOSPLIT, $0 - MOVL crc+0(FP), AX // CRC value - MOVQ p+8(FP), SI // data pointer - MOVQ p_len+16(FP), CX // len(p) - - // If there are fewer than 8 bytes to process, skip alignment. - CMPQ CX, $8 - JL less_than_8 - - MOVQ SI, BX - ANDQ $7, BX - JZ aligned - - // Process the first few bytes to 8-byte align the input. - - // BX = 8 - BX. We need to process this many bytes to align. 
- SUBQ $1, BX - XORQ $7, BX - - BTQ $0, BX - JNC align_2 - - CRC32B (SI), AX - DECQ CX - INCQ SI - -align_2: - BTQ $1, BX - JNC align_4 - - // CRC32W (SI), AX - BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 - - SUBQ $2, CX - ADDQ $2, SI - -align_4: - BTQ $2, BX - JNC aligned - - // CRC32L (SI), AX - BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 - - SUBQ $4, CX - ADDQ $4, SI - -aligned: - // The input is now 8-byte aligned and we can process 8-byte chunks. - CMPQ CX, $8 - JL less_than_8 - - CRC32Q (SI), AX - ADDQ $8, SI - SUBQ $8, CX - JMP aligned - -less_than_8: - // We may have some bytes left over; process 4 bytes, then 2, then 1. - BTQ $2, CX - JNC less_than_4 - - // CRC32L (SI), AX - BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 - ADDQ $4, SI - -less_than_4: - BTQ $1, CX - JNC less_than_2 - - // CRC32W (SI), AX - BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 - ADDQ $2, SI - -less_than_2: - BTQ $0, CX - JNC done - - CRC32B (SI), AX - -done: - MOVL AX, ret+32(FP) - RET - -// castagnoliSSE42Triple updates three (non-inverted) crcs with (24*rounds) -// bytes from each buffer. -// -// func castagnoliSSE42Triple( -// crc1, crc2, crc3 uint32, -// a, b, c []byte, -// rounds uint32, -// ) (retA uint32, retB uint32, retC uint32) -TEXT ·castagnoliSSE42Triple(SB), NOSPLIT, $0 - MOVL crcA+0(FP), AX - MOVL crcB+4(FP), CX - MOVL crcC+8(FP), DX - - MOVQ a+16(FP), R8 // data pointer - MOVQ b+40(FP), R9 // data pointer - MOVQ c+64(FP), R10 // data pointer - - MOVL rounds+88(FP), R11 - -loop: - CRC32Q (R8), AX - CRC32Q (R9), CX - CRC32Q (R10), DX - - CRC32Q 8(R8), AX - CRC32Q 8(R9), CX - CRC32Q 8(R10), DX - - CRC32Q 16(R8), AX - CRC32Q 16(R9), CX - CRC32Q 16(R10), DX - - ADDQ $24, R8 - ADDQ $24, R9 - ADDQ $24, R10 - - DECQ R11 - JNZ loop - - MOVL AX, retA+96(FP) - MOVL CX, retB+100(FP) - MOVL DX, retC+104(FP) - RET - -// func haveSSE42() bool -TEXT ·haveSSE42(SB), NOSPLIT, $0 - XORQ AX, AX - INCL AX - CPUID - SHRQ $20, CX - ANDQ $1, CX - MOVB CX, ret+0(FP) - RET - -// func haveCLMUL() bool -TEXT ·haveCLMUL(SB), NOSPLIT, $0 - XORQ AX, AX - INCL AX - CPUID - SHRQ $1, CX - ANDQ $1, CX - MOVB CX, ret+0(FP) - RET - -// func haveSSE41() bool -TEXT ·haveSSE41(SB), NOSPLIT, $0 - XORQ AX, AX - INCL AX - CPUID - SHRQ $19, CX - ANDQ $1, CX - MOVB CX, ret+0(FP) - RET - -// CRC32 polynomial data -// -// These constants are lifted from the -// Linux kernel, since they avoid the costly -// PSHUFB 16 byte reversal proposed in the -// original Intel paper. -DATA r2r1kp<>+0(SB)/8, $0x154442bd4 -DATA r2r1kp<>+8(SB)/8, $0x1c6e41596 -DATA r4r3kp<>+0(SB)/8, $0x1751997d0 -DATA r4r3kp<>+8(SB)/8, $0x0ccaa009e -DATA rupolykp<>+0(SB)/8, $0x1db710641 -DATA rupolykp<>+8(SB)/8, $0x1f7011641 -DATA r5kp<>+0(SB)/8, $0x163cd6124 - -GLOBL r2r1kp<>(SB), RODATA, $16 -GLOBL r4r3kp<>(SB), RODATA, $16 -GLOBL rupolykp<>(SB), RODATA, $16 -GLOBL r5kp<>(SB), RODATA, $8 - -// Based on http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf -// len(p) must be at least 64, and must be a multiple of 16. 
- -// func ieeeCLMUL(crc uint32, p []byte) uint32 -TEXT ·ieeeCLMUL(SB), NOSPLIT, $0 - MOVL crc+0(FP), X0 // Initial CRC value - MOVQ p+8(FP), SI // data pointer - MOVQ p_len+16(FP), CX // len(p) - - MOVOU (SI), X1 - MOVOU 16(SI), X2 - MOVOU 32(SI), X3 - MOVOU 48(SI), X4 - PXOR X0, X1 - ADDQ $64, SI // buf+=64 - SUBQ $64, CX // len-=64 - CMPQ CX, $64 // Less than 64 bytes left - JB remain64 - - MOVOA r2r1kp<>+0(SB), X0 - -loopback64: - MOVOA X1, X5 - MOVOA X2, X6 - MOVOA X3, X7 - MOVOA X4, X8 - - PCLMULQDQ $0, X0, X1 - PCLMULQDQ $0, X0, X2 - PCLMULQDQ $0, X0, X3 - PCLMULQDQ $0, X0, X4 - - // Load next early - MOVOU (SI), X11 - MOVOU 16(SI), X12 - MOVOU 32(SI), X13 - MOVOU 48(SI), X14 - - PCLMULQDQ $0x11, X0, X5 - PCLMULQDQ $0x11, X0, X6 - PCLMULQDQ $0x11, X0, X7 - PCLMULQDQ $0x11, X0, X8 - - PXOR X5, X1 - PXOR X6, X2 - PXOR X7, X3 - PXOR X8, X4 - - PXOR X11, X1 - PXOR X12, X2 - PXOR X13, X3 - PXOR X14, X4 - - ADDQ $0x40, DI - ADDQ $64, SI // buf+=64 - SUBQ $64, CX // len-=64 - CMPQ CX, $64 // Less than 64 bytes left? - JGE loopback64 - - // Fold result into a single register (X1) -remain64: - MOVOA r4r3kp<>+0(SB), X0 - - MOVOA X1, X5 - PCLMULQDQ $0, X0, X1 - PCLMULQDQ $0x11, X0, X5 - PXOR X5, X1 - PXOR X2, X1 - - MOVOA X1, X5 - PCLMULQDQ $0, X0, X1 - PCLMULQDQ $0x11, X0, X5 - PXOR X5, X1 - PXOR X3, X1 - - MOVOA X1, X5 - PCLMULQDQ $0, X0, X1 - PCLMULQDQ $0x11, X0, X5 - PXOR X5, X1 - PXOR X4, X1 - - // If there is less than 16 bytes left we are done - CMPQ CX, $16 - JB finish - - // Encode 16 bytes -remain16: - MOVOU (SI), X10 - MOVOA X1, X5 - PCLMULQDQ $0, X0, X1 - PCLMULQDQ $0x11, X0, X5 - PXOR X5, X1 - PXOR X10, X1 - SUBQ $16, CX - ADDQ $16, SI - CMPQ CX, $16 - JGE remain16 - -finish: - // Fold final result into 32 bits and return it - PCMPEQB X3, X3 - PCLMULQDQ $1, X1, X0 - PSRLDQ $8, X1 - PXOR X0, X1 - - MOVOA X1, X2 - MOVQ r5kp<>+0(SB), X0 - - // Creates 32 bit mask. Note that we don't care about upper half. - PSRLQ $32, X3 - - PSRLDQ $4, X2 - PAND X3, X1 - PCLMULQDQ $0, X0, X1 - PXOR X2, X1 - - MOVOA rupolykp<>+0(SB), X0 - - MOVOA X1, X2 - PAND X3, X1 - PCLMULQDQ $0x10, X0, X1 - PAND X3, X1 - PCLMULQDQ $0, X0, X1 - PXOR X2, X1 - - // PEXTRD $1, X1, AX (SSE 4.1) - BYTE $0x66; BYTE $0x0f; BYTE $0x3a - BYTE $0x16; BYTE $0xc8; BYTE $0x01 - MOVL AX, ret+32(FP) - - RET diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64p32.go b/vendor/github.com/klauspost/crc32/crc32_amd64p32.go deleted file mode 100644 index 3222b06a..00000000 --- a/vendor/github.com/klauspost/crc32/crc32_amd64p32.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine,!gccgo - -package crc32 - -// This file contains the code to call the SSE 4.2 version of the Castagnoli -// CRC. - -// haveSSE42 is defined in crc32_amd64p32.s and uses CPUID to test for SSE 4.2 -// support. -func haveSSE42() bool - -// castagnoliSSE42 is defined in crc32_amd64p32.s and uses the SSE4.2 CRC32 -// instruction. -//go:noescape -func castagnoliSSE42(crc uint32, p []byte) uint32 - -var sse42 = haveSSE42() - -func archAvailableCastagnoli() bool { - return sse42 -} - -func archInitCastagnoli() { - if !sse42 { - panic("not available") - } - // No initialization necessary. 
-} - -func archUpdateCastagnoli(crc uint32, p []byte) uint32 { - if !sse42 { - panic("not available") - } - return castagnoliSSE42(crc, p) -} - -func archAvailableIEEE() bool { return false } -func archInitIEEE() { panic("not available") } -func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") } diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64p32.s b/vendor/github.com/klauspost/crc32/crc32_amd64p32.s deleted file mode 100644 index a578d685..00000000 --- a/vendor/github.com/klauspost/crc32/crc32_amd64p32.s +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc - -#define NOSPLIT 4 -#define RODATA 8 - -// func castagnoliSSE42(crc uint32, p []byte) uint32 -TEXT ·castagnoliSSE42(SB), NOSPLIT, $0 - MOVL crc+0(FP), AX // CRC value - MOVL p+4(FP), SI // data pointer - MOVL p_len+8(FP), CX // len(p) - - NOTL AX - - // If there's less than 8 bytes to process, we do it byte-by-byte. - CMPQ CX, $8 - JL cleanup - - // Process individual bytes until the input is 8-byte aligned. -startup: - MOVQ SI, BX - ANDQ $7, BX - JZ aligned - - CRC32B (SI), AX - DECQ CX - INCQ SI - JMP startup - -aligned: - // The input is now 8-byte aligned and we can process 8-byte chunks. - CMPQ CX, $8 - JL cleanup - - CRC32Q (SI), AX - ADDQ $8, SI - SUBQ $8, CX - JMP aligned - -cleanup: - // We may have some bytes left over that we process one at a time. - CMPQ CX, $0 - JE done - - CRC32B (SI), AX - INCQ SI - DECQ CX - JMP cleanup - -done: - NOTL AX - MOVL AX, ret+16(FP) - RET - -// func haveSSE42() bool -TEXT ·haveSSE42(SB), NOSPLIT, $0 - XORQ AX, AX - INCL AX - CPUID - SHRQ $20, CX - ANDQ $1, CX - MOVB CX, ret+0(FP) - RET - diff --git a/vendor/github.com/klauspost/crc32/crc32_generic.go b/vendor/github.com/klauspost/crc32/crc32_generic.go deleted file mode 100644 index abacbb66..00000000 --- a/vendor/github.com/klauspost/crc32/crc32_generic.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file contains CRC32 algorithms that are not specific to any architecture -// and don't use hardware acceleration. -// -// The simple (and slow) CRC32 implementation only uses a 256*4 bytes table. -// -// The slicing-by-8 algorithm is a faster implementation that uses a bigger -// table (8*256*4 bytes). - -package crc32 - -// simpleMakeTable allocates and constructs a Table for the specified -// polynomial. The table is suitable for use with the simple algorithm -// (simpleUpdate). -func simpleMakeTable(poly uint32) *Table { - t := new(Table) - simplePopulateTable(poly, t) - return t -} - -// simplePopulateTable constructs a Table for the specified polynomial, suitable -// for use with simpleUpdate. -func simplePopulateTable(poly uint32, t *Table) { - for i := 0; i < 256; i++ { - crc := uint32(i) - for j := 0; j < 8; j++ { - if crc&1 == 1 { - crc = (crc >> 1) ^ poly - } else { - crc >>= 1 - } - } - t[i] = crc - } -} - -// simpleUpdate uses the simple algorithm to update the CRC, given a table that -// was previously computed using simpleMakeTable. -func simpleUpdate(crc uint32, tab *Table, p []byte) uint32 { - crc = ^crc - for _, v := range p { - crc = tab[byte(crc)^v] ^ (crc >> 8) - } - return ^crc -} - -// Use slicing-by-8 when payload >= this value. 
-const slicing8Cutoff = 16 - -// slicing8Table is array of 8 Tables, used by the slicing-by-8 algorithm. -type slicing8Table [8]Table - -// slicingMakeTable constructs a slicing8Table for the specified polynomial. The -// table is suitable for use with the slicing-by-8 algorithm (slicingUpdate). -func slicingMakeTable(poly uint32) *slicing8Table { - t := new(slicing8Table) - simplePopulateTable(poly, &t[0]) - for i := 0; i < 256; i++ { - crc := t[0][i] - for j := 1; j < 8; j++ { - crc = t[0][crc&0xFF] ^ (crc >> 8) - t[j][i] = crc - } - } - return t -} - -// slicingUpdate uses the slicing-by-8 algorithm to update the CRC, given a -// table that was previously computed using slicingMakeTable. -func slicingUpdate(crc uint32, tab *slicing8Table, p []byte) uint32 { - if len(p) >= slicing8Cutoff { - crc = ^crc - for len(p) > 8 { - crc ^= uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 - crc = tab[0][p[7]] ^ tab[1][p[6]] ^ tab[2][p[5]] ^ tab[3][p[4]] ^ - tab[4][crc>>24] ^ tab[5][(crc>>16)&0xFF] ^ - tab[6][(crc>>8)&0xFF] ^ tab[7][crc&0xFF] - p = p[8:] - } - crc = ^crc - } - if len(p) == 0 { - return crc - } - return simpleUpdate(crc, &tab[0], p) -} diff --git a/vendor/github.com/klauspost/crc32/crc32_otherarch.go b/vendor/github.com/klauspost/crc32/crc32_otherarch.go deleted file mode 100644 index cc960764..00000000 --- a/vendor/github.com/klauspost/crc32/crc32_otherarch.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64,!amd64p32,!s390x - -package crc32 - -func archAvailableIEEE() bool { return false } -func archInitIEEE() { panic("not available") } -func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") } - -func archAvailableCastagnoli() bool { return false } -func archInitCastagnoli() { panic("not available") } -func archUpdateCastagnoli(crc uint32, p []byte) uint32 { panic("not available") } diff --git a/vendor/github.com/klauspost/crc32/crc32_s390x.go b/vendor/github.com/klauspost/crc32/crc32_s390x.go deleted file mode 100644 index ce96f032..00000000 --- a/vendor/github.com/klauspost/crc32/crc32_s390x.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build s390x - -package crc32 - -const ( - vxMinLen = 64 - vxAlignMask = 15 // align to 16 bytes -) - -// hasVectorFacility reports whether the machine has the z/Architecture -// vector facility installed and enabled. -func hasVectorFacility() bool - -var hasVX = hasVectorFacility() - -// vectorizedCastagnoli implements CRC32 using vector instructions. -// It is defined in crc32_s390x.s. -//go:noescape -func vectorizedCastagnoli(crc uint32, p []byte) uint32 - -// vectorizedIEEE implements CRC32 using vector instructions. -// It is defined in crc32_s390x.s. -//go:noescape -func vectorizedIEEE(crc uint32, p []byte) uint32 - -func archAvailableCastagnoli() bool { - return hasVX -} - -var archCastagnoliTable8 *slicing8Table - -func archInitCastagnoli() { - if !hasVX { - panic("not available") - } - // We still use slicing-by-8 for small buffers. - archCastagnoliTable8 = slicingMakeTable(Castagnoli) -} - -// archUpdateCastagnoli calculates the checksum of p using -// vectorizedCastagnoli. 
-func archUpdateCastagnoli(crc uint32, p []byte) uint32 { - if !hasVX { - panic("not available") - } - // Use vectorized function if data length is above threshold. - if len(p) >= vxMinLen { - aligned := len(p) & ^vxAlignMask - crc = vectorizedCastagnoli(crc, p[:aligned]) - p = p[aligned:] - } - if len(p) == 0 { - return crc - } - return slicingUpdate(crc, archCastagnoliTable8, p) -} - -func archAvailableIEEE() bool { - return hasVX -} - -var archIeeeTable8 *slicing8Table - -func archInitIEEE() { - if !hasVX { - panic("not available") - } - // We still use slicing-by-8 for small buffers. - archIeeeTable8 = slicingMakeTable(IEEE) -} - -// archUpdateIEEE calculates the checksum of p using vectorizedIEEE. -func archUpdateIEEE(crc uint32, p []byte) uint32 { - if !hasVX { - panic("not available") - } - // Use vectorized function if data length is above threshold. - if len(p) >= vxMinLen { - aligned := len(p) & ^vxAlignMask - crc = vectorizedIEEE(crc, p[:aligned]) - p = p[aligned:] - } - if len(p) == 0 { - return crc - } - return slicingUpdate(crc, archIeeeTable8, p) -} diff --git a/vendor/github.com/klauspost/crc32/crc32_s390x.s b/vendor/github.com/klauspost/crc32/crc32_s390x.s deleted file mode 100644 index e980ca29..00000000 --- a/vendor/github.com/klauspost/crc32/crc32_s390x.s +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build s390x - -#include "textflag.h" - -// Vector register range containing CRC-32 constants - -#define CONST_PERM_LE2BE V9 -#define CONST_R2R1 V10 -#define CONST_R4R3 V11 -#define CONST_R5 V12 -#define CONST_RU_POLY V13 -#define CONST_CRC_POLY V14 - -// The CRC-32 constant block contains reduction constants to fold and -// process particular chunks of the input data stream in parallel. -// -// Note that the constant definitions below are extended in order to compute -// intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction. -// The rightmost doubleword can be 0 to prevent contribution to the result or -// can be multiplied by 1 to perform an XOR without the need for a separate -// VECTOR EXCLUSIVE OR instruction. 
-// -// The polynomials used are bit-reflected: -// -// IEEE: P'(x) = 0x0edb88320 -// Castagnoli: P'(x) = 0x082f63b78 - -// IEEE polynomial constants -DATA ·crcleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask -DATA ·crcleconskp+8(SB)/8, $0x0706050403020100 -DATA ·crcleconskp+16(SB)/8, $0x00000001c6e41596 // R2 -DATA ·crcleconskp+24(SB)/8, $0x0000000154442bd4 // R1 -DATA ·crcleconskp+32(SB)/8, $0x00000000ccaa009e // R4 -DATA ·crcleconskp+40(SB)/8, $0x00000001751997d0 // R3 -DATA ·crcleconskp+48(SB)/8, $0x0000000000000000 -DATA ·crcleconskp+56(SB)/8, $0x0000000163cd6124 // R5 -DATA ·crcleconskp+64(SB)/8, $0x0000000000000000 -DATA ·crcleconskp+72(SB)/8, $0x00000001F7011641 // u' -DATA ·crcleconskp+80(SB)/8, $0x0000000000000000 -DATA ·crcleconskp+88(SB)/8, $0x00000001DB710641 // P'(x) << 1 - -GLOBL ·crcleconskp(SB), RODATA, $144 - -// Castagonli Polynomial constants -DATA ·crccleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask -DATA ·crccleconskp+8(SB)/8, $0x0706050403020100 -DATA ·crccleconskp+16(SB)/8, $0x000000009e4addf8 // R2 -DATA ·crccleconskp+24(SB)/8, $0x00000000740eef02 // R1 -DATA ·crccleconskp+32(SB)/8, $0x000000014cd00bd6 // R4 -DATA ·crccleconskp+40(SB)/8, $0x00000000f20c0dfe // R3 -DATA ·crccleconskp+48(SB)/8, $0x0000000000000000 -DATA ·crccleconskp+56(SB)/8, $0x00000000dd45aab8 // R5 -DATA ·crccleconskp+64(SB)/8, $0x0000000000000000 -DATA ·crccleconskp+72(SB)/8, $0x00000000dea713f1 // u' -DATA ·crccleconskp+80(SB)/8, $0x0000000000000000 -DATA ·crccleconskp+88(SB)/8, $0x0000000105ec76f0 // P'(x) << 1 - -GLOBL ·crccleconskp(SB), RODATA, $144 - -// func hasVectorFacility() bool -TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1 - MOVD $x-24(SP), R1 - XC $24, 0(R1), 0(R1) // clear the storage - MOVD $2, R0 // R0 is the number of double words stored -1 - WORD $0xB2B01000 // STFLE 0(R1) - XOR R0, R0 // reset the value of R0 - MOVBZ z-8(SP), R1 - AND $0x40, R1 - BEQ novector - -vectorinstalled: - // check if the vector instruction has been enabled - VLEIB $0, $0xF, V16 - VLGVB $0, V16, R1 - CMPBNE R1, $0xF, novector - MOVB $1, ret+0(FP) // have vx - RET - -novector: - MOVB $0, ret+0(FP) // no vx - RET - -// The CRC-32 function(s) use these calling conventions: -// -// Parameters: -// -// R2: Initial CRC value, typically ~0; and final CRC (return) value. -// R3: Input buffer pointer, performance might be improved if the -// buffer is on a doubleword boundary. -// R4: Length of the buffer, must be 64 bytes or greater. -// -// Register usage: -// -// R5: CRC-32 constant pool base pointer. -// V0: Initial CRC value and intermediate constants and results. -// V1..V4: Data for CRC computation. -// V5..V8: Next data chunks that are fetched from the input buffer. -// -// V9..V14: CRC-32 constants. 
- -// func vectorizedIEEE(crc uint32, p []byte) uint32 -TEXT ·vectorizedIEEE(SB), NOSPLIT, $0 - MOVWZ crc+0(FP), R2 // R2 stores the CRC value - MOVD p+8(FP), R3 // data pointer - MOVD p_len+16(FP), R4 // len(p) - - MOVD $·crcleconskp(SB), R5 - BR vectorizedBody<>(SB) - -// func vectorizedCastagnoli(crc uint32, p []byte) uint32 -TEXT ·vectorizedCastagnoli(SB), NOSPLIT, $0 - MOVWZ crc+0(FP), R2 // R2 stores the CRC value - MOVD p+8(FP), R3 // data pointer - MOVD p_len+16(FP), R4 // len(p) - - // R5: crc-32 constant pool base pointer, constant is used to reduce crc - MOVD $·crccleconskp(SB), R5 - BR vectorizedBody<>(SB) - -TEXT vectorizedBody<>(SB), NOSPLIT, $0 - XOR $0xffffffff, R2 // NOTW R2 - VLM 0(R5), CONST_PERM_LE2BE, CONST_CRC_POLY - - // Load the initial CRC value into the rightmost word of V0 - VZERO V0 - VLVGF $3, R2, V0 - - // Crash if the input size is less than 64-bytes. - CMP R4, $64 - BLT crash - - // Load a 64-byte data chunk and XOR with CRC - VLM 0(R3), V1, V4 // 64-bytes into V1..V4 - - // Reflect the data if the CRC operation is in the bit-reflected domain - VPERM V1, V1, CONST_PERM_LE2BE, V1 - VPERM V2, V2, CONST_PERM_LE2BE, V2 - VPERM V3, V3, CONST_PERM_LE2BE, V3 - VPERM V4, V4, CONST_PERM_LE2BE, V4 - - VX V0, V1, V1 // V1 ^= CRC - ADD $64, R3 // BUF = BUF + 64 - ADD $(-64), R4 - - // Check remaining buffer size and jump to proper folding method - CMP R4, $64 - BLT less_than_64bytes - -fold_64bytes_loop: - // Load the next 64-byte data chunk into V5 to V8 - VLM 0(R3), V5, V8 - VPERM V5, V5, CONST_PERM_LE2BE, V5 - VPERM V6, V6, CONST_PERM_LE2BE, V6 - VPERM V7, V7, CONST_PERM_LE2BE, V7 - VPERM V8, V8, CONST_PERM_LE2BE, V8 - - // Perform a GF(2) multiplication of the doublewords in V1 with - // the reduction constants in V0. The intermediate result is - // then folded (accumulated) with the next data chunk in V5 and - // stored in V1. Repeat this step for the register contents - // in V2, V3, and V4 respectively. - - VGFMAG CONST_R2R1, V1, V5, V1 - VGFMAG CONST_R2R1, V2, V6, V2 - VGFMAG CONST_R2R1, V3, V7, V3 - VGFMAG CONST_R2R1, V4, V8, V4 - - // Adjust buffer pointer and length for next loop - ADD $64, R3 // BUF = BUF + 64 - ADD $(-64), R4 // LEN = LEN - 64 - - CMP R4, $64 - BGE fold_64bytes_loop - -less_than_64bytes: - // Fold V1 to V4 into a single 128-bit value in V1 - VGFMAG CONST_R4R3, V1, V2, V1 - VGFMAG CONST_R4R3, V1, V3, V1 - VGFMAG CONST_R4R3, V1, V4, V1 - - // Check whether to continue with 64-bit folding - CMP R4, $16 - BLT final_fold - -fold_16bytes_loop: - VL 0(R3), V2 // Load next data chunk - VPERM V2, V2, CONST_PERM_LE2BE, V2 - - VGFMAG CONST_R4R3, V1, V2, V1 // Fold next data chunk - - // Adjust buffer pointer and size for folding next data chunk - ADD $16, R3 - ADD $-16, R4 - - // Process remaining data chunks - CMP R4, $16 - BGE fold_16bytes_loop - -final_fold: - VLEIB $7, $0x40, V9 - VSRLB V9, CONST_R4R3, V0 - VLEIG $0, $1, V0 - - VGFMG V0, V1, V1 - - VLEIB $7, $0x20, V9 // Shift by words - VSRLB V9, V1, V2 // Store remaining bits in V2 - VUPLLF V1, V1 // Split rightmost doubleword - VGFMAG CONST_R5, V1, V2, V1 // V1 = (V1 * R5) XOR V2 - - // The input values to the Barret reduction are the degree-63 polynomial - // in V1 (R(x)), degree-32 generator polynomial, and the reduction - // constant u. The Barret reduction result is the CRC value of R(x) mod - // P(x). - // - // The Barret reduction algorithm is defined as: - // - // 1. T1(x) = floor( R(x) / x^32 ) GF2MUL u - // 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x) - // 3. 
C(x) = R(x) XOR T2(x) mod x^32 - // - // Note: To compensate the division by x^32, use the vector unpack - // instruction to move the leftmost word into the leftmost doubleword - // of the vector register. The rightmost doubleword is multiplied - // with zero to not contribute to the intermedate results. - - // T1(x) = floor( R(x) / x^32 ) GF2MUL u - VUPLLF V1, V2 - VGFMG CONST_RU_POLY, V2, V2 - - // Compute the GF(2) product of the CRC polynomial in VO with T1(x) in - // V2 and XOR the intermediate result, T2(x), with the value in V1. - // The final result is in the rightmost word of V2. - - VUPLLF V2, V2 - VGFMAG CONST_CRC_POLY, V2, V1, V2 - -done: - VLGVF $2, V2, R2 - XOR $0xffffffff, R2 // NOTW R2 - MOVWZ R2, ret + 32(FP) - RET - -crash: - MOVD $0, (R0) // input size is less than 64-bytes diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore new file mode 100644 index 00000000..5e987350 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/.gitignore @@ -0,0 +1,34 @@ +# Created by https://www.gitignore.io/api/macos + +### macOS ### +*.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +# End of https://www.gitignore.io/api/macos + +cmd/*/*exe +.idea \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml index 5fd62363..fd6c6db7 100644 --- a/vendor/github.com/pierrec/lz4/.travis.yml +++ b/vendor/github.com/pierrec/lz4/.travis.yml @@ -1,9 +1,24 @@ language: go +env: + - GO111MODULE=off + go: - - 1.4 - - 1.5 + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - master + +matrix: + fast_finish: true + allow_failures: + - go: master + +sudo: false script: - - go test -cpu=2 - - go test -cpu=2 -race \ No newline at end of file + - go test -v -cpu=2 + - go test -v -cpu=2 -race + - go test -v -cpu=2 -tags noasm + - go test -v -cpu=2 -race -tags noasm diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md index dd3c9d47..4ee388e8 100644 --- a/vendor/github.com/pierrec/lz4/README.md +++ b/vendor/github.com/pierrec/lz4/README.md @@ -1,31 +1,90 @@ -[![godoc](https://godoc.org/github.com/pierrec/lz4?status.png)](https://godoc.org/github.com/pierrec/lz4) +# lz4 : LZ4 compression in pure Go + +[![GoDoc](https://godoc.org/github.com/pierrec/lz4?status.svg)](https://godoc.org/github.com/pierrec/lz4) [![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4) +[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4) +[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags) + +## Overview + +This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks. 
+The implementation is based on the reference C [one](https://github.com/lz4/lz4). + +## Install + +Assuming you have the go toolchain installed: + +``` +go get github.com/pierrec/lz4 +``` + +There is a command line interface tool to compress and decompress LZ4 files. + +``` +go install github.com/pierrec/lz4/cmd/lz4c +``` + +Usage -# lz4 -LZ4 compression and decompression in pure Go +``` +Usage of lz4c: + -version + print the program version + +Subcommands: +Compress the given files or from stdin to stdout. +compress [arguments] [ ...] + -bc + enable block checksum + -l int + compression level (0=fastest) + -sc + disable stream checksum + -size string + block max size [64K,256K,1M,4M] (default "4M") -## Usage +Uncompress the given files or from stdin to stdout. +uncompress [arguments] [ ...] -```go -import "github.com/pierrec/lz4" ``` -## Description -Package lz4 implements reading and writing lz4 compressed data (a frame), -as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html, -using an io.Reader (decompression) and io.Writer (compression). -It is designed to minimize memory usage while maximizing throughput by being able to -[de]compress data concurrently. +## Example + +``` +// Compress and uncompress an input string. +s := "hello world" +r := strings.NewReader(s) + +// The pipe will uncompress the data from the writer. +pr, pw := io.Pipe() +zw := lz4.NewWriter(pw) +zr := lz4.NewReader(pr) + +go func() { + // Compress the input string. + _, _ = io.Copy(zw, r) + _ = zw.Close() // Make sure the writer is closed + _ = pw.Close() // Terminate the pipe +}() + +_, _ = io.Copy(os.Stdout, zr) + +// Output: +// hello world +``` + +## Contributing + +Contributions are very welcome for bug fixing, performance improvements...! + +- Open an issue with a proper description +- Send a pull request with appropriate test case(s) + +## Contributors -The Reader and the Writer support concurrent processing provided the supplied buffers are -large enough (in multiples of BlockMaxSize) and there is no block dependency. -Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently. -The runtime.GOMAXPROCS() value is used to apply concurrency or not. +Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far! -Although the block level compression and decompression functions are exposed and are fully compatible -with the lz4 block format definition, they are low level and should not be used directly. -For a complete description of an lz4 compressed block, see: -http://fastcompression.blogspot.fr/2011/05/lz4-explained.html +Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder. -See https://github.com/Cyan4973/lz4 for the reference C implementation. +Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code. diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go index 6884bccd..664d9be5 100644 --- a/vendor/github.com/pierrec/lz4/block.go +++ b/vendor/github.com/pierrec/lz4/block.go @@ -2,211 +2,150 @@ package lz4 import ( "encoding/binary" - "errors" - "unsafe" + "math/bits" + "sync" ) -// block represents a frame data block. -// Used when compressing or decompressing frame blocks concurrently. 
-type block struct { - compressed bool - zdata []byte // compressed data - data []byte // decompressed data - offset int // offset within the data as with block dependency the 64Kb window is prepended to it - checksum uint32 // compressed data checksum - err error // error while [de]compressing +// blockHash hashes the lower 6 bytes into a value < htSize. +func blockHash(x uint64) uint32 { + const prime6bytes = 227718039650203 + return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog)) } -var ( - // ErrInvalidSource is returned by UncompressBlock when a compressed block is corrupted. - ErrInvalidSource = errors.New("lz4: invalid source") - // ErrShortBuffer is returned by UncompressBlock, CompressBlock or CompressBlockHC when - // the supplied buffer for [de]compression is too small. - ErrShortBuffer = errors.New("lz4: short buffer") -) - // CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. func CompressBlockBound(n int) int { return n + n/255 + 16 } -// UncompressBlock decompresses the source buffer into the destination one, -// starting at the di index and returning the decompressed size. +// UncompressBlock uncompresses the source buffer into the destination one, +// and returns the uncompressed size. // // The destination buffer must be sized appropriately. // // An error is returned if the source data is invalid or the destination buffer is too small. -func UncompressBlock(src, dst []byte, di int) (int, error) { - si, sn, di0 := 0, len(src), di - if sn == 0 { +func UncompressBlock(src, dst []byte) (int, error) { + if len(src) == 0 { return 0, nil } - - for { - // literals and match lengths (token) - lLen := int(src[si] >> 4) - mLen := int(src[si] & 0xF) - if si++; si == sn { - return di, ErrInvalidSource - } - - // literals - if lLen > 0 { - if lLen == 0xF { - for src[si] == 0xFF { - lLen += 0xFF - if si++; si == sn { - return di - di0, ErrInvalidSource - } - } - lLen += int(src[si]) - if si++; si == sn { - return di - di0, ErrInvalidSource - } - } - if len(dst)-di < lLen || si+lLen > sn { - return di - di0, ErrShortBuffer - } - di += copy(dst[di:], src[si:si+lLen]) - - if si += lLen; si >= sn { - return di - di0, nil - } - } - - if si += 2; si >= sn { - return di, ErrInvalidSource - } - offset := int(src[si-2]) | int(src[si-1])<<8 - if di-offset < 0 || offset == 0 { - return di - di0, ErrInvalidSource - } - - // match - if mLen == 0xF { - for src[si] == 0xFF { - mLen += 0xFF - if si++; si == sn { - return di - di0, ErrInvalidSource - } - } - mLen += int(src[si]) - if si++; si == sn { - return di - di0, ErrInvalidSource - } - } - // minimum match length is 4 - mLen += 4 - if len(dst)-di <= mLen { - return di - di0, ErrShortBuffer - } - - // copy the match (NB. match is at least 4 bytes long) - // NB. past di, copy() would write old bytes instead of - // the ones we just copied, so split the work into the largest chunk. - for ; mLen >= offset; mLen -= offset { - di += copy(dst[di:], dst[di-offset:di]) - } - di += copy(dst[di:], dst[di-offset:di-offset+mLen]) + if di := decodeBlock(dst, src); di >= 0 { + return di, nil } + return 0, ErrInvalidSourceShortBuffer } -type hashEntry struct { - generation uint - value int -} - -// CompressBlock compresses the source buffer starting at soffet into the destination one. +// CompressBlock compresses the source buffer into the destination one. // This is the fast version of LZ4 compression and also the default one. // -// The size of the compressed data is returned. 
If it is 0 and no error, then the data is incompressible. +// The argument hashTable is scratch space for a hash table used by the +// compressor. If provided, it should have length at least 1<<16. If it is +// shorter (or nil), CompressBlock allocates its own hash table. +// +// The size of the compressed data is returned. +// +// If the destination buffer size is lower than CompressBlockBound and +// the compressed size is 0 and no error, then the data is incompressible. // // An error is returned if the destination buffer is too small. -func CompressBlock(src, dst []byte, soffset int) (int, error) { - var hashTable [hashTableSize]hashEntry - return compressGenerationalBlock(src, dst, soffset, 0, hashTable[:]) -} - -// getUint32 is a despicably evil function (well, for Go!) that takes advantage -// of the machine's byte order to save some operations. This may look -// inefficient but it is significantly faster on littleEndian machines, -// which include x84, amd64, and some ARM processors. -func getUint32(b []byte) uint32 { - _ = b[3] - if isLittleEndian { - return *(*uint32)(unsafe.Pointer(&b)) +func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) { + defer recoverBlock(&err) + + // Return 0, nil only if the destination buffer size is < CompressBlockBound. + isNotCompressible := len(dst) < CompressBlockBound(len(src)) + + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. + // This significantly speeds up incompressible data and usually has very small impact on compression. + // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) + const adaptSkipLog = 7 + if len(hashTable) < htSize { + htIface := htPool.Get() + defer htPool.Put(htIface) + hashTable = (*(htIface).(*[htSize]int))[:] } - - return uint32(b[0]) | - uint32(b[1])<<8 | - uint32(b[2])<<16 | - uint32(b[3])<<24 -} - -func compressGenerationalBlock(src, dst []byte, soffset int, generation uint, hashTable []hashEntry) (int, error) { - sn, dn := len(src)-mfLimit, len(dst) - if sn <= 0 || dn == 0 || soffset >= sn { - return 0, nil - } - var si, di int - - // fast scan strategy: - // we only need a hash table to store the last sequences (4 bytes) - var hashShift = uint((minMatch * 8) - hashLog) - - // Initialise the hash table with the first 64Kb of the input buffer - // (used when compressing dependent blocks) - for si < soffset { - h := getUint32(src[si:]) * hasher >> hashShift - si++ - hashTable[h] = hashEntry{generation, si} + // Prove to the compiler the table has at least htSize elements. + // The compiler can see that "uint32() >> hashShift" cannot be out of bounds. + hashTable = hashTable[:htSize] + + // si: Current position of the search. + // anchor: Position of the current literals. + var si, di, anchor int + sn := len(src) - mfLimit + if sn <= 0 { + goto lastLiterals } - anchor := si - fma := 1 << skipStrength - for si < sn-minMatch { - // hash the next 4 bytes (sequence)... - h := getUint32(src[si:]) * hasher >> hashShift - if hashTable[h].generation != generation { - hashTable[h] = hashEntry{generation, 0} + // Fast scan strategy: the hash table only stores the last 4 bytes sequences. + for si < sn { + // Hash the next 6 bytes (sequence)... + match := binary.LittleEndian.Uint64(src[si:]) + h := blockHash(match) + h2 := blockHash(match >> 8) + + // We check a match at s, s+1 and s+2 and pick the first one we get. + // Checking 3 only requires us to load the source one. 
+ ref := hashTable[h] + ref2 := hashTable[h2] + hashTable[h] = si + hashTable[h2] = si + 1 + offset := si - ref + + // If offset <= 0 we got an old entry in the hash table. + if offset <= 0 || offset >= winSize || // Out of window. + uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches. + // No match. Start calculating another hash. + // The processor can usually do this out-of-order. + h = blockHash(match >> 16) + ref = hashTable[h] + + // Check the second match at si+1 + si += 1 + offset = si - ref2 + + if offset <= 0 || offset >= winSize || + uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) { + // No match. Check the third match at si+2 + si += 1 + offset = si - ref + hashTable[h] = si + + if offset <= 0 || offset >= winSize || + uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) { + // Skip one extra byte (at si+3) before we check 3 matches again. + si += 2 + (si-anchor)>>adaptSkipLog + continue + } + } } - // -1 to separate existing entries from new ones - ref := hashTable[h].value - 1 - // ...and store the position of the hash in the hash table (+1 to compensate the -1 upon saving) - hashTable[h].value = si + 1 - // no need to check the last 3 bytes in the first literal 4 bytes as - // this guarantees that the next match, if any, is compressed with - // a lower size, since to have some compression we must have: - // ll+ml-overlap > 1 + (ll-15)/255 + (ml-4-15)/255 + 2 (uncompressed size>compressed size) - // => ll+ml>3+2*overlap => ll+ml>= 4+2*overlap - // and by definition we do have: - // ll >= 1, ml >= 4 - // => ll+ml >= 5 - // => so overlap must be 0 - - // the sequence is new, out of bound (64kb) or not valid: try next sequence - if ref < 0 || fma&(1<>winSizeLog > 0 || - src[ref] != src[si] || - src[ref+1] != src[si+1] || - src[ref+2] != src[si+2] || - src[ref+3] != src[si+3] { - // variable step: improves performance on non-compressible data - si += fma >> skipStrength - fma++ - continue + + // Match found. + lLen := si - anchor // Literal length. + // We already matched 4 bytes. + mLen := 4 + + // Extend backwards if we can, reducing literals. + tOff := si - offset - 1 + for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] { + si-- + tOff-- + lLen-- + mLen++ } - // match found - fma = 1 << skipStrength - lLen := si - anchor - offset := si - ref - // encode match length part 1 - si += minMatch - mLen := si // match length has minMatch already - for si <= sn && src[si] == src[si-offset] { - si++ + // Add the match length, so we continue search at the end. + // Use mLen to store the offset base. + si, mLen = si+mLen, si+minMatch + + // Find the longest match by looking by batches of 8 bytes. + for si+8 < sn { + x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:]) + if x == 0 { + si += 8 + } else { + // Stop is first non-zero byte. + si += bits.TrailingZeros64(x) >> 3 + break + } } + mLen = si - mLen if mLen < 0xF { dst[di] = byte(mLen) @@ -214,169 +153,192 @@ func compressGenerationalBlock(src, dst []byte, soffset int, generation uint, ha dst[di] = 0xF } - // encode literals length + // Encode literals length. 
if lLen < 0xF { dst[di] |= byte(lLen << 4) } else { dst[di] |= 0xF0 - if di++; di == dn { - return di, ErrShortBuffer - } + di++ l := lLen - 0xF for ; l >= 0xFF; l -= 0xFF { dst[di] = 0xFF - if di++; di == dn { - return di, ErrShortBuffer - } + di++ } dst[di] = byte(l) } - if di++; di == dn { - return di, ErrShortBuffer - } + di++ - // literals - if di+lLen >= dn { - return di, ErrShortBuffer - } - di += copy(dst[di:], src[anchor:anchor+lLen]) + // Literals. + copy(dst[di:di+lLen], src[anchor:anchor+lLen]) + di += lLen + 2 anchor = si - // encode offset - if di += 2; di >= dn { - return di, ErrShortBuffer - } + // Encode offset. + _ = dst[di] // Bound check elimination. dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) - // encode match length part 2 + // Encode match length part 2. if mLen >= 0xF { for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { dst[di] = 0xFF - if di++; di == dn { - return di, ErrShortBuffer - } + di++ } dst[di] = byte(mLen) - if di++; di == dn { - return di, ErrShortBuffer - } + di++ + } + // Check if we can load next values. + if si >= sn { + break } + // Hash match end-2 + h = blockHash(binary.LittleEndian.Uint64(src[si-2:])) + hashTable[h] = si - 2 } - if anchor == 0 { - // incompressible +lastLiterals: + if isNotCompressible && anchor == 0 { + // Incompressible. return 0, nil } - // last literals + // Last literals. lLen := len(src) - anchor if lLen < 0xF { dst[di] = byte(lLen << 4) } else { dst[di] = 0xF0 - if di++; di == dn { - return di, ErrShortBuffer - } - lLen -= 0xF - for ; lLen >= 0xFF; lLen -= 0xFF { + di++ + for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF { dst[di] = 0xFF - if di++; di == dn { - return di, ErrShortBuffer - } + di++ } dst[di] = byte(lLen) } - if di++; di == dn { - return di, ErrShortBuffer - } + di++ - // write literals - src = src[anchor:] - switch n := di + len(src); { - case n > dn: - return di, ErrShortBuffer - case n >= sn: - // incompressible + // Write the last literals. + if isNotCompressible && di >= anchor { + // Incompressible. return 0, nil } - di += copy(dst[di:], src) + di += copy(dst[di:di+len(src)-anchor], src[anchor:]) return di, nil } -// CompressBlockHC compresses the source buffer starting at soffet into the destination one. +// Pool of hash tables for CompressBlock. +var htPool = sync.Pool{ + New: func() interface{} { + return new([htSize]int) + }, +} + +// blockHash hashes 4 bytes into a value < winSize. +func blockHashHC(x uint32) uint32 { + const hasher uint32 = 2654435761 // Knuth multiplicative hash. + return x * hasher >> (32 - winSizeLog) +} + +// CompressBlockHC compresses the source buffer src into the destination dst +// with max search depth (use 0 or negative value for no max). +// // CompressBlockHC compression ratio is better than CompressBlock but it is also slower. // -// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible. +// The size of the compressed data is returned. +// +// If the destination buffer size is lower than CompressBlockBound and +// the compressed size is 0 and no error, then the data is incompressible. // // An error is returned if the destination buffer is too small. -func CompressBlockHC(src, dst []byte, soffset int) (int, error) { - sn, dn := len(src)-mfLimit, len(dst) - if sn <= 0 || dn == 0 || soffset >= sn { - return 0, nil +func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) { + defer recoverBlock(&err) + + // Return 0, nil only if the destination buffer size is < CompressBlockBound. 
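The block-level API of the vendored lz4 package changes here: `UncompressBlock` drops the `di` offset argument, and `CompressBlock` replaces `soffset` with an optional hash-table scratch slice. A minimal round-trip sketch against the signatures shown above (the input data and buffer sizes are illustrative, and a `nil` hash table falls back to the package's internal pool):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/pierrec/lz4"
)

func main() {
	src := bytes.Repeat([]byte("goreplay "), 100) // illustrative input

	// Size dst with CompressBlockBound; per the doc comment above, a 0 result
	// only signals incompressible data when dst is smaller than that bound.
	dst := make([]byte, lz4.CompressBlockBound(len(src)))

	// A nil hash table lets CompressBlock use its internal pool.
	n, err := lz4.CompressBlock(src, dst, nil)
	if err != nil {
		log.Fatal(err)
	}

	// The caller must provide a destination large enough for the
	// uncompressed data; here the original length is known.
	out := make([]byte, len(src))
	m, err := lz4.UncompressBlock(dst[:n], out)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(src, out[:m])) // true
}
```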
+ isNotCompressible := len(dst) < CompressBlockBound(len(src)) + + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. + // This significantly speeds up incompressible data and usually has very small impact on compression. + // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) + const adaptSkipLog = 7 + + var si, di, anchor int + + // hashTable: stores the last position found for a given hash + // chainTable: stores previous positions for a given hash + var hashTable, chainTable [winSize]int + + if depth <= 0 { + depth = winSize } - var si, di int - - // Hash Chain strategy: - // we need a hash table and a chain table - // the chain table cannot contain more entries than the window size (64Kb entries) - var hashTable [1 << hashLog]int - var chainTable [winSize]int - var hashShift = uint((minMatch * 8) - hashLog) - - // Initialise the hash table with the first 64Kb of the input buffer - // (used when compressing dependent blocks) - for si < soffset { - h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift - chainTable[si&winMask] = hashTable[h] - si++ - hashTable[h] = si + + sn := len(src) - mfLimit + if sn <= 0 { + goto lastLiterals } - anchor := si - for si < sn-minMatch { - // hash the next 4 bytes (sequence)... - h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + for si < sn { + // Hash the next 4 bytes (sequence). + match := binary.LittleEndian.Uint32(src[si:]) + h := blockHashHC(match) - // follow the chain until out of window and give the longest match + // Follow the chain until out of window and give the longest match. mLen := 0 offset := 0 - for next := hashTable[h] - 1; next > 0 && next > si-winSize; next = chainTable[next&winMask] - 1 { - // the first (mLen==0) or next byte (mLen>=minMatch) at current match length must match to improve on the match length - if src[next+mLen] == src[si+mLen] { - for ml := 0; ; ml++ { - if src[next+ml] != src[si+ml] || si+ml > sn { - // found a longer match, keep its position and length - if mLen < ml && ml >= minMatch { - mLen = ml - offset = si - next - } - break - } + for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] { + // The first (mLen==0) or next byte (mLen>=minMatch) at current match length + // must match to improve on the match length. + if src[next+mLen] != src[si+mLen] { + continue + } + ml := 0 + // Compare the current position with a previous with the same hash. + for ml < sn-si { + x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:]) + if x == 0 { + ml += 8 + } else { + // Stop is first non-zero byte. + ml += bits.TrailingZeros64(x) >> 3 + break } } + if ml < minMatch || ml <= mLen { + // Match too small (>adaptSkipLog continue } - // match found - // update hash/chain tables with overlaping bytes: - // si already hashed, add everything from si+1 up to the match length - for si, ml := si+1, si+mLen; si < ml; { - h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + // Match found. + // Update hash/chain tables with overlapping bytes: + // si already hashed, add everything from si+1 up to the match length. 
+ winStart := si + 1 + if ws := si + mLen - winSize; ws > winStart { + winStart = ws + } + for si, ml := winStart, si+mLen; si < ml; { + match >>= 8 + match |= uint32(src[si+3]) << 24 + h := blockHashHC(match) chainTable[si&winMask] = hashTable[h] - si++ hashTable[h] = si + si++ } lLen := si - anchor si += mLen - mLen -= minMatch // match length does not include minMatch + mLen -= minMatch // Match length does not include minMatch. if mLen < 0xF { dst[di] = byte(mLen) @@ -384,91 +346,68 @@ func CompressBlockHC(src, dst []byte, soffset int) (int, error) { dst[di] = 0xF } - // encode literals length + // Encode literals length. if lLen < 0xF { dst[di] |= byte(lLen << 4) } else { dst[di] |= 0xF0 - if di++; di == dn { - return di, ErrShortBuffer - } + di++ l := lLen - 0xF for ; l >= 0xFF; l -= 0xFF { dst[di] = 0xFF - if di++; di == dn { - return di, ErrShortBuffer - } + di++ } dst[di] = byte(l) } - if di++; di == dn { - return di, ErrShortBuffer - } + di++ - // literals - if di+lLen >= dn { - return di, ErrShortBuffer - } - di += copy(dst[di:], src[anchor:anchor+lLen]) + // Literals. + copy(dst[di:di+lLen], src[anchor:anchor+lLen]) + di += lLen anchor = si - // encode offset - if di += 2; di >= dn { - return di, ErrShortBuffer - } + // Encode offset. + di += 2 dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) - // encode match length part 2 + // Encode match length part 2. if mLen >= 0xF { for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { dst[di] = 0xFF - if di++; di == dn { - return di, ErrShortBuffer - } + di++ } dst[di] = byte(mLen) - if di++; di == dn { - return di, ErrShortBuffer - } + di++ } } - if anchor == 0 { - // incompressible + if isNotCompressible && anchor == 0 { + // Incompressible. return 0, nil } - // last literals + // Last literals. +lastLiterals: lLen := len(src) - anchor if lLen < 0xF { dst[di] = byte(lLen << 4) } else { dst[di] = 0xF0 - if di++; di == dn { - return di, ErrShortBuffer - } + di++ lLen -= 0xF for ; lLen >= 0xFF; lLen -= 0xFF { dst[di] = 0xFF - if di++; di == dn { - return di, ErrShortBuffer - } + di++ } dst[di] = byte(lLen) } - if di++; di == dn { - return di, ErrShortBuffer - } + di++ - // write literals - src = src[anchor:] - switch n := di + len(src); { - case n > dn: - return di, ErrShortBuffer - case n >= sn: - // incompressible + // Write the last literals. + if isNotCompressible && di >= anchor { + // Incompressible. return 0, nil } - di += copy(dst[di:], src) + di += copy(dst[di:di+len(src)-anchor], src[anchor:]) return di, nil } diff --git a/vendor/github.com/pierrec/lz4/debug.go b/vendor/github.com/pierrec/lz4/debug.go new file mode 100644 index 00000000..bc5e78d4 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/debug.go @@ -0,0 +1,23 @@ +// +build lz4debug + +package lz4 + +import ( + "fmt" + "os" + "path/filepath" + "runtime" +) + +const debugFlag = true + +func debug(args ...interface{}) { + _, file, line, _ := runtime.Caller(1) + file = filepath.Base(file) + + f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0]) + if f[len(f)-1] != '\n' { + f += "\n" + } + fmt.Fprintf(os.Stderr, f, args[1:]...) 
+} diff --git a/vendor/github.com/pierrec/lz4/debug_stub.go b/vendor/github.com/pierrec/lz4/debug_stub.go new file mode 100644 index 00000000..44211ad9 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/debug_stub.go @@ -0,0 +1,7 @@ +// +build !lz4debug + +package lz4 + +const debugFlag = false + +func debug(args ...interface{}) {} diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.go b/vendor/github.com/pierrec/lz4/decode_amd64.go new file mode 100644 index 00000000..43cc14fb --- /dev/null +++ b/vendor/github.com/pierrec/lz4/decode_amd64.go @@ -0,0 +1,8 @@ +// +build !appengine +// +build gc +// +build !noasm + +package lz4 + +//go:noescape +func decodeBlock(dst, src []byte) int diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.s b/vendor/github.com/pierrec/lz4/decode_amd64.s new file mode 100644 index 00000000..20fef397 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/decode_amd64.s @@ -0,0 +1,375 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// AX scratch +// BX scratch +// CX scratch +// DX token +// +// DI &dst +// SI &src +// R8 &dst + len(dst) +// R9 &src + len(src) +// R11 &dst +// R12 short output end +// R13 short input end +// func decodeBlock(dst, src []byte) int +// using 50 bytes of stack currently +TEXT ·decodeBlock(SB), NOSPLIT, $64-56 + MOVQ dst_base+0(FP), DI + MOVQ DI, R11 + MOVQ dst_len+8(FP), R8 + ADDQ DI, R8 + + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R9 + ADDQ SI, R9 + + // shortcut ends + // short output end + MOVQ R8, R12 + SUBQ $32, R12 + // short input end + MOVQ R9, R13 + SUBQ $16, R13 + +loop: + // for si < len(src) + CMPQ SI, R9 + JGE end + + // token := uint32(src[si]) + MOVBQZX (SI), DX + INCQ SI + + // lit_len = token >> 4 + // if lit_len > 0 + // CX = lit_len + MOVQ DX, CX + SHRQ $4, CX + + // if lit_len != 0xF + CMPQ CX, $0xF + JEQ lit_len_loop_pre + CMPQ DI, R12 + JGE lit_len_loop_pre + CMPQ SI, R13 + JGE lit_len_loop_pre + + // copy shortcut + + // A two-stage shortcut for the most common case: + // 1) If the literal length is 0..14, and there is enough space, + // enter the shortcut and copy 16 bytes on behalf of the literals + // (in the fast mode, only 8 bytes can be safely copied this way). + // 2) Further if the match length is 4..18, copy 18 bytes in a similar + // manner; but we ensure that there's enough space in the output for + // those 18 bytes earlier, upon entering the shortcut (in other words, + // there is a combined check for both stages). + + // copy literal + MOVOU (SI), X0 + MOVOU X0, (DI) + ADDQ CX, DI + ADDQ CX, SI + + MOVQ DX, CX + ANDQ $0xF, CX + + // The second stage: prepare for match copying, decode full info. + // If it doesn't work out, the info won't be wasted. + // offset := uint16(data[:2]) + MOVWQZX (SI), DX + ADDQ $2, SI + + MOVQ DI, AX + SUBQ DX, AX + CMPQ AX, DI + JGT err_short_buf + + // if we can't do the second stage then jump straight to read the + // match length, we already have the offset. 
+ CMPQ CX, $0xF + JEQ match_len_loop_pre + CMPQ DX, $8 + JLT match_len_loop_pre + CMPQ AX, R11 + JLT err_short_buf + + // memcpy(op + 0, match + 0, 8); + MOVQ (AX), BX + MOVQ BX, (DI) + // memcpy(op + 8, match + 8, 8); + MOVQ 8(AX), BX + MOVQ BX, 8(DI) + // memcpy(op +16, match +16, 2); + MOVW 16(AX), BX + MOVW BX, 16(DI) + + ADDQ $4, DI // minmatch + ADDQ CX, DI + + // shortcut complete, load next token + JMP loop + +lit_len_loop_pre: + // if lit_len > 0 + CMPQ CX, $0 + JEQ offset + CMPQ CX, $0xF + JNE copy_literal + +lit_len_loop: + // for src[si] == 0xFF + CMPB (SI), $0xFF + JNE lit_len_finalise + + // bounds check src[si+1] + MOVQ SI, AX + ADDQ $1, AX + CMPQ AX, R9 + JGT err_short_buf + + // lit_len += 0xFF + ADDQ $0xFF, CX + INCQ SI + JMP lit_len_loop + +lit_len_finalise: + // lit_len += int(src[si]) + // si++ + MOVBQZX (SI), AX + ADDQ AX, CX + INCQ SI + +copy_literal: + // bounds check src and dst + MOVQ SI, AX + ADDQ CX, AX + CMPQ AX, R9 + JGT err_short_buf + + MOVQ DI, AX + ADDQ CX, AX + CMPQ AX, R8 + JGT err_short_buf + + // whats a good cut off to call memmove? + CMPQ CX, $16 + JGT memmove_lit + + // if len(dst[di:]) < 16 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $16 + JLT memmove_lit + + // if len(src[si:]) < 16 + MOVQ R9, AX + SUBQ SI, AX + CMPQ AX, $16 + JLT memmove_lit + + MOVOU (SI), X0 + MOVOU X0, (DI) + + JMP finish_lit_copy + +memmove_lit: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + // spill + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) // need len to inc SI, DI after + MOVB DX, 48(SP) + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVB 48(SP), DX + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + +finish_lit_copy: + ADDQ CX, SI + ADDQ CX, DI + + CMPQ SI, R9 + JGE end + +offset: + // CX := mLen + // free up DX to use for offset + MOVQ DX, CX + + MOVQ SI, AX + ADDQ $2, AX + CMPQ AX, R9 + JGT err_short_buf + + // offset + // DX := int(src[si]) | int(src[si+1])<<8 + MOVWQZX (SI), DX + ADDQ $2, SI + + // 0 offset is invalid + CMPQ DX, $0 + JEQ err_corrupt + + ANDB $0xF, CX + +match_len_loop_pre: + // if mlen != 0xF + CMPB CX, $0xF + JNE copy_match + +match_len_loop: + // for src[si] == 0xFF + // lit_len += 0xFF + CMPB (SI), $0xFF + JNE match_len_finalise + + // bounds check src[si+1] + MOVQ SI, AX + ADDQ $1, AX + CMPQ AX, R9 + JGT err_short_buf + + ADDQ $0xFF, CX + INCQ SI + JMP match_len_loop + +match_len_finalise: + // lit_len += int(src[si]) + // si++ + MOVBQZX (SI), AX + ADDQ AX, CX + INCQ SI + +copy_match: + // mLen += minMatch + ADDQ $4, CX + + // check we have match_len bytes left in dst + // di+match_len < len(dst) + MOVQ DI, AX + ADDQ CX, AX + CMPQ AX, R8 + JGT err_short_buf + + // DX = offset + // CX = match_len + // BX = &dst + (di - offset) + MOVQ DI, BX + SUBQ DX, BX + + // check BX is within dst + // if BX < &dst + CMPQ BX, R11 + JLT err_short_buf + + // if offset + match_len < di + MOVQ BX, AX + ADDQ CX, AX + CMPQ DI, AX + JGT copy_interior_match + + // AX := len(dst[:di]) + // MOVQ DI, AX + // SUBQ R11, AX + + // copy 16 bytes at a time + // if di-offset < 16 copy 16-(di-offset) bytes to di + // then do the remaining + +copy_match_loop: + // for match_len >= 0 + // dst[di] = dst[i] + // di++ + // i++ + MOVB (BX), AX + MOVB AX, (DI) + INCQ DI + INCQ BX + DECQ CX + + CMPQ CX, $0 + JGT copy_match_loop + 
+ JMP loop + +copy_interior_match: + CMPQ CX, $16 + JGT memmove_match + + // if len(dst[di:]) < 16 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $16 + JLT memmove_match + + MOVOU (BX), X0 + MOVOU X0, (DI) + + ADDQ CX, DI + JMP loop + +memmove_match: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ BX, 8(SP) + MOVQ CX, 16(SP) + // spill + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) // need len to inc SI, DI after + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 // TODO: make these sensible numbers + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + + ADDQ CX, DI + JMP loop + +err_corrupt: + MOVQ $-1, ret+48(FP) + RET + +err_short_buf: + MOVQ $-2, ret+48(FP) + RET + +end: + SUBQ R11, DI + MOVQ DI, ret+48(FP) + RET diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go new file mode 100644 index 00000000..919888ed --- /dev/null +++ b/vendor/github.com/pierrec/lz4/decode_other.go @@ -0,0 +1,98 @@ +// +build !amd64 appengine !gc noasm + +package lz4 + +func decodeBlock(dst, src []byte) (ret int) { + const hasError = -2 + defer func() { + if recover() != nil { + ret = hasError + } + }() + + var si, di int + for { + // Literals and match lengths (token). + b := int(src[si]) + si++ + + // Literals. + if lLen := b >> 4; lLen > 0 { + switch { + case lLen < 0xF && si+16 < len(src): + // Shortcut 1 + // if we have enough room in src and dst, and the literals length + // is small enough (0..14) then copy all 16 bytes, even if not all + // are part of the literals. + copy(dst[di:], src[si:si+16]) + si += lLen + di += lLen + if mLen := b & 0xF; mLen < 0xF { + // Shortcut 2 + // if the match length (4..18) fits within the literals, then copy + // all 18 bytes, even if not all are part of the literals. + mLen += 4 + if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset { + i := di - offset + end := i + 18 + if end > len(dst) { + // The remaining buffer may not hold 18 bytes. + // See https://github.com/pierrec/lz4/issues/51. + end = len(dst) + } + copy(dst[di:], dst[i:end]) + si += 2 + di += mLen + continue + } + } + case lLen == 0xF: + for src[si] == 0xFF { + lLen += 0xFF + si++ + } + lLen += int(src[si]) + si++ + fallthrough + default: + copy(dst[di:di+lLen], src[si:si+lLen]) + si += lLen + di += lLen + } + } + if si >= len(src) { + return di + } + + offset := int(src[si]) | int(src[si+1])<<8 + if offset == 0 { + return hasError + } + si += 2 + + // Match. + mLen := b & 0xF + if mLen == 0xF { + for src[si] == 0xFF { + mLen += 0xFF + si++ + } + mLen += int(src[si]) + si++ + } + mLen += minMatch + + // Copy the match. + expanded := dst[di-offset:] + if mLen > offset { + // Efficiently copy the match dst[di-offset:di] into the dst slice. 
+ bytesToCopy := offset * (mLen / offset) + for n := offset; n <= bytesToCopy+offset; n *= 2 { + copy(expanded[n:], expanded[:n]) + } + di += bytesToCopy + mLen -= bytesToCopy + } + di += copy(dst[di:di+mLen], expanded[:mLen]) + } +} diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go new file mode 100644 index 00000000..1c45d181 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/errors.go @@ -0,0 +1,30 @@ +package lz4 + +import ( + "errors" + "fmt" + "os" + rdebug "runtime/debug" +) + +var ( + // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed + // block is corrupted or the destination buffer is not large enough for the uncompressed data. + ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short") + // ErrInvalid is returned when reading an invalid LZ4 archive. + ErrInvalid = errors.New("lz4: bad magic number") + // ErrBlockDependency is returned when attempting to decompress an archive created with block dependency. + ErrBlockDependency = errors.New("lz4: block dependency not supported") + // ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position. + ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent") +) + +func recoverBlock(e *error) { + if r := recover(); r != nil && *e == nil { + if debugFlag { + fmt.Fprintln(os.Stderr, r) + rdebug.PrintStack() + } + *e = ErrInvalidSourceShortBuffer + } +} diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go new file mode 100644 index 00000000..7a76a6bc --- /dev/null +++ b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go @@ -0,0 +1,223 @@ +// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version). +// (https://github.com/Cyan4973/XXH/) +package xxh32 + +import ( + "encoding/binary" +) + +const ( + prime1 uint32 = 2654435761 + prime2 uint32 = 2246822519 + prime3 uint32 = 3266489917 + prime4 uint32 = 668265263 + prime5 uint32 = 374761393 + + primeMask = 0xFFFFFFFF + prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984 + prime1minus = uint32((-int64(prime1)) & primeMask) // 1640531535 +) + +// XXHZero represents an xxhash32 object with seed 0. +type XXHZero struct { + v1 uint32 + v2 uint32 + v3 uint32 + v4 uint32 + totalLen uint64 + buf [16]byte + bufused int +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (xxh XXHZero) Sum(b []byte) []byte { + h32 := xxh.Sum32() + return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) +} + +// Reset resets the Hash to its initial state. +func (xxh *XXHZero) Reset() { + xxh.v1 = prime1plus2 + xxh.v2 = prime2 + xxh.v3 = 0 + xxh.v4 = prime1minus + xxh.totalLen = 0 + xxh.bufused = 0 +} + +// Size returns the number of bytes returned by Sum(). +func (xxh *XXHZero) Size() int { + return 4 +} + +// BlockSize gives the minimum number of bytes accepted by Write(). +func (xxh *XXHZero) BlockSize() int { + return 1 +} + +// Write adds input bytes to the Hash. +// It never returns an error. 
+func (xxh *XXHZero) Write(input []byte) (int, error) { + if xxh.totalLen == 0 { + xxh.Reset() + } + n := len(input) + m := xxh.bufused + + xxh.totalLen += uint64(n) + + r := len(xxh.buf) - m + if n < r { + copy(xxh.buf[m:], input) + xxh.bufused += len(input) + return n, nil + } + + p := 0 + // Causes compiler to work directly from registers instead of stack: + v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4 + if m > 0 { + // some data left from previous update + copy(xxh.buf[xxh.bufused:], input[:r]) + xxh.bufused += len(input) - r + + // fast rotl(13) + buf := xxh.buf[:16] // BCE hint. + v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1 + p = r + xxh.bufused = 0 + } + + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 + } + xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4 + + copy(xxh.buf[xxh.bufused:], input[p:]) + xxh.bufused += len(input) - p + + return n, nil +} + +// Sum32 returns the 32 bits Hash value. +func (xxh *XXHZero) Sum32() uint32 { + h32 := uint32(xxh.totalLen) + if h32 >= 16 { + h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4) + } else { + h32 += prime5 + } + + p := 0 + n := xxh.bufused + buf := xxh.buf + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3 + h32 = rol17(h32) * prime4 + } + for ; p < n; p++ { + h32 += uint32(buf[p]) * prime5 + h32 = rol11(h32) * prime1 + } + + h32 ^= h32 >> 15 + h32 *= prime2 + h32 ^= h32 >> 13 + h32 *= prime3 + h32 ^= h32 >> 16 + + return h32 +} + +// ChecksumZero returns the 32bits Hash value. +func ChecksumZero(input []byte) uint32 { + n := len(input) + h32 := uint32(n) + + if n < 16 { + h32 += prime5 + } else { + v1 := prime1plus2 + v2 := prime2 + v3 := uint32(0) + v4 := prime1minus + p := 0 + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 + } + input = input[p:] + n -= p + h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + } + + p := 0 + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3 + h32 = rol17(h32) * prime4 + } + for p < n { + h32 += uint32(input[p]) * prime5 + h32 = rol11(h32) * prime1 + p++ + } + + h32 ^= h32 >> 15 + h32 *= prime2 + h32 ^= h32 >> 13 + h32 *= prime3 + h32 ^= h32 >> 16 + + return h32 +} + +// Uint32Zero hashes x with seed 0. 
+func Uint32Zero(x uint32) uint32 { + h := prime5 + 4 + x*prime3 + h = rol17(h) * prime4 + h ^= h >> 15 + h *= prime2 + h ^= h >> 13 + h *= prime3 + h ^= h >> 16 + return h +} + +func rol1(u uint32) uint32 { + return u<<1 | u>>31 +} + +func rol7(u uint32) uint32 { + return u<<7 | u>>25 +} + +func rol11(u uint32) uint32 { + return u<<11 | u>>21 +} + +func rol12(u uint32) uint32 { + return u<<12 | u>>20 +} + +func rol13(u uint32) uint32 { + return u<<13 | u>>19 +} + +func rol17(u uint32) uint32 { + return u<<17 | u>>15 +} + +func rol18(u uint32) uint32 { + return u<<18 | u>>14 +} diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go index 46389243..6c73539a 100644 --- a/vendor/github.com/pierrec/lz4/lz4.go +++ b/vendor/github.com/pierrec/lz4/lz4.go @@ -1,13 +1,5 @@ // Package lz4 implements reading and writing lz4 compressed data (a frame), -// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html, -// using an io.Reader (decompression) and io.Writer (compression). -// It is designed to minimize memory usage while maximizing throughput by being able to -// [de]compress data concurrently. -// -// The Reader and the Writer support concurrent processing provided the supplied buffers are -// large enough (in multiples of BlockMaxSize) and there is no block dependency. -// Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently. -// The runtime.GOMAXPROCS() value is used to apply concurrency or not. +// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html. // // Although the block level compression and decompression functions are exposed and are fully compatible // with the lz4 block format definition, they are low level and should not be used directly. @@ -15,15 +7,12 @@ // http://fastcompression.blogspot.fr/2011/05/lz4-explained.html // // See https://github.com/Cyan4973/lz4 for the reference C implementation. +// package lz4 -import ( - "hash" - "sync" - "unsafe" +import "math/bits" - "github.com/pierrec/xxHash/xxHash32" -) +import "sync" const ( // Extension is the LZ4 frame file name extension @@ -31,88 +20,94 @@ const ( // Version is the LZ4 frame format version Version = 1 - frameMagic = uint32(0x184D2204) - frameSkipMagic = uint32(0x184D2A50) + frameMagic uint32 = 0x184D2204 + frameSkipMagic uint32 = 0x184D2A50 // The following constants are used to setup the compression algorithm. - minMatch = 4 // the minimum size of the match sequence size (4 bytes) - winSizeLog = 16 // LZ4 64Kb window size limit - winSize = 1 << winSizeLog - winMask = winSize - 1 // 64Kb window of previous data for dependent blocks + minMatch = 4 // the minimum size of the match sequence size (4 bytes) + winSizeLog = 16 // LZ4 64Kb window size limit + winSize = 1 << winSizeLog + winMask = winSize - 1 // 64Kb window of previous data for dependent blocks + compressedBlockFlag = 1 << 31 + compressedBlockMask = compressedBlockFlag - 1 // hashLog determines the size of the hash table used to quickly find a previous match position. // Its value influences the compression speed and memory usage, the lower the faster, // but at the expense of the compression ratio. - // 16 seems to be the best compromise. - hashLog = 16 - hashTableSize = 1 << hashLog - hashShift = uint((minMatch * 8) - hashLog) + // 16 seems to be the best compromise for fast compression. 
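The new `internal/xxh32` package above provides both a one-shot checksum (`ChecksumZero`) and a streaming hasher (`XXHZero`), both with a fixed zero seed. Being under `internal/`, it is only importable from within the lz4 module itself; the sketch below therefore sits in the `lz4` package, and `checksumSketch` is a hypothetical helper, not part of the vendored code:

```go
package lz4

import "github.com/pierrec/lz4/internal/xxh32"

// checksumSketch is a hypothetical helper: it returns the one-shot and the
// streaming hash of the same input, which are identical because both use the
// same zero seed.
func checksumSketch(data []byte) (oneShot, streamed uint32) {
	// One-shot hash of a complete buffer, as used for block checksums.
	oneShot = xxh32.ChecksumZero(data)

	// Streaming hash fed incrementally, as used for the frame checksum.
	var h xxh32.XXHZero
	_, _ = h.Write(data[:len(data)/2])
	_, _ = h.Write(data[len(data)/2:])
	streamed = h.Sum32()
	return oneShot, streamed
}
```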
+ hashLog = 16 + htSize = 1 << hashLog - mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes. - skipStrength = 6 // variable step for fast scan - - hasher = uint32(2654435761) // prime number used to hash minMatch + mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes. ) // map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb. -var bsMapID = map[byte]int{4: 64 << 10, 5: 256 << 10, 6: 1 << 20, 7: 4 << 20} -var bsMapValue = map[int]byte{} +const ( + blockSize64K = 1 << (16 + 2*iota) + blockSize256K + blockSize1M + blockSize4M +) -// Reversed. -func init() { - for i, v := range bsMapID { - bsMapValue[v] = i +var ( + // Keep a pool of buffers for each valid block sizes. + bsMapValue = [...]*sync.Pool{ + newBufferPool(2 * blockSize64K), + newBufferPool(2 * blockSize256K), + newBufferPool(2 * blockSize1M), + newBufferPool(2 * blockSize4M), + } +) + +// newBufferPool returns a pool for buffers of the given size. +func newBufferPool(size int) *sync.Pool { + return &sync.Pool{ + New: func() interface{} { + return make([]byte, size) + }, } } -var isLittleEndian = getIsLittleEndian() +// getBuffer returns a buffer to its pool. +func getBuffer(size int) []byte { + idx := blockSizeValueToIndex(size) - 4 + return bsMapValue[idx].Get().([]byte) +} -func getIsLittleEndian() (ret bool) { - var i int = 0x1 - bs := (*[1]byte)(unsafe.Pointer(&i)) - if bs[0] == 0 { - return false +// putBuffer returns a buffer to its pool. +func putBuffer(size int, buf []byte) { + if cap(buf) > 0 { + idx := blockSizeValueToIndex(size) - 4 + bsMapValue[idx].Put(buf[:cap(buf)]) } +} +func blockSizeIndexToValue(i byte) int { + return 1 << (16 + 2*uint(i)) +} +func isValidBlockSize(size int) bool { + const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M - return true + return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1 +} +func blockSizeValueToIndex(size int) byte { + return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2) } // Header describes the various flags that can be set on a Writer or obtained from a Reader. -// The default values match those of the LZ4 frame format definition (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html). +// The default values match those of the LZ4 frame format definition +// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html). // // NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls. -// It is the caller responsibility to check them if necessary (typically when using the Reader concurrency). +// It is the caller's responsibility to check them if necessary. type Header struct { - BlockDependency bool // compressed blocks are dependent (one block depends on the last 64Kb of the previous one) - BlockChecksum bool // compressed blocks are checksumed - NoChecksum bool // frame checksum - BlockMaxSize int // the size of the decompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB. - Size uint64 // the frame total size. It is _not_ computed by the Writer. - HighCompression bool // use high compression (only for the Writer) - done bool // whether the descriptor was processed (Read or Write and checked) - // Removed as not supported - // Dict bool // a dictionary id is to be used - // DictID uint32 // the dictionary id read from the frame, if any. -} - -// xxhPool wraps the standard pool for xxHash items. -// Putting items back in the pool automatically resets them. 
-type xxhPool struct { - sync.Pool -} - -func (p *xxhPool) Get() hash.Hash32 { - return p.Pool.Get().(hash.Hash32) -} - -func (p *xxhPool) Put(h hash.Hash32) { - h.Reset() - p.Pool.Put(h) + BlockChecksum bool // Compressed blocks checksum flag. + NoChecksum bool // Frame checksum flag. + BlockMaxSize int // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB. + Size uint64 // Frame total size. It is _not_ computed by the Writer. + CompressionLevel int // Compression level (higher is better, use 0 for fastest compression). + done bool // Header processed flag (Read or Write and checked). } -// hashPool is used by readers and writers and contains xxHash items. -var hashPool = xxhPool{ - Pool: sync.Pool{ - New: func() interface{} { return xxHash32.New(0) }, - }, +func (h *Header) Reset() { + h.done = false } diff --git a/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/vendor/github.com/pierrec/lz4/lz4_go1.10.go new file mode 100644 index 00000000..9a0fb007 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/lz4_go1.10.go @@ -0,0 +1,29 @@ +//+build go1.10 + +package lz4 + +import ( + "fmt" + "strings" +) + +func (h Header) String() string { + var s strings.Builder + + s.WriteString(fmt.Sprintf("%T{", h)) + if h.BlockChecksum { + s.WriteString("BlockChecksum: true ") + } + if h.NoChecksum { + s.WriteString("NoChecksum: true ") + } + if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { + s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) + } + if l := h.CompressionLevel; l != 0 { + s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) + } + s.WriteByte('}') + + return s.String() +} diff --git a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go new file mode 100644 index 00000000..12c761a2 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go @@ -0,0 +1,29 @@ +//+build !go1.10 + +package lz4 + +import ( + "bytes" + "fmt" +) + +func (h Header) String() string { + var s bytes.Buffer + + s.WriteString(fmt.Sprintf("%T{", h)) + if h.BlockChecksum { + s.WriteString("BlockChecksum: true ") + } + if h.NoChecksum { + s.WriteString("NoChecksum: true ") + } + if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { + s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) + } + if l := h.CompressionLevel; l != 0 { + s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) + } + s.WriteByte('}') + + return s.String() +} diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go index 9f7fd604..87dd72bd 100644 --- a/vendor/github.com/pierrec/lz4/reader.go +++ b/vendor/github.com/pierrec/lz4/reader.go @@ -2,44 +2,38 @@ package lz4 import ( "encoding/binary" - "errors" "fmt" - "hash" "io" "io/ioutil" - "runtime" - "sync" - "sync/atomic" -) - -// ErrInvalid is returned when the data being read is not an LZ4 archive -// (LZ4 magic number detection failed). -var ErrInvalid = errors.New("invalid lz4 data") -// errEndOfBlock is returned by readBlock when it has reached the last block of the frame. -// It is not an error. -var errEndOfBlock = errors.New("end of block") + "github.com/pierrec/lz4/internal/xxh32" +) // Reader implements the LZ4 frame decoder. // The Header is set after the first call to Read(). // The Header may change between Read() calls in case of concatenated frames. 
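The reworked `Header` above drops `BlockDependency` and `HighCompression` in favour of a `CompressionLevel` field. A configuration sketch for the frame writer, assuming the `Writer` still embeds `Header` as the doc comment ("flags that can be set on a Writer") implies; the chosen values are illustrative:

```go
package main

import (
	"io"
	"os"
	"strings"

	"github.com/pierrec/lz4"
)

func main() {
	zw := lz4.NewWriter(os.Stdout)
	// Fields documented on Header above; the values are illustrative.
	zw.Header.BlockMaxSize = 256 << 10 // one of 64KB, 256KB, 1MB, 4MB
	zw.Header.BlockChecksum = true     // add per-block checksums
	zw.Header.CompressionLevel = 0     // 0 selects the fastest compression

	_, _ = io.Copy(zw, strings.NewReader("hello world"))
	_ = zw.Close() // flushes the last block and the frame checksum
}
```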
type Reader struct { - Pos int64 // position within the source Header - src io.Reader - checksum hash.Hash32 // frame hash - wg sync.WaitGroup // decompressing go routine wait group - data []byte // buffered decompressed data - window []byte // 64Kb decompressed data window + // Handler called when a block has been successfully read. + // It provides the number of bytes read. + OnBlockDone func(size int) + + buf [8]byte // Scrap buffer. + pos int64 // Current position in src. + src io.Reader // Source. + zdata []byte // Compressed data. + data []byte // Uncompressed data. + idx int // Index of unread bytes into data. + checksum xxh32.XXHZero // Frame hash. + skip int64 // Bytes to skip before next read. + dpos int64 // Position in dest } // NewReader returns a new LZ4 frame decoder. // No access to the underlying io.Reader is performed. func NewReader(src io.Reader) *Reader { - return &Reader{ - src: src, - checksum: hashPool.Get(), - } + r := &Reader{src: src} + return r } // readHeader checks the frame magic number and parses the frame descriptoz. @@ -48,88 +42,95 @@ func NewReader(src io.Reader) *Reader { func (z *Reader) readHeader(first bool) error { defer z.checksum.Reset() + buf := z.buf[:] for { - var magic uint32 - if err := binary.Read(z.src, binary.LittleEndian, &magic); err != nil { + magic, err := z.readUint32() + if err != nil { + z.pos += 4 if !first && err == io.ErrUnexpectedEOF { return io.EOF } return err } - z.Pos += 4 - if magic>>8 == frameSkipMagic>>8 { - var skipSize uint32 - if err := binary.Read(z.src, binary.LittleEndian, &skipSize); err != nil { - return err - } - z.Pos += 4 - m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize)) - z.Pos += m - if err != nil { - return err - } - continue + if magic == frameMagic { + break } - if magic != frameMagic { + if magic>>8 != frameSkipMagic>>8 { return ErrInvalid } - break + skipSize, err := z.readUint32() + if err != nil { + return err + } + z.pos += 4 + m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize)) + if err != nil { + return err + } + z.pos += m } - // header - var buf [8]byte + // Header. if _, err := io.ReadFull(z.src, buf[:2]); err != nil { return err } - z.Pos += 2 + z.pos += 8 b := buf[0] - if b>>6 != Version { - return fmt.Errorf("lz4.Read: invalid version: got %d expected %d", b>>6, Version) + if v := b >> 6; v != Version { + return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version) + } + if b>>5&1 == 0 { + return ErrBlockDependency } - z.BlockDependency = b>>5&1 == 0 z.BlockChecksum = b>>4&1 > 0 frameSize := b>>3&1 > 0 z.NoChecksum = b>>2&1 == 0 - // z.Dict = b&1 > 0 bmsID := buf[1] >> 4 & 0x7 - bSize, ok := bsMapID[bmsID] - if !ok { - return fmt.Errorf("lz4.Read: invalid block max size: %d", bmsID) + if bmsID < 4 || bmsID > 7 { + return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID) } + bSize := blockSizeIndexToValue(bmsID - 4) z.BlockMaxSize = bSize - z.checksum.Write(buf[0:2]) + // Allocate the compressed/uncompressed buffers. + // The compressed buffer cannot exceed the uncompressed one. 
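The rewritten `Reader` above now exposes an `OnBlockDone` callback that reports the number of bytes read for each completed block. A wiring sketch, assuming an LZ4 frame arrives on stdin:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/pierrec/lz4"
)

func main() {
	// Assumes an LZ4 frame on stdin.
	zr := lz4.NewReader(os.Stdin)

	// OnBlockDone is the new hook on Reader shown above; it receives the
	// number of bytes read for each completed block.
	zr.OnBlockDone = func(size int) {
		log.Printf("block done: %d bytes", size)
	}

	if _, err := io.Copy(os.Stdout, zr); err != nil {
		log.Fatal(err)
	}
}
```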
+ if n := 2 * bSize; cap(z.zdata) < n { + z.zdata = make([]byte, n, n) + } + if debugFlag { + debug("header block max size id=%d size=%d", bmsID, bSize) + } + z.zdata = z.zdata[:bSize] + z.data = z.zdata[:cap(z.zdata)][bSize:] + z.idx = len(z.data) + + _, _ = z.checksum.Write(buf[0:2]) if frameSize { - if err := binary.Read(z.src, binary.LittleEndian, &z.Size); err != nil { + buf := buf[:8] + if _, err := io.ReadFull(z.src, buf); err != nil { return err } - z.Pos += 8 - binary.LittleEndian.PutUint64(buf[:], z.Size) - z.checksum.Write(buf[0:8]) + z.Size = binary.LittleEndian.Uint64(buf) + z.pos += 8 + _, _ = z.checksum.Write(buf) } - // if z.Dict { - // if err := binary.Read(z.src, binary.LittleEndian, &z.DictID); err != nil { - // return err - // } - // z.Pos += 4 - // binary.LittleEndian.PutUint32(buf[:], z.DictID) - // z.checksum.Write(buf[0:4]) - // } - - // header checksum + // Header checksum. if _, err := io.ReadFull(z.src, buf[:1]); err != nil { return err } - z.Pos++ + z.pos++ if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] { - return fmt.Errorf("lz4.Read: invalid header checksum: got %v expected %v", buf[0], h) + return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h) } z.Header.done = true + if debugFlag { + debug("header read: %v", z.Header) + } return nil } @@ -139,177 +140,176 @@ func (z *Reader) readHeader(first bool) error { // Since there can be multiple streams concatenated, Header values may // change between calls to Read(). If that is the case, no data is actually read from // the underlying io.Reader, to allow for potential input buffer resizing. -// -// Data is buffered if the input buffer is too small, and exhausted upon successive calls. -// -// If the buffer is large enough (typically in multiples of BlockMaxSize) and there is -// no block dependency, then the data will be decompressed concurrently based on the GOMAXPROCS value. -func (z *Reader) Read(buf []byte) (n int, err error) { +func (z *Reader) Read(buf []byte) (int, error) { + if debugFlag { + debug("Read buf len=%d", len(buf)) + } if !z.Header.done { - if err = z.readHeader(true); err != nil { - return + if err := z.readHeader(true); err != nil { + return 0, err + } + if debugFlag { + debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d", + len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx) } } if len(buf) == 0 { - return + return 0, nil } - // exhaust remaining data from previous Read() - if len(z.data) > 0 { - n = copy(buf, z.data) - z.data = z.data[n:] - if len(z.data) == 0 { - z.data = nil + if z.idx == len(z.data) { + // No data ready for reading, process the next block. + if debugFlag { + debug("reading block from writer") } - return - } + // Reset uncompressed buffer + z.data = z.zdata[:cap(z.zdata)][len(z.zdata):] - // Break up the input buffer into BlockMaxSize blocks with at least one block. - // Then decompress into each of them concurrently if possible (no dependency). - // In case of dependency, the first block will be missing the window (except on the - // very first call), the rest will have it already since it comes from the previous block. 
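With the windowed, concurrent decode path removed, the Reader above behaves as a plain io.Reader that refills one block at a time and reports progress through the optional OnBlockDone hook. A minimal usage sketch, assuming the import path as it appears in this vendor tree and a hypothetical input file name:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/pierrec/lz4"
)

func main() {
	// "payload.lz4" is a placeholder; any io.Reader can be the source.
	f, err := os.Open("payload.lz4")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	zr := lz4.NewReader(f)
	// Optional hook added in this version: called after each decoded block.
	zr.OnBlockDone = func(n int) { log.Printf("decoded block: %d bytes", n) }

	if _, err := io.Copy(os.Stdout, zr); err != nil {
		log.Fatal(err)
	}
}
```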
- wbuf := buf - zn := (len(wbuf) + z.BlockMaxSize - 1) / z.BlockMaxSize - zblocks := make([]block, zn) - for zi, abort := 0, uint32(0); zi < zn && atomic.LoadUint32(&abort) == 0; zi++ { - zb := &zblocks[zi] - // last block may be too small - if len(wbuf) < z.BlockMaxSize+len(z.window) { - wbuf = make([]byte, z.BlockMaxSize+len(z.window)) - } - copy(wbuf, z.window) - if zb.err = z.readBlock(wbuf, zb); zb.err != nil { - break + // Block length: 0 = end of frame, highest bit set: uncompressed. + bLen, err := z.readUint32() + if err != nil { + return 0, err } - wbuf = wbuf[z.BlockMaxSize:] - if !z.BlockDependency { - z.wg.Add(1) - go z.decompressBlock(zb, &abort) - continue + z.pos += 4 + + if bLen == 0 { + // End of frame reached. + if !z.NoChecksum { + // Validate the frame checksum. + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + if debugFlag { + debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum) + } + z.pos += 4 + if h := z.checksum.Sum32(); checksum != h { + return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum) + } + } + + // Get ready for the next concatenated frame and keep the position. + pos := z.pos + z.Reset(z.src) + z.pos = pos + + // Since multiple frames can be concatenated, check for more. + return 0, z.readHeader(false) } - // cannot decompress concurrently when dealing with block dependency - z.decompressBlock(zb, nil) - // the last block may not contain enough data - if len(z.window) == 0 { - z.window = make([]byte, winSize) + + if debugFlag { + debug("raw block size %d", bLen) } - if len(zb.data) >= winSize { - copy(z.window, zb.data[len(zb.data)-winSize:]) + if bLen&compressedBlockFlag > 0 { + // Uncompressed block. + bLen &= compressedBlockMask + if debugFlag { + debug("uncompressed block size %d", bLen) + } + if int(bLen) > cap(z.data) { + return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) + } + z.data = z.data[:bLen] + if _, err := io.ReadFull(z.src, z.data); err != nil { + return 0, err + } + z.pos += int64(bLen) + if z.OnBlockDone != nil { + z.OnBlockDone(int(bLen)) + } + + if z.BlockChecksum { + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if h := xxh32.ChecksumZero(z.data); h != checksum { + return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) + } + } + } else { - copy(z.window, z.window[len(zb.data):]) - copy(z.window[len(zb.data)+1:], zb.data) - } - } - z.wg.Wait() + // Compressed block. 
+ if debugFlag { + debug("compressed block size %d", bLen) + } + if int(bLen) > cap(z.data) { + return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) + } + zdata := z.zdata[:bLen] + if _, err := io.ReadFull(z.src, zdata); err != nil { + return 0, err + } + z.pos += int64(bLen) + + if z.BlockChecksum { + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if h := xxh32.ChecksumZero(zdata); h != checksum { + return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) + } + } - // since a block size may be less then BlockMaxSize, trim the decompressed buffers - for _, zb := range zblocks { - if zb.err != nil { - if zb.err == errEndOfBlock { - return n, z.close() + n, err := UncompressBlock(zdata, z.data) + if err != nil { + return 0, err + } + z.data = z.data[:n] + if z.OnBlockDone != nil { + z.OnBlockDone(n) } - return n, zb.err - } - bLen := len(zb.data) - if !z.NoChecksum { - z.checksum.Write(zb.data) - } - m := copy(buf[n:], zb.data) - // buffer the remaining data (this is necessarily the last block) - if m < bLen { - z.data = zb.data[m:] } - n += m - } - return -} - -// readBlock reads an entire frame block from the frame. -// The input buffer is the one that will receive the decompressed data. -// If the end of the frame is detected, it returns the errEndOfBlock error. -func (z *Reader) readBlock(buf []byte, b *block) error { - var bLen uint32 - if err := binary.Read(z.src, binary.LittleEndian, &bLen); err != nil { - return err - } - atomic.AddInt64(&z.Pos, 4) - - switch { - case bLen == 0: - return errEndOfBlock - case bLen&(1<<31) == 0: - b.compressed = true - b.data = buf - b.zdata = make([]byte, bLen) - default: - bLen = bLen & (1<<31 - 1) - if int(bLen) > len(buf) { - return fmt.Errorf("lz4.Read: invalid block size: %d", bLen) + if !z.NoChecksum { + _, _ = z.checksum.Write(z.data) + if debugFlag { + debug("current frame checksum %x", z.checksum.Sum32()) + } } - b.data = buf[:bLen] - b.zdata = buf[:bLen] - } - if _, err := io.ReadFull(z.src, b.zdata); err != nil { - return err + z.idx = 0 } - if z.BlockChecksum { - if err := binary.Read(z.src, binary.LittleEndian, &b.checksum); err != nil { - return err - } - xxh := hashPool.Get() - defer hashPool.Put(xxh) - xxh.Write(b.zdata) - if h := xxh.Sum32(); h != b.checksum { - return fmt.Errorf("lz4.Read: invalid block checksum: got %x expected %x", h, b.checksum) - } + if z.skip > int64(len(z.data[z.idx:])) { + z.skip -= int64(len(z.data[z.idx:])) + z.dpos += int64(len(z.data[z.idx:])) + z.idx = len(z.data) + return 0, nil } - return nil -} + z.idx += int(z.skip) + z.dpos += z.skip + z.skip = 0 -// decompressBlock decompresses a frame block. -// In case of an error, the block err is set with it and abort is set to 1. -func (z *Reader) decompressBlock(b *block, abort *uint32) { - if abort != nil { - defer z.wg.Done() + n := copy(buf, z.data[z.idx:]) + z.idx += n + z.dpos += int64(n) + if debugFlag { + debug("copied %d bytes to input", n) } - if b.compressed { - n := len(z.window) - m, err := UncompressBlock(b.zdata, b.data, n) - if err != nil { - if abort != nil { - atomic.StoreUint32(abort, 1) - } - b.err = err - return - } - b.data = b.data[n : n+m] - } - atomic.AddInt64(&z.Pos, int64(len(b.data))) + + return n, nil } -// close validates the frame checksum (if any) and checks the next frame (if any). 
-func (z *Reader) close() error { - if !z.NoChecksum { - var checksum uint32 - if err := binary.Read(z.src, binary.LittleEndian, &checksum); err != nil { - return err - } - if checksum != z.checksum.Sum32() { - return fmt.Errorf("lz4.Read: invalid frame checksum: got %x expected %x", z.checksum.Sum32(), checksum) - } +// Seek implements io.Seeker, but supports seeking forward from the current +// position only. Any other seek will return an error. Allows skipping output +// bytes which aren't needed, which in some scenarios is faster than reading +// and discarding them. +// Note this may cause future calls to Read() to read 0 bytes if all of the +// data they would have returned is skipped. +func (z *Reader) Seek(offset int64, whence int) (int64, error) { + if offset < 0 || whence != io.SeekCurrent { + return z.dpos + z.skip, ErrUnsupportedSeek } - - // get ready for the next concatenated frame, but do not change the position - pos := z.Pos - z.Reset(z.src) - z.Pos = pos - - // since multiple frames can be concatenated, check for another one - return z.readHeader(false) + z.skip += offset + return z.dpos + z.skip, nil } // Reset discards the Reader's state and makes it equivalent to the @@ -317,48 +317,19 @@ func (z *Reader) close() error { // This permits reusing a Reader rather than allocating a new one. func (z *Reader) Reset(r io.Reader) { z.Header = Header{} - z.Pos = 0 + z.pos = 0 z.src = r + z.zdata = z.zdata[:0] + z.data = z.data[:0] + z.idx = 0 z.checksum.Reset() - z.data = nil - z.window = nil } -// WriteTo decompresses the data from the underlying io.Reader and writes it to the io.Writer. -// Returns the number of bytes written. -func (z *Reader) WriteTo(w io.Writer) (n int64, err error) { - cpus := runtime.GOMAXPROCS(0) - var buf []byte - - // The initial buffer being nil, the first Read will be only read the compressed frame options. - // The buffer can then be sized appropriately to support maximum concurrency decompression. - // If multiple frames are concatenated, Read() will return with no data decompressed but with - // potentially changed options. The buffer will be resized accordingly, always trying to - // maximize concurrency. - for { - nsize := 0 - // the block max size can change if multiple streams are concatenated. - // Check it after every Read(). - if z.BlockDependency { - // in case of dependency, we cannot decompress concurrently, - // so allocate the minimum buffer + window size - nsize = len(z.window) + z.BlockMaxSize - } else { - // if no dependency, allocate a buffer large enough for concurrent decompression - nsize = cpus * z.BlockMaxSize - } - if nsize != len(buf) { - buf = make([]byte, nsize) - } - - m, er := z.Read(buf) - if er != nil && er != io.EOF { - return n, er - } - m, err = w.Write(buf[:m]) - n += int64(m) - if err != nil || er == io.EOF { - return - } - } +// readUint32 reads an uint32 into the supplied buffer. +// The idea is to make use of the already allocated buffers avoiding additional allocations. 
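The Seek method added above accepts only io.SeekCurrent with a non-negative offset; it records the offset in z.skip so the next Read discards that many decompressed bytes before copying into the caller's buffer. A small sketch of skipping a prefix of the decompressed stream; the helper name and package are illustrative, not part of the library:

```go
package lz4util // illustrative helper package

import (
	"io"

	"github.com/pierrec/lz4"
)

// SkipAndCopy discards the first skip decompressed bytes and copies the rest
// to dst. Any other kind of seek returns ErrUnsupportedSeek, per the Seek
// implementation above.
func SkipAndCopy(dst io.Writer, src io.Reader, skip int64) (int64, error) {
	zr := lz4.NewReader(src)
	if _, err := zr.Seek(skip, io.SeekCurrent); err != nil {
		return 0, err
	}
	return io.Copy(dst, zr)
}
```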
+func (z *Reader) readUint32() (uint32, error) { + buf := z.buf[:4] + _, err := io.ReadFull(z.src, buf) + x := binary.LittleEndian.Uint32(buf) + return x, err } diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go index 11082f5a..324f1386 100644 --- a/vendor/github.com/pierrec/lz4/writer.go +++ b/vendor/github.com/pierrec/lz4/writer.go @@ -3,23 +3,35 @@ package lz4 import ( "encoding/binary" "fmt" - "hash" + "github.com/pierrec/lz4/internal/xxh32" "io" "runtime" ) +// zResult contains the results of compressing a block. +type zResult struct { + size uint32 // Block header + data []byte // Compressed data + checksum uint32 // Data checksum +} + // Writer implements the LZ4 frame encoder. type Writer struct { Header - dst io.Writer - checksum hash.Hash32 // frame checksum - data []byte // data to be compressed, only used when dealing with block dependency as we need 64Kb to work with - window []byte // last 64KB of decompressed data (block dependency) + blockMaxSize buffer - - zbCompressBuf []byte // buffer for compressing lz4 blocks - writeSizeBuf []byte // four-byte slice for writing checksums and sizes in writeblock - hashTable []hashEntry - currentGeneration uint + // Handler called when a block has been successfully written out. + // It provides the number of bytes written. + OnBlockDone func(size int) + + buf [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes + dst io.Writer // Destination. + checksum xxh32.XXHZero // Frame checksum. + data []byte // Data to be compressed + buffer for compressed data. + idx int // Index into data. + hashtable [winSize]int // Hash table used in CompressBlock(). + + // For concurrency. + c chan chan zResult // Channel for block compression goroutines and writer goroutine. + err error // Any error encountered while writing to the underlying destination. } // NewWriter returns a new LZ4 frame encoder. @@ -27,39 +39,101 @@ type Writer struct { // The supplied Header is checked at the first Write. // It is ok to change it before the first Write but then not until a Reset() is performed. func NewWriter(dst io.Writer) *Writer { - return &Writer{ - dst: dst, - checksum: hashPool.Get(), - Header: Header{ - BlockMaxSize: 4 << 20, - }, - hashTable: make([]hashEntry, hashTableSize), - writeSizeBuf: make([]byte, 4), - } + z := new(Writer) + z.Reset(dst) + return z +} + +// WithConcurrency sets the number of concurrent go routines used for compression. +// A negative value sets the concurrency to GOMAXPROCS. +func (z *Writer) WithConcurrency(n int) *Writer { + switch { + case n == 0 || n == 1: + z.c = nil + return z + case n < 0: + n = runtime.GOMAXPROCS(0) + } + z.c = make(chan chan zResult, n) + // Writer goroutine managing concurrent block compression goroutines. + go func() { + // Process next block compression item. + for c := range z.c { + // Read the next compressed block result. + // Waiting here ensures that the blocks are output in the order they were sent. + // The incoming channel is always closed as it indicates to the caller that + // the block has been processed. + res := <-c + n := len(res.data) + if n == 0 { + // Notify the block compression routine that we are done with its result. + // This is used when a sentinel block is sent to terminate the compression. + close(c) + return + } + // Write the block. 
+ if err := z.writeUint32(res.size); err != nil && z.err == nil { + z.err = err + } + if _, err := z.dst.Write(res.data); err != nil && z.err == nil { + z.err = err + } + if z.BlockChecksum { + if err := z.writeUint32(res.checksum); err != nil && z.err == nil { + z.err = err + } + } + if isCompressed := res.size&compressedBlockFlag == 0; isCompressed { + // It is now safe to release the buffer as no longer in use by any goroutine. + putBuffer(cap(res.data), res.data) + } + if h := z.OnBlockDone; h != nil { + h(n) + } + close(c) + } + }() + return z +} + +// newBuffers instantiates new buffers which size matches the one in Header. +// The returned buffers are for decompression and compression respectively. +func (z *Writer) newBuffers() { + bSize := z.Header.BlockMaxSize + buf := getBuffer(bSize) + z.data = buf[:bSize] // Uncompressed buffer is the first half. +} + +// freeBuffers puts the writer's buffers back to the pool. +func (z *Writer) freeBuffers() { + // Put the buffer back into the pool, if any. + putBuffer(z.Header.BlockMaxSize, z.data) + z.data = nil } // writeHeader builds and writes the header (magic+header) to the underlying io.Writer. func (z *Writer) writeHeader() error { - // Default to 4Mb if BlockMaxSize is not set + // Default to 4Mb if BlockMaxSize is not set. if z.Header.BlockMaxSize == 0 { - z.Header.BlockMaxSize = 4 << 20 + z.Header.BlockMaxSize = blockSize4M } - // the only option that need to be validated - bSize, ok := bsMapValue[z.Header.BlockMaxSize] - if !ok { - return fmt.Errorf("lz4: invalid block max size: %d", z.Header.BlockMaxSize) + // The only option that needs to be validated. + bSize := z.Header.BlockMaxSize + if !isValidBlockSize(z.Header.BlockMaxSize) { + return fmt.Errorf("lz4: invalid block max size: %d", bSize) } + // Allocate the compressed/uncompressed buffers. + // The compressed buffer cannot exceed the uncompressed one. + z.newBuffers() + z.idx = 0 - // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes - // Size and DictID are optional - var buf [19]byte + // Size is optional. + buf := z.buf[:] - // set the fixed size data: magic number, block max size and flags + // Set the fixed size data: magic number, block max size and flags. binary.LittleEndian.PutUint32(buf[0:], frameMagic) flg := byte(Version << 6) - if !z.Header.BlockDependency { - flg |= 1 << 5 - } + flg |= 1 << 5 // No block dependency. if z.Header.BlockChecksum { flg |= 1 << 4 } @@ -69,259 +143,190 @@ func (z *Writer) writeHeader() error { if !z.Header.NoChecksum { flg |= 1 << 2 } - // if z.Header.Dict { - // flg |= 1 - // } buf[4] = flg - buf[5] = bSize << 4 + buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4 - // current buffer size: magic(4) + flags(1) + block max size (1) + // Current buffer size: magic(4) + flags(1) + block max size (1). n := 6 - // optional items + // Optional items. if z.Header.Size > 0 { binary.LittleEndian.PutUint64(buf[n:], z.Header.Size) n += 8 } - // if z.Header.Dict { - // binary.LittleEndian.PutUint32(buf[n:], z.Header.DictID) - // n += 4 - // } - // header checksum includes the flags, block max size and optional Size and DictID - z.checksum.Write(buf[4:n]) - buf[n] = byte(z.checksum.Sum32() >> 8 & 0xFF) + // The header checksum includes the flags, block max size and optional Size. + buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF) z.checksum.Reset() - // header ready, write it out + // Header ready, write it out. 
if _, err := z.dst.Write(buf[0 : n+1]); err != nil { return err } z.Header.done = true - - // initialize buffers dependent on header info - z.zbCompressBuf = make([]byte, winSize+z.BlockMaxSize) + if debugFlag { + debug("wrote header %v", z.Header) + } return nil } // Write compresses data from the supplied buffer into the underlying io.Writer. // Write does not return until the data has been written. -// -// If the input buffer is large enough (typically in multiples of BlockMaxSize) -// the data will be compressed concurrently. -// -// Write never buffers any data unless in BlockDependency mode where it may -// do so until it has 64Kb of data, after which it never buffers any. -func (z *Writer) Write(buf []byte) (n int, err error) { +func (z *Writer) Write(buf []byte) (int, error) { if !z.Header.done { - if err = z.writeHeader(); err != nil { - return + if err := z.writeHeader(); err != nil { + return 0, err } } - - if len(buf) == 0 { - return + if debugFlag { + debug("input buffer len=%d index=%d", len(buf), z.idx) } - if !z.NoChecksum { - z.checksum.Write(buf) - } - - // with block dependency, require at least 64Kb of data to work with - // not having 64Kb only matters initially to setup the first window - bl := 0 - if z.BlockDependency && len(z.window) == 0 { - bl = len(z.data) - z.data = append(z.data, buf...) - if len(z.data) < winSize { - return len(buf), nil - } - buf = z.data - z.data = nil - } - - // Break up the input buffer into BlockMaxSize blocks, provisioning the left over block. - // Then compress into each of them concurrently if possible (no dependency). - var ( - zb block - wbuf = buf - zn = len(wbuf) / z.BlockMaxSize - zi = 0 - leftover = len(buf) % z.BlockMaxSize - ) - -loop: - for zi < zn { - if z.BlockDependency { - if zi == 0 { - // first block does not have the window - zb.data = append(z.window, wbuf[:z.BlockMaxSize]...) - zb.offset = len(z.window) - wbuf = wbuf[z.BlockMaxSize-winSize:] - } else { - // set the uncompressed data including the window from previous block - zb.data = wbuf[:z.BlockMaxSize+winSize] - zb.offset = winSize - wbuf = wbuf[z.BlockMaxSize:] + zn := len(z.data) + var n int + for len(buf) > 0 { + if z.idx == 0 && len(buf) >= zn { + // Avoid a copy as there is enough data for a block. + if err := z.compressBlock(buf[:zn]); err != nil { + return n, err } - } else { - zb.data = wbuf[:z.BlockMaxSize] - wbuf = wbuf[z.BlockMaxSize:] + n += zn + buf = buf[zn:] + continue + } + // Accumulate the data to be compressed. + m := copy(z.data[z.idx:], buf) + n += m + z.idx += m + buf = buf[m:] + if debugFlag { + debug("%d bytes copied to buf, current index %d", n, z.idx) } - goto write - } - - // left over - if leftover > 0 { - zb = block{data: wbuf} - if z.BlockDependency { - if zn == 0 { - zb.data = append(z.window, zb.data...) - zb.offset = len(z.window) - } else { - zb.offset = winSize + if z.idx < len(z.data) { + // Buffer not filled. + if debugFlag { + debug("need more data for compression") } + return n, nil } - leftover = 0 - goto write - } - - if z.BlockDependency { - if len(z.window) == 0 { - z.window = make([]byte, winSize) - } - // last buffer may be shorter than the window - if len(buf) >= winSize { - copy(z.window, buf[len(buf)-winSize:]) - } else { - copy(z.window, z.window[len(buf):]) - copy(z.window[len(buf)+1:], buf) + // Buffer full. 
+ if err := z.compressBlock(z.data); err != nil { + return n, err } + z.idx = 0 } - return - -write: - zb = z.compressBlock(zb) - _, err = z.writeBlock(zb) + return n, nil +} - written := len(zb.data) - if bl > 0 { - if written >= bl { - written -= bl - bl = 0 - } else { - bl -= written - written = 0 - } +// compressBlock compresses a block. +func (z *Writer) compressBlock(data []byte) error { + if !z.NoChecksum { + _, _ = z.checksum.Write(data) } - n += written - // remove the window in zb.data - if z.BlockDependency { - if zi == 0 { - n -= len(z.window) - } else { - n -= winSize - } - } - if err != nil { - return + if z.c != nil { + c := make(chan zResult) + z.c <- c // Send now to guarantee order + go writerCompressBlock(c, z.Header, data) + return nil } - zi++ - goto loop -} -// compressBlock compresses a block. -func (z *Writer) compressBlock(zb block) block { - // compressed block size cannot exceed the input's - var ( - n int - err error - zbuf = z.zbCompressBuf - ) - if z.HighCompression { - n, err = CompressBlockHC(zb.data, zbuf, zb.offset) + zdata := z.data[z.Header.BlockMaxSize:cap(z.data)] + // The compressed block size cannot exceed the input's. + var zn int + + if level := z.Header.CompressionLevel; level != 0 { + zn, _ = CompressBlockHC(data, zdata, level) } else { - n, err = compressGenerationalBlock(zb.data, zbuf, zb.offset, z.currentGeneration, z.hashTable) - z.currentGeneration++ - if z.currentGeneration == 0 { // wrapped around, reset table - z.hashTable = make([]hashEntry, hashTableSize) - } + zn, _ = CompressBlock(data, zdata, z.hashtable[:]) } - // compressible and compressed size smaller than decompressed: ok! - if err == nil && n > 0 && len(zb.zdata) < len(zb.data) { - zb.compressed = true - zb.zdata = zbuf[:n] + var bLen uint32 + if debugFlag { + debug("block compression %d => %d", len(data), zn) + } + if zn > 0 && zn < len(data) { + // Compressible and compressed size smaller than uncompressed: ok! + bLen = uint32(zn) + zdata = zdata[:zn] } else { - zb.zdata = zb.data[zb.offset:] + // Uncompressed block. + bLen = uint32(len(data)) | compressedBlockFlag + zdata = data } - - if z.BlockChecksum { - xxh := hashPool.Get() - xxh.Write(zb.zdata) - zb.checksum = xxh.Sum32() - hashPool.Put(xxh) + if debugFlag { + debug("block compression to be written len=%d data len=%d", bLen, len(zdata)) } - return zb -} - -// writeBlock writes a frame block to the underlying io.Writer (size, data). -func (z *Writer) writeBlock(zb block) (int, error) { - bLen := uint32(len(zb.zdata)) - if !zb.compressed { - bLen |= 1 << 31 + // Write the block. 
+ if err := z.writeUint32(bLen); err != nil { + return err } - - n := 0 - - binary.LittleEndian.PutUint32(z.writeSizeBuf, bLen) - n, err := z.dst.Write(z.writeSizeBuf) + written, err := z.dst.Write(zdata) if err != nil { - return n, err + return err } - - m, err := z.dst.Write(zb.zdata) - n += m - if err != nil { - return n, err + if h := z.OnBlockDone; h != nil { + h(written) } - if z.BlockChecksum { - binary.LittleEndian.PutUint32(z.writeSizeBuf, zb.checksum) - m, err := z.dst.Write(z.writeSizeBuf) - n += m - - if err != nil { - return n, err + if !z.BlockChecksum { + if debugFlag { + debug("current frame checksum %x", z.checksum.Sum32()) } + return nil } - - return n, nil + checksum := xxh32.ChecksumZero(zdata) + if debugFlag { + debug("block checksum %x", checksum) + defer func() { debug("current frame checksum %x", z.checksum.Sum32()) }() + } + return z.writeUint32(checksum) } // Flush flushes any pending compressed data to the underlying writer. // Flush does not return until the data has been written. // If the underlying writer returns an error, Flush returns that error. -// -// Flush is only required when in BlockDependency mode and the total of -// data written is less than 64Kb. func (z *Writer) Flush() error { - if len(z.data) == 0 { + if debugFlag { + debug("flush with index %d", z.idx) + } + if z.idx == 0 { return nil } - zb := z.compressBlock(block{data: z.data}) - if _, err := z.writeBlock(zb); err != nil { - return err + data := z.data[:z.idx] + z.idx = 0 + if z.c == nil { + return z.compressBlock(data) } + if !z.NoChecksum { + _, _ = z.checksum.Write(data) + } + c := make(chan zResult) + z.c <- c + writerCompressBlock(c, z.Header, data) return nil } +func (z *Writer) close() error { + if z.c == nil { + return nil + } + // Send a sentinel block (no data to compress) to terminate the writer main goroutine. + c := make(chan zResult) + z.c <- c + c <- zResult{} + // Wait for the main goroutine to complete. + <-c + // At this point the main goroutine has shut down or is about to return. + z.c = nil + return z.err +} + // Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. func (z *Writer) Close() error { if !z.Header.done { @@ -329,55 +334,75 @@ func (z *Writer) Close() error { return err } } - - // buffered data for the block dependency window - if z.BlockDependency && len(z.data) > 0 { - zb := block{data: z.data} - if _, err := z.writeBlock(z.compressBlock(zb)); err != nil { - return err - } + if err := z.Flush(); err != nil { + return err + } + if err := z.close(); err != nil { + return err } + z.freeBuffers() - if err := binary.Write(z.dst, binary.LittleEndian, uint32(0)); err != nil { + if debugFlag { + debug("writing last empty block") + } + if err := z.writeUint32(0); err != nil { return err } - if !z.NoChecksum { - if err := binary.Write(z.dst, binary.LittleEndian, z.checksum.Sum32()); err != nil { - return err - } + if z.NoChecksum { + return nil } - return nil + checksum := z.checksum.Sum32() + if debugFlag { + debug("stream checksum %x", checksum) + } + return z.writeUint32(checksum) } // Reset clears the state of the Writer z such that it is equivalent to its // initial state from NewWriter, but instead writing to w. // No access to the underlying io.Writer is performed. 
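Putting the Writer changes together, a round trip with the new options looks roughly like the sketch below. The concurrency level, block size and compression level are arbitrary example values, and the file names are placeholders:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/pierrec/lz4"
)

func main() {
	in, err := os.Open("input.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	out, err := os.Create("input.bin.lz4")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	zw := lz4.NewWriter(out).WithConcurrency(-1) // negative => GOMAXPROCS workers
	zw.Header.BlockMaxSize = 256 << 10           // one of 64KB/256KB/1MB/4MB
	zw.Header.CompressionLevel = 4               // 0 keeps the fast CompressBlock path
	zw.OnBlockDone = func(n int) { log.Printf("wrote block: %d bytes", n) }

	if _, err := io.Copy(zw, in); err != nil {
		log.Fatal(err)
	}
	// Close flushes any buffered data, writes the end-of-frame marker and,
	// unless NoChecksum is set, the frame checksum.
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
}
```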
func (z *Writer) Reset(w io.Writer) { - z.Header = Header{} + n := cap(z.c) + _ = z.close() + z.freeBuffers() + z.Header.Reset() z.dst = w z.checksum.Reset() - z.data = nil - z.window = nil + z.idx = 0 + z.err = nil + z.WithConcurrency(n) } -// ReadFrom compresses the data read from the io.Reader and writes it to the underlying io.Writer. -// Returns the number of bytes read. -// It does not close the Writer. -func (z *Writer) ReadFrom(r io.Reader) (n int64, err error) { - cpus := runtime.GOMAXPROCS(0) - buf := make([]byte, cpus*z.BlockMaxSize) - for { - m, er := io.ReadFull(r, buf) - n += int64(m) - if er == nil || er == io.ErrUnexpectedEOF || er == io.EOF { - if _, err = z.Write(buf[:m]); err != nil { - return - } - if er == nil { - continue - } - return - } - return n, er +// writeUint32 writes a uint32 to the underlying writer. +func (z *Writer) writeUint32(x uint32) error { + buf := z.buf[:4] + binary.LittleEndian.PutUint32(buf, x) + _, err := z.dst.Write(buf) + return err +} + +// writerCompressBlock compresses data into a pooled buffer and writes its result +// out to the input channel. +func writerCompressBlock(c chan zResult, header Header, data []byte) { + zdata := getBuffer(header.BlockMaxSize) + // The compressed block size cannot exceed the input's. + var zn int + if level := header.CompressionLevel; level != 0 { + zn, _ = CompressBlockHC(data, zdata, level) + } else { + var hashTable [winSize]int + zn, _ = CompressBlock(data, zdata, hashTable[:]) + } + var res zResult + if zn > 0 && zn < len(data) { + res.size = uint32(zn) + res.data = zdata[:zn] + } else { + res.size = uint32(len(data)) | compressedBlockFlag + res.data = data + } + if header.BlockChecksum { + res.checksum = xxh32.ChecksumZero(res.data) } + c <- res } diff --git a/vendor/github.com/pierrec/xxHash/LICENSE b/vendor/github.com/pierrec/xxHash/LICENSE deleted file mode 100644 index c1418f3f..00000000 --- a/vendor/github.com/pierrec/xxHash/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2014, Pierre Curto -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of xxHash nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
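One design choice in the writer.go changes above is worth calling out: WithConcurrency's consumer goroutine preserves block order because each per-block result channel is sent on z.c before writerCompressBlock starts, so results are drained in submission order no matter how the compressions interleave. A stand-alone sketch of the same pattern, with purely illustrative names:

```go
package main

import "fmt"

func main() {
	// Channel of per-job result channels; its capacity bounds the number of
	// jobs in flight, much like cap(z.c) above.
	jobs := make(chan chan string, 4)

	// Consumer: drains results strictly in the order jobs were enqueued.
	done := make(chan struct{})
	go func() {
		for c := range jobs {
			fmt.Println(<-c) // blocks until this particular job finishes
		}
		close(done)
	}()

	// Producer: enqueue the result channel first, then start the worker, so
	// the output order is fixed before any work can race ahead.
	for i := 0; i < 8; i++ {
		c := make(chan string, 1)
		jobs <- c
		go func(i int, c chan string) {
			c <- fmt.Sprintf("block %d compressed", i)
		}(i, c)
	}
	close(jobs)
	<-done
}
```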
- diff --git a/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go b/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go deleted file mode 100644 index 411504e4..00000000 --- a/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go +++ /dev/null @@ -1,205 +0,0 @@ -// Package xxHash32 implements the very fast xxHash hashing algorithm (32 bits version). -// (https://github.com/Cyan4973/xxHash/) -package xxHash32 - -import "hash" - -const ( - prime32_1 = 2654435761 - prime32_2 = 2246822519 - prime32_3 = 3266489917 - prime32_4 = 668265263 - prime32_5 = 374761393 -) - -type xxHash struct { - seed uint32 - v1 uint32 - v2 uint32 - v3 uint32 - v4 uint32 - totalLen uint64 - buf [16]byte - bufused int -} - -// New returns a new Hash32 instance. -func New(seed uint32) hash.Hash32 { - xxh := &xxHash{seed: seed} - xxh.Reset() - return xxh -} - -// Sum appends the current hash to b and returns the resulting slice. -// It does not change the underlying hash state. -func (xxh xxHash) Sum(b []byte) []byte { - h32 := xxh.Sum32() - return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) -} - -// Reset resets the Hash to its initial state. -func (xxh *xxHash) Reset() { - xxh.v1 = xxh.seed + prime32_1 + prime32_2 - xxh.v2 = xxh.seed + prime32_2 - xxh.v3 = xxh.seed - xxh.v4 = xxh.seed - prime32_1 - xxh.totalLen = 0 - xxh.bufused = 0 -} - -// Size returns the number of bytes returned by Sum(). -func (xxh *xxHash) Size() int { - return 4 -} - -// BlockSize gives the minimum number of bytes accepted by Write(). -func (xxh *xxHash) BlockSize() int { - return 1 -} - -// Write adds input bytes to the Hash. -// It never returns an error. -func (xxh *xxHash) Write(input []byte) (int, error) { - n := len(input) - m := xxh.bufused - - xxh.totalLen += uint64(n) - - r := len(xxh.buf) - m - if n < r { - copy(xxh.buf[m:], input) - xxh.bufused += len(input) - return n, nil - } - - p := 0 - if m > 0 { - // some data left from previous update - copy(xxh.buf[xxh.bufused:], input[:r]) - xxh.bufused += len(input) - r - - // fast rotl(13) - p32 := xxh.v1 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2 - xxh.v1 = (p32<<13 | p32>>19) * prime32_1 - p += 4 - p32 = xxh.v2 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2 - xxh.v2 = (p32<<13 | p32>>19) * prime32_1 - p += 4 - p32 = xxh.v3 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2 - xxh.v3 = (p32<<13 | p32>>19) * prime32_1 - p += 4 - p32 = xxh.v4 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2 - xxh.v4 = (p32<<13 | p32>>19) * prime32_1 - - p = r - xxh.bufused = 0 - } - - for n := n - 16; p <= n; { - p32 := xxh.v1 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2 - xxh.v1 = (p32<<13 | p32>>19) * prime32_1 - p += 4 - p32 = xxh.v2 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2 - xxh.v2 = (p32<<13 | p32>>19) * prime32_1 - p += 4 - p32 = xxh.v3 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2 - xxh.v3 = (p32<<13 | p32>>19) * prime32_1 - p += 4 - p32 = xxh.v4 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2 - xxh.v4 = (p32<<13 | p32>>19) * prime32_1 - p += 4 - } - - copy(xxh.buf[xxh.bufused:], input[p:]) - 
xxh.bufused += len(input) - p - - return n, nil -} - -// Sum32 returns the 32 bits Hash value. -func (xxh *xxHash) Sum32() uint32 { - h32 := uint32(xxh.totalLen) - if xxh.totalLen >= 16 { - h32 += ((xxh.v1 << 1) | (xxh.v1 >> 31)) + - ((xxh.v2 << 7) | (xxh.v2 >> 25)) + - ((xxh.v3 << 12) | (xxh.v3 >> 20)) + - ((xxh.v4 << 18) | (xxh.v4 >> 14)) - } else { - h32 += xxh.seed + prime32_5 - } - - p := 0 - n := xxh.bufused - for n := n - 4; p <= n; p += 4 { - h32 += (uint32(xxh.buf[p+3])<<24 | uint32(xxh.buf[p+2])<<16 | uint32(xxh.buf[p+1])<<8 | uint32(xxh.buf[p])) * prime32_3 - h32 = ((h32 << 17) | (h32 >> 15)) * prime32_4 - } - for ; p < n; p++ { - h32 += uint32(xxh.buf[p]) * prime32_5 - h32 = ((h32 << 11) | (h32 >> 21)) * prime32_1 - } - - h32 ^= h32 >> 15 - h32 *= prime32_2 - h32 ^= h32 >> 13 - h32 *= prime32_3 - h32 ^= h32 >> 16 - - return h32 -} - -// Checksum returns the 32bits Hash value. -func Checksum(input []byte, seed uint32) uint32 { - n := len(input) - h32 := uint32(n) - - if n < 16 { - h32 += seed + prime32_5 - } else { - v1 := seed + prime32_1 + prime32_2 - v2 := seed + prime32_2 - v3 := seed - v4 := seed - prime32_1 - p := 0 - for p <= n-16 { - v1 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2 - v1 = (v1<<13 | v1>>19) * prime32_1 - p += 4 - v2 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2 - v2 = (v2<<13 | v2>>19) * prime32_1 - p += 4 - v3 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2 - v3 = (v3<<13 | v3>>19) * prime32_1 - p += 4 - v4 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2 - v4 = (v4<<13 | v4>>19) * prime32_1 - p += 4 - } - input = input[p:] - n -= p - h32 += ((v1 << 1) | (v1 >> 31)) + - ((v2 << 7) | (v2 >> 25)) + - ((v3 << 12) | (v3 >> 20)) + - ((v4 << 18) | (v4 >> 14)) - } - - p := 0 - for p <= n-4 { - h32 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_3 - h32 = ((h32 << 17) | (h32 >> 15)) * prime32_4 - p += 4 - } - for p < n { - h32 += uint32(input[p]) * prime32_5 - h32 = ((h32 << 11) | (h32 >> 21)) * prime32_1 - p++ - } - - h32 ^= h32 >> 15 - h32 *= prime32_2 - h32 ^= h32 >> 13 - h32 *= prime32_3 - h32 ^= h32 >> 16 - - return h32 -} diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml index 20aa5d04..409a5b63 100644 --- a/vendor/github.com/rcrowley/go-metrics/.travis.yml +++ b/vendor/github.com/rcrowley/go-metrics/.travis.yml @@ -1,10 +1,18 @@ language: go go: - - 1.2 - - 1.3 - - 1.4 - - 1.5 + - "1.3" + - "1.4" + - "1.5" + - "1.6" + - "1.7" + - "1.8" + - "1.9" + - "1.10" + - "1.11" + - "1.12" + - "1.13" + - "1.14" script: - ./validate.sh diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md index 2d1a6dcf..27ddfee8 100644 --- a/vendor/github.com/rcrowley/go-metrics/README.md +++ b/vendor/github.com/rcrowley/go-metrics/README.md @@ -42,12 +42,22 @@ t.Update(47) Register() is not threadsafe. 
For threadsafe metric registration use GetOrRegister: -``` +```go t := metrics.GetOrRegisterTimer("account.create.latency", nil) t.Time(func() {}) t.Update(47) ``` +**NOTE:** Be sure to unregister short-lived meters and timers otherwise they will +leak memory: + +```go +// Will call Stop() on the Meter to allow for garbage collection +metrics.Unregister("quux") +// Or similarly for a Timer that embeds a Meter +metrics.Unregister("bang") +``` + Periodically log every metric in human-readable form to standard error: ```go @@ -81,12 +91,13 @@ issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and ```go import "github.com/vrischmann/go-metrics-influxdb" -go influxdb.Influxdb(metrics.DefaultRegistry, 10e9, &influxdb.Config{ - Host: "127.0.0.1:8086", - Database: "metrics", - Username: "test", - Password: "test", -}) +go influxdb.InfluxDB(metrics.DefaultRegistry, + 10e9, + "127.0.0.1:8086", + "database-name", + "username", + "password" +) ``` Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato): @@ -146,8 +157,15 @@ Publishing Metrics Clients are available for the following destinations: -* Librato - [https://github.com/mihasya/go-metrics-librato](https://github.com/mihasya/go-metrics-librato) -* Graphite - [https://github.com/cyberdelia/go-metrics-graphite](https://github.com/cyberdelia/go-metrics-graphite) -* InfluxDB - [https://github.com/vrischmann/go-metrics-influxdb](https://github.com/vrischmann/go-metrics-influxdb) -* Ganglia - [https://github.com/appscode/metlia](https://github.com/appscode/metlia) -* Prometheus - [https://github.com/deathowl/go-metrics-prometheus](https://github.com/deathowl/go-metrics-prometheus) +* AppOptics - https://github.com/ysamlan/go-metrics-appoptics +* Librato - https://github.com/mihasya/go-metrics-librato +* Graphite - https://github.com/cyberdelia/go-metrics-graphite +* InfluxDB - https://github.com/vrischmann/go-metrics-influxdb +* Ganglia - https://github.com/appscode/metlia +* Prometheus - https://github.com/deathowl/go-metrics-prometheus +* DataDog - https://github.com/syntaqx/go-metrics-datadog +* SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx +* Honeycomb - https://github.com/getspine/go-metrics-honeycomb +* Wavefront - https://github.com/wavefrontHQ/go-metrics-wavefront +* Open-Falcon - https://github.com/g4zhuj/go-metrics-falcon +* AWS CloudWatch - [https://github.com/savaki/cloudmetrics](https://github.com/savaki/cloudmetrics) diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go index 043ccefa..179e5aae 100644 --- a/vendor/github.com/rcrowley/go-metrics/debug.go +++ b/vendor/github.com/rcrowley/go-metrics/debug.go @@ -2,6 +2,7 @@ package metrics import ( "runtime/debug" + "sync" "time" ) @@ -16,7 +17,8 @@ var ( } ReadGCStats Timer } - gcStats debug.GCStats + gcStats debug.GCStats + registerDebugMetricsOnce = sync.Once{} ) // Capture new values for the Go garbage collector statistics exported in @@ -54,19 +56,21 @@ func CaptureDebugGCStatsOnce(r Registry) { // debug.GCStats. The metrics are named by their fully-qualified Go symbols, // i.e. debug.GCStats.PauseTotal. 
func RegisterDebugGCStats(r Registry) { - debugMetrics.GCStats.LastGC = NewGauge() - debugMetrics.GCStats.NumGC = NewGauge() - debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015)) - //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015)) - debugMetrics.GCStats.PauseTotal = NewGauge() - debugMetrics.ReadGCStats = NewTimer() + registerDebugMetricsOnce.Do(func() { + debugMetrics.GCStats.LastGC = NewGauge() + debugMetrics.GCStats.NumGC = NewGauge() + debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015)) + //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015)) + debugMetrics.GCStats.PauseTotal = NewGauge() + debugMetrics.ReadGCStats = NewTimer() - r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC) - r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC) - r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause) - //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles) - r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal) - r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats) + r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC) + r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC) + r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause) + //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles) + r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal) + r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats) + }) } // Allocate an initial slice for gcStats.Pause to avoid allocations during diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go index 694a1d03..a8183dd7 100644 --- a/vendor/github.com/rcrowley/go-metrics/ewma.go +++ b/vendor/github.com/rcrowley/go-metrics/ewma.go @@ -79,16 +79,15 @@ func (NilEWMA) Update(n int64) {} type StandardEWMA struct { uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment alpha float64 - rate float64 - init bool + rate uint64 + init uint32 mutex sync.Mutex } // Rate returns the moving average rate of events per second. func (a *StandardEWMA) Rate() float64 { - a.mutex.Lock() - defer a.mutex.Unlock() - return a.rate * float64(1e9) + currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) * float64(1e9) + return currentRate } // Snapshot returns a read-only copy of the EWMA. @@ -99,17 +98,38 @@ func (a *StandardEWMA) Snapshot() EWMA { // Tick ticks the clock to update the moving average. It assumes it is called // every five seconds. func (a *StandardEWMA) Tick() { + // Optimization to avoid mutex locking in the hot-path. + if atomic.LoadUint32(&a.init) == 1 { + a.updateRate(a.fetchInstantRate()) + } else { + // Slow-path: this is only needed on the first Tick() and preserves transactional updating + // of init and rate in the else block. The first conditional is needed below because + // a different thread could have set a.init = 1 between the time of the first atomic load and when + // the lock was acquired. + a.mutex.Lock() + if atomic.LoadUint32(&a.init) == 1 { + // The fetchInstantRate() uses atomic loading, which is unecessary in this critical section + // but again, this section is only invoked on the first successful Tick() operation. 
+ a.updateRate(a.fetchInstantRate()) + } else { + atomic.StoreUint32(&a.init, 1) + atomic.StoreUint64(&a.rate, math.Float64bits(a.fetchInstantRate())) + } + a.mutex.Unlock() + } +} + +func (a *StandardEWMA) fetchInstantRate() float64 { count := atomic.LoadInt64(&a.uncounted) atomic.AddInt64(&a.uncounted, -count) instantRate := float64(count) / float64(5e9) - a.mutex.Lock() - defer a.mutex.Unlock() - if a.init { - a.rate += a.alpha * (instantRate - a.rate) - } else { - a.init = true - a.rate = instantRate - } + return instantRate +} + +func (a *StandardEWMA) updateRate(instantRate float64) { + currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) + currentRate += a.alpha * (instantRate - currentRate) + atomic.StoreUint64(&a.rate, math.Float64bits(currentRate)) } // Update adds n uncounted events. diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go index 6f93920b..3962e6db 100644 --- a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go +++ b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go @@ -1,6 +1,9 @@ package metrics -import "sync" +import ( + "math" + "sync/atomic" +) // GaugeFloat64s hold a float64 value that can be set arbitrarily. type GaugeFloat64 interface { @@ -85,8 +88,7 @@ func (NilGaugeFloat64) Value() float64 { return 0.0 } // StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses // sync.Mutex to manage a single float64 value. type StandardGaugeFloat64 struct { - mutex sync.Mutex - value float64 + value uint64 } // Snapshot returns a read-only copy of the gauge. @@ -96,16 +98,12 @@ func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 { // Update updates the gauge's value. func (g *StandardGaugeFloat64) Update(v float64) { - g.mutex.Lock() - defer g.mutex.Unlock() - g.value = v + atomic.StoreUint64(&g.value, math.Float64bits(v)) } // Value returns the gauge's current value. func (g *StandardGaugeFloat64) Value() float64 { - g.mutex.Lock() - defer g.mutex.Unlock() - return g.value + return math.Float64frombits(atomic.LoadUint64(&g.value)) } // FunctionalGaugeFloat64 returns value from given function diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go index 2fdcbcfb..174b9477 100644 --- a/vendor/github.com/rcrowley/go-metrics/json.go +++ b/vendor/github.com/rcrowley/go-metrics/json.go @@ -9,63 +9,7 @@ import ( // MarshalJSON returns a byte slice containing a JSON representation of all // the metrics in the Registry. 
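StandardGaugeFloat64 above now stores the value as a uint64 bit pattern and swaps the mutex for atomic loads and stores, so Update and Value no longer block each other. The public API is unchanged; a brief sketch, assuming the library's usual registered-gauge constructor and an arbitrary metric name:

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// Update stores math.Float64bits(v) atomically; Value loads it back,
	// so concurrent readers never contend on a lock.
	g := metrics.NewRegisteredGaugeFloat64("cpu.load", nil) // nil => DefaultRegistry
	g.Update(0.75)
	fmt.Println(g.Value())
}
```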
func (r *StandardRegistry) MarshalJSON() ([]byte, error) { - data := make(map[string]map[string]interface{}) - r.Each(func(name string, i interface{}) { - values := make(map[string]interface{}) - switch metric := i.(type) { - case Counter: - values["count"] = metric.Count() - case Gauge: - values["value"] = metric.Value() - case GaugeFloat64: - values["value"] = metric.Value() - case Healthcheck: - values["error"] = nil - metric.Check() - if err := metric.Error(); nil != err { - values["error"] = metric.Error().Error() - } - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - values["count"] = h.Count() - values["min"] = h.Min() - values["max"] = h.Max() - values["mean"] = h.Mean() - values["stddev"] = h.StdDev() - values["median"] = ps[0] - values["75%"] = ps[1] - values["95%"] = ps[2] - values["99%"] = ps[3] - values["99.9%"] = ps[4] - case Meter: - m := metric.Snapshot() - values["count"] = m.Count() - values["1m.rate"] = m.Rate1() - values["5m.rate"] = m.Rate5() - values["15m.rate"] = m.Rate15() - values["mean.rate"] = m.RateMean() - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - values["count"] = t.Count() - values["min"] = t.Min() - values["max"] = t.Max() - values["mean"] = t.Mean() - values["stddev"] = t.StdDev() - values["median"] = ps[0] - values["75%"] = ps[1] - values["95%"] = ps[2] - values["99%"] = ps[3] - values["99.9%"] = ps[4] - values["1m.rate"] = t.Rate1() - values["5m.rate"] = t.Rate5() - values["15m.rate"] = t.Rate15() - values["mean.rate"] = t.RateMean() - } - data[name] = values - }) - return json.Marshal(data) + return json.Marshal(r.GetAll()) } // WriteJSON writes metrics from the given registry periodically to the @@ -83,5 +27,5 @@ func WriteJSONOnce(r Registry, w io.Writer) { } func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) { - return json.Marshal(p.underlying) + return json.Marshal(p.GetAll()) } diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go index f8074c04..2614a0a3 100644 --- a/vendor/github.com/rcrowley/go-metrics/log.go +++ b/vendor/github.com/rcrowley/go-metrics/log.go @@ -8,17 +8,37 @@ type Logger interface { Printf(format string, v ...interface{}) } +// Log outputs each metric in the given registry periodically using the given logger. func Log(r Registry, freq time.Duration, l Logger) { LogScaled(r, freq, time.Nanosecond, l) } -// Output each metric in the given registry periodically using the given +// LogOnCue outputs each metric in the given registry on demand through the channel +// using the given logger +func LogOnCue(r Registry, ch chan interface{}, l Logger) { + LogScaledOnCue(r, ch, time.Nanosecond, l) +} + +// LogScaled outputs each metric in the given registry periodically using the given // logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos. func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) { + ch := make(chan interface{}) + go func(channel chan interface{}) { + for _ = range time.Tick(freq) { + channel <- struct{}{} + } + }(ch) + LogScaledOnCue(r, ch, scale, l) +} + +// LogScaledOnCue outputs each metric in the given registry on demand through the channel +// using the given logger. Print timings in `scale` units (eg time.Millisecond) rather +// than nanos. 
+func LogScaledOnCue(r Registry, ch chan interface{}, scale time.Duration, l Logger) { du := float64(scale) duSuffix := scale.String()[1:] - for _ = range time.Tick(freq) { + for _ = range ch { r.Each(func(name string, i interface{}) { switch metric := i.(type) { case Counter: diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go index 0389ab0b..223669bc 100644 --- a/vendor/github.com/rcrowley/go-metrics/meter.go +++ b/vendor/github.com/rcrowley/go-metrics/meter.go @@ -1,7 +1,9 @@ package metrics import ( + "math" "sync" + "sync/atomic" "time" ) @@ -15,10 +17,13 @@ type Meter interface { Rate15() float64 RateMean() float64 Snapshot() Meter + Stop() } // GetOrRegisterMeter returns an existing Meter or constructs and registers a // new StandardMeter. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. func GetOrRegisterMeter(name string, r Registry) Meter { if nil == r { r = DefaultRegistry @@ -27,6 +32,7 @@ func GetOrRegisterMeter(name string, r Registry) Meter { } // NewMeter constructs a new StandardMeter and launches a goroutine. +// Be sure to call Stop() once the meter is of no use to allow for garbage collection. func NewMeter() Meter { if UseNilMetrics { return NilMeter{} @@ -34,7 +40,7 @@ func NewMeter() Meter { m := newStandardMeter() arbiter.Lock() defer arbiter.Unlock() - arbiter.meters = append(arbiter.meters, m) + arbiter.meters[m] = struct{}{} if !arbiter.started { arbiter.started = true go arbiter.tick() @@ -44,6 +50,8 @@ func NewMeter() Meter { // NewMeter constructs and registers a new StandardMeter and launches a // goroutine. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. func NewRegisteredMeter(name string, r Registry) Meter { c := NewMeter() if nil == r { @@ -56,7 +64,7 @@ func NewRegisteredMeter(name string, r Registry) Meter { // MeterSnapshot is a read-only copy of another Meter. type MeterSnapshot struct { count int64 - rate1, rate5, rate15, rateMean float64 + rate1, rate5, rate15, rateMean uint64 } // Count returns the count of events at the time the snapshot was taken. @@ -69,23 +77,26 @@ func (*MeterSnapshot) Mark(n int64) { // Rate1 returns the one-minute moving average rate of events per second at the // time the snapshot was taken. -func (m *MeterSnapshot) Rate1() float64 { return m.rate1 } +func (m *MeterSnapshot) Rate1() float64 { return math.Float64frombits(m.rate1) } // Rate5 returns the five-minute moving average rate of events per second at // the time the snapshot was taken. -func (m *MeterSnapshot) Rate5() float64 { return m.rate5 } +func (m *MeterSnapshot) Rate5() float64 { return math.Float64frombits(m.rate5) } // Rate15 returns the fifteen-minute moving average rate of events per second // at the time the snapshot was taken. -func (m *MeterSnapshot) Rate15() float64 { return m.rate15 } +func (m *MeterSnapshot) Rate15() float64 { return math.Float64frombits(m.rate15) } // RateMean returns the meter's mean rate of events per second at the time the // snapshot was taken. -func (m *MeterSnapshot) RateMean() float64 { return m.rateMean } +func (m *MeterSnapshot) RateMean() float64 { return math.Float64frombits(m.rateMean) } // Snapshot returns the snapshot. func (m *MeterSnapshot) Snapshot() Meter { return m } +// Stop is a no-op. +func (m *MeterSnapshot) Stop() {} + // NilMeter is a no-op Meter. 
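The LogOnCue and LogScaledOnCue variants added above decouple metric dumps from a fixed ticker: each value sent on the channel triggers one pass over the registry. A sketch of on-demand dumps driven by a Unix signal; the signal choice and logger setup are just examples:

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	cue := make(chan interface{})
	go metrics.LogScaledOnCue(metrics.DefaultRegistry, cue, time.Millisecond,
		log.New(os.Stderr, "metrics: ", log.Lmicroseconds))

	// Dump the registry whenever SIGUSR1 arrives instead of on a timer.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGUSR1)
	for range sig {
		cue <- struct{}{}
	}
}
```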
type NilMeter struct{} @@ -110,12 +121,15 @@ func (NilMeter) RateMean() float64 { return 0.0 } // Snapshot is a no-op. func (NilMeter) Snapshot() Meter { return NilMeter{} } +// Stop is a no-op. +func (NilMeter) Stop() {} + // StandardMeter is the standard implementation of a Meter. type StandardMeter struct { - lock sync.RWMutex snapshot *MeterSnapshot a1, a5, a15 EWMA startTime time.Time + stopped uint32 } func newStandardMeter() *StandardMeter { @@ -128,19 +142,28 @@ func newStandardMeter() *StandardMeter { } } +// Stop stops the meter, Mark() will be a no-op if you use it after being stopped. +func (m *StandardMeter) Stop() { + if atomic.CompareAndSwapUint32(&m.stopped, 0, 1) { + arbiter.Lock() + delete(arbiter.meters, m) + arbiter.Unlock() + } +} + // Count returns the number of events recorded. func (m *StandardMeter) Count() int64 { - m.lock.RLock() - count := m.snapshot.count - m.lock.RUnlock() - return count + return atomic.LoadInt64(&m.snapshot.count) } // Mark records the occurance of n events. func (m *StandardMeter) Mark(n int64) { - m.lock.Lock() - defer m.lock.Unlock() - m.snapshot.count += n + if atomic.LoadUint32(&m.stopped) == 1 { + return + } + + atomic.AddInt64(&m.snapshot.count, n) + m.a1.Update(n) m.a5.Update(n) m.a15.Update(n) @@ -149,70 +172,65 @@ func (m *StandardMeter) Mark(n int64) { // Rate1 returns the one-minute moving average rate of events per second. func (m *StandardMeter) Rate1() float64 { - m.lock.RLock() - rate1 := m.snapshot.rate1 - m.lock.RUnlock() - return rate1 + return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate1)) } // Rate5 returns the five-minute moving average rate of events per second. func (m *StandardMeter) Rate5() float64 { - m.lock.RLock() - rate5 := m.snapshot.rate5 - m.lock.RUnlock() - return rate5 + return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate5)) } // Rate15 returns the fifteen-minute moving average rate of events per second. func (m *StandardMeter) Rate15() float64 { - m.lock.RLock() - rate15 := m.snapshot.rate15 - m.lock.RUnlock() - return rate15 + return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate15)) } // RateMean returns the meter's mean rate of events per second. func (m *StandardMeter) RateMean() float64 { - m.lock.RLock() - rateMean := m.snapshot.rateMean - m.lock.RUnlock() - return rateMean + return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rateMean)) } // Snapshot returns a read-only copy of the meter. 
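Because every StandardMeter now sits in the arbiter's map until it is stopped, short-lived meters must be stopped or unregistered to become collectable, which is what the README note earlier in this diff warns about. A brief sketch, with an arbitrary metric name:

```go
package main

import (
	metrics "github.com/rcrowley/go-metrics"
)

func handleBatch() {
	m := metrics.GetOrRegisterMeter("batch.items", nil) // nil => DefaultRegistry
	m.Mark(42)

	// Per the README note above, unregistering also calls Stop(), removing the
	// meter from the ticking arbiter so it can be garbage collected.
	// Mark() becomes a no-op on a stopped meter.
	metrics.Unregister("batch.items")
}

func main() { handleBatch() }
```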
func (m *StandardMeter) Snapshot() Meter { - m.lock.RLock() - snapshot := *m.snapshot - m.lock.RUnlock() - return &snapshot + copiedSnapshot := MeterSnapshot{ + count: atomic.LoadInt64(&m.snapshot.count), + rate1: atomic.LoadUint64(&m.snapshot.rate1), + rate5: atomic.LoadUint64(&m.snapshot.rate5), + rate15: atomic.LoadUint64(&m.snapshot.rate15), + rateMean: atomic.LoadUint64(&m.snapshot.rateMean), + } + return &copiedSnapshot } func (m *StandardMeter) updateSnapshot() { - // should run with write lock held on m.lock - snapshot := m.snapshot - snapshot.rate1 = m.a1.Rate() - snapshot.rate5 = m.a5.Rate() - snapshot.rate15 = m.a15.Rate() - snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds() + rate1 := math.Float64bits(m.a1.Rate()) + rate5 := math.Float64bits(m.a5.Rate()) + rate15 := math.Float64bits(m.a15.Rate()) + rateMean := math.Float64bits(float64(m.Count()) / time.Since(m.startTime).Seconds()) + + atomic.StoreUint64(&m.snapshot.rate1, rate1) + atomic.StoreUint64(&m.snapshot.rate5, rate5) + atomic.StoreUint64(&m.snapshot.rate15, rate15) + atomic.StoreUint64(&m.snapshot.rateMean, rateMean) } func (m *StandardMeter) tick() { - m.lock.Lock() - defer m.lock.Unlock() m.a1.Tick() m.a5.Tick() m.a15.Tick() m.updateSnapshot() } +// meterArbiter ticks meters every 5s from a single goroutine. +// meters are references in a set for future stopping. type meterArbiter struct { sync.RWMutex started bool - meters []*StandardMeter + meters map[*StandardMeter]struct{} ticker *time.Ticker } -var arbiter = meterArbiter{ticker: time.NewTicker(5e9)} +var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})} // Ticks meters on the scheduled interval func (ma *meterArbiter) tick() { @@ -227,7 +245,7 @@ func (ma *meterArbiter) tick() { func (ma *meterArbiter) tickMeters() { ma.RLock() defer ma.RUnlock() - for _, meter := range ma.meters { + for meter := range ma.meters { meter.tick() } } diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go index 2bb7a1e7..a8e67228 100644 --- a/vendor/github.com/rcrowley/go-metrics/registry.go +++ b/vendor/github.com/rcrowley/go-metrics/registry.go @@ -29,6 +29,9 @@ type Registry interface { // Get the metric by the given name or nil if none is registered. Get(string) interface{} + // GetAll metrics in the Registry. + GetAll() map[string]map[string]interface{} + // Gets an existing metric or registers the given one. // The interface can be the metric to register if not found in registry, // or a function returning the metric for lazy instantiation. @@ -51,7 +54,7 @@ type Registry interface { // of names to metrics. type StandardRegistry struct { metrics map[string]interface{} - mutex sync.Mutex + mutex sync.RWMutex } // Create a new registry. @@ -61,15 +64,17 @@ func NewRegistry() Registry { // Call the given function for each registered metric. func (r *StandardRegistry) Each(f func(string, interface{})) { - for name, i := range r.registered() { - f(name, i) + metrics := r.registered() + for i := range metrics { + kv := &metrics[i] + f(kv.name, kv.value) } } // Get the metric by the given name or nil if none is registered. 
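Taken together, the meter now has an explicit lifecycle: NewMeter adds it to the shared arbiter set and Stop removes it so it can be garbage-collected, as the new doc comments above say. A usage sketch, assuming the vendored go-metrics import path:

package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	m := metrics.NewMeter()
	// Without Stop the arbiter keeps a reference and the meter is never collected.
	defer m.Stop()

	m.Mark(42)
	snap := m.Snapshot() // read-only copy built from atomic loads
	fmt.Println(snap.Count(), snap.RateMean())
}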
func (r *StandardRegistry) Get(name string) interface{} { - r.mutex.Lock() - defer r.mutex.Unlock() + r.mutex.RLock() + defer r.mutex.RUnlock() return r.metrics[name] } @@ -78,6 +83,15 @@ func (r *StandardRegistry) Get(name string) interface{} { // The interface can be the metric to register if not found in registry, // or a function returning the metric for lazy instantiation. func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} { + // access the read lock first which should be re-entrant + r.mutex.RLock() + metric, ok := r.metrics[name] + r.mutex.RUnlock() + if ok { + return metric + } + + // only take the write lock if we'll be modifying the metrics map r.mutex.Lock() defer r.mutex.Unlock() if metric, ok := r.metrics[name]; ok { @@ -100,8 +114,8 @@ func (r *StandardRegistry) Register(name string, i interface{}) error { // Run all registered healthchecks. func (r *StandardRegistry) RunHealthchecks() { - r.mutex.Lock() - defer r.mutex.Unlock() + r.mutex.RLock() + defer r.mutex.RUnlock() for _, i := range r.metrics { if h, ok := i.(Healthcheck); ok { h.Check() @@ -109,10 +123,72 @@ func (r *StandardRegistry) RunHealthchecks() { } } +// GetAll metrics in the Registry +func (r *StandardRegistry) GetAll() map[string]map[string]interface{} { + data := make(map[string]map[string]interface{}) + r.Each(func(name string, i interface{}) { + values := make(map[string]interface{}) + switch metric := i.(type) { + case Counter: + values["count"] = metric.Count() + case Gauge: + values["value"] = metric.Value() + case GaugeFloat64: + values["value"] = metric.Value() + case Healthcheck: + values["error"] = nil + metric.Check() + if err := metric.Error(); nil != err { + values["error"] = metric.Error().Error() + } + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + values["count"] = h.Count() + values["min"] = h.Min() + values["max"] = h.Max() + values["mean"] = h.Mean() + values["stddev"] = h.StdDev() + values["median"] = ps[0] + values["75%"] = ps[1] + values["95%"] = ps[2] + values["99%"] = ps[3] + values["99.9%"] = ps[4] + case Meter: + m := metric.Snapshot() + values["count"] = m.Count() + values["1m.rate"] = m.Rate1() + values["5m.rate"] = m.Rate5() + values["15m.rate"] = m.Rate15() + values["mean.rate"] = m.RateMean() + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + values["count"] = t.Count() + values["min"] = t.Min() + values["max"] = t.Max() + values["mean"] = t.Mean() + values["stddev"] = t.StdDev() + values["median"] = ps[0] + values["75%"] = ps[1] + values["95%"] = ps[2] + values["99%"] = ps[3] + values["99.9%"] = ps[4] + values["1m.rate"] = t.Rate1() + values["5m.rate"] = t.Rate5() + values["15m.rate"] = t.Rate15() + values["mean.rate"] = t.RateMean() + } + data[name] = values + }) + return data +} + // Unregister the metric with the given name. 
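GetOrRegister above now serves the common lookup from the read lock and only falls back to the write lock, re-checking under it, when the metric is missing. A generic sketch of that double-checked read/write-lock pattern, using a hypothetical cache type:

package main

import (
	"fmt"
	"sync"
)

type cache struct {
	mu sync.RWMutex
	m  map[string]int
}

func (c *cache) getOrCreate(key string, create func() int) int {
	// Fast path: most calls find the entry under the shared read lock.
	c.mu.RLock()
	v, ok := c.m[key]
	c.mu.RUnlock()
	if ok {
		return v
	}

	// Slow path: take the write lock and re-check, because another goroutine
	// may have inserted the entry between the two lock acquisitions.
	c.mu.Lock()
	defer c.mu.Unlock()
	if v, ok := c.m[key]; ok {
		return v
	}
	v = create()
	c.m[key] = v
	return v
}

func main() {
	c := &cache{m: make(map[string]int)}
	fmt.Println(c.getOrCreate("a", func() int { return 1 })) // creates: 1
	fmt.Println(c.getOrCreate("a", func() int { return 2 })) // cached: still 1
}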
func (r *StandardRegistry) Unregister(name string) { r.mutex.Lock() defer r.mutex.Unlock() + r.stop(name) delete(r.metrics, name) } @@ -121,6 +197,7 @@ func (r *StandardRegistry) UnregisterAll() { r.mutex.Lock() defer r.mutex.Unlock() for name, _ := range r.metrics { + r.stop(name) delete(r.metrics, name) } } @@ -136,16 +213,37 @@ func (r *StandardRegistry) register(name string, i interface{}) error { return nil } -func (r *StandardRegistry) registered() map[string]interface{} { - r.mutex.Lock() - defer r.mutex.Unlock() - metrics := make(map[string]interface{}, len(r.metrics)) +type metricKV struct { + name string + value interface{} +} + +func (r *StandardRegistry) registered() []metricKV { + r.mutex.RLock() + defer r.mutex.RUnlock() + metrics := make([]metricKV, 0, len(r.metrics)) for name, i := range r.metrics { - metrics[name] = i + metrics = append(metrics, metricKV{ + name: name, + value: i, + }) } return metrics } +func (r *StandardRegistry) stop(name string) { + if i, ok := r.metrics[name]; ok { + if s, ok := i.(Stoppable); ok { + s.Stop() + } + } +} + +// Stoppable defines the metrics which has to be stopped. +type Stoppable interface { + Stop() +} + type PrefixedRegistry struct { underlying Registry prefix string @@ -216,6 +314,11 @@ func (r *PrefixedRegistry) RunHealthchecks() { r.underlying.RunHealthchecks() } +// GetAll metrics in the Registry +func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} { + return r.underlying.GetAll() +} + // Unregister the metric with the given name. The name will be prefixed. func (r *PrefixedRegistry) Unregister(name string) { realName := r.prefix + name diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go index 11c6b785..4047ab3d 100644 --- a/vendor/github.com/rcrowley/go-metrics/runtime.go +++ b/vendor/github.com/rcrowley/go-metrics/runtime.go @@ -3,6 +3,7 @@ package metrics import ( "runtime" "runtime/pprof" + "sync" "time" ) @@ -49,7 +50,8 @@ var ( numGC uint32 numCgoCalls int64 - threadCreateProfile = pprof.Lookup("threadcreate") + threadCreateProfile = pprof.Lookup("threadcreate") + registerRuntimeMetricsOnce = sync.Once{} ) // Capture new values for the Go runtime statistics exported in @@ -146,67 +148,69 @@ func CaptureRuntimeMemStatsOnce(r Registry) { // specifically runtime.MemStats. The runtimeMetrics are named by their // fully-qualified Go symbols, i.e. runtime.MemStats.Alloc. 
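The new Registry.GetAll above flattens every registered metric into a map of maps, which is handy for ad-hoc exporters, and Unregister now also stops stoppable metrics. A small usage sketch, again assuming the vendored go-metrics import path:

package main

import (
	"encoding/json"
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	metrics.GetOrRegisterCounter("requests", r).Inc(3)
	t := metrics.GetOrRegisterTimer("latency", r)
	t.Update(0) // record one sample so the timer reports data

	// GetAll returns e.g. {"latency":{"count":1,...},"requests":{"count":3}}.
	out, _ := json.Marshal(r.GetAll())
	fmt.Println(string(out))

	// Unregistering the timer now also calls Stop on its underlying meter.
	r.Unregister("latency")
}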
func RegisterRuntimeMemStats(r Registry) { - runtimeMetrics.MemStats.Alloc = NewGauge() - runtimeMetrics.MemStats.BuckHashSys = NewGauge() - runtimeMetrics.MemStats.DebugGC = NewGauge() - runtimeMetrics.MemStats.EnableGC = NewGauge() - runtimeMetrics.MemStats.Frees = NewGauge() - runtimeMetrics.MemStats.HeapAlloc = NewGauge() - runtimeMetrics.MemStats.HeapIdle = NewGauge() - runtimeMetrics.MemStats.HeapInuse = NewGauge() - runtimeMetrics.MemStats.HeapObjects = NewGauge() - runtimeMetrics.MemStats.HeapReleased = NewGauge() - runtimeMetrics.MemStats.HeapSys = NewGauge() - runtimeMetrics.MemStats.LastGC = NewGauge() - runtimeMetrics.MemStats.Lookups = NewGauge() - runtimeMetrics.MemStats.Mallocs = NewGauge() - runtimeMetrics.MemStats.MCacheInuse = NewGauge() - runtimeMetrics.MemStats.MCacheSys = NewGauge() - runtimeMetrics.MemStats.MSpanInuse = NewGauge() - runtimeMetrics.MemStats.MSpanSys = NewGauge() - runtimeMetrics.MemStats.NextGC = NewGauge() - runtimeMetrics.MemStats.NumGC = NewGauge() - runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64() - runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015)) - runtimeMetrics.MemStats.PauseTotalNs = NewGauge() - runtimeMetrics.MemStats.StackInuse = NewGauge() - runtimeMetrics.MemStats.StackSys = NewGauge() - runtimeMetrics.MemStats.Sys = NewGauge() - runtimeMetrics.MemStats.TotalAlloc = NewGauge() - runtimeMetrics.NumCgoCall = NewGauge() - runtimeMetrics.NumGoroutine = NewGauge() - runtimeMetrics.NumThread = NewGauge() - runtimeMetrics.ReadMemStats = NewTimer() + registerRuntimeMetricsOnce.Do(func() { + runtimeMetrics.MemStats.Alloc = NewGauge() + runtimeMetrics.MemStats.BuckHashSys = NewGauge() + runtimeMetrics.MemStats.DebugGC = NewGauge() + runtimeMetrics.MemStats.EnableGC = NewGauge() + runtimeMetrics.MemStats.Frees = NewGauge() + runtimeMetrics.MemStats.HeapAlloc = NewGauge() + runtimeMetrics.MemStats.HeapIdle = NewGauge() + runtimeMetrics.MemStats.HeapInuse = NewGauge() + runtimeMetrics.MemStats.HeapObjects = NewGauge() + runtimeMetrics.MemStats.HeapReleased = NewGauge() + runtimeMetrics.MemStats.HeapSys = NewGauge() + runtimeMetrics.MemStats.LastGC = NewGauge() + runtimeMetrics.MemStats.Lookups = NewGauge() + runtimeMetrics.MemStats.Mallocs = NewGauge() + runtimeMetrics.MemStats.MCacheInuse = NewGauge() + runtimeMetrics.MemStats.MCacheSys = NewGauge() + runtimeMetrics.MemStats.MSpanInuse = NewGauge() + runtimeMetrics.MemStats.MSpanSys = NewGauge() + runtimeMetrics.MemStats.NextGC = NewGauge() + runtimeMetrics.MemStats.NumGC = NewGauge() + runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64() + runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015)) + runtimeMetrics.MemStats.PauseTotalNs = NewGauge() + runtimeMetrics.MemStats.StackInuse = NewGauge() + runtimeMetrics.MemStats.StackSys = NewGauge() + runtimeMetrics.MemStats.Sys = NewGauge() + runtimeMetrics.MemStats.TotalAlloc = NewGauge() + runtimeMetrics.NumCgoCall = NewGauge() + runtimeMetrics.NumGoroutine = NewGauge() + runtimeMetrics.NumThread = NewGauge() + runtimeMetrics.ReadMemStats = NewTimer() - r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc) - r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys) - r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC) - r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC) - r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees) - r.Register("runtime.MemStats.HeapAlloc", 
runtimeMetrics.MemStats.HeapAlloc) - r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle) - r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse) - r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects) - r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased) - r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys) - r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC) - r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups) - r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs) - r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse) - r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys) - r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse) - r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys) - r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC) - r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC) - r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction) - r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs) - r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs) - r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse) - r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys) - r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys) - r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc) - r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall) - r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine) - r.Register("runtime.NumThread", runtimeMetrics.NumThread) - r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats) + r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc) + r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys) + r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC) + r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC) + r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees) + r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc) + r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle) + r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse) + r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects) + r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased) + r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys) + r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC) + r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups) + r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs) + r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse) + r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys) + r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse) + r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys) + r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC) + r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC) + r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction) + 
r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs) + r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs) + r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse) + r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys) + r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys) + r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc) + r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall) + r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine) + r.Register("runtime.NumThread", runtimeMetrics.NumThread) + r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats) + }) } diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go index 17db8f8d..d6ec4c62 100644 --- a/vendor/github.com/rcrowley/go-metrics/timer.go +++ b/vendor/github.com/rcrowley/go-metrics/timer.go @@ -19,6 +19,7 @@ type Timer interface { RateMean() float64 Snapshot() Timer StdDev() float64 + Stop() Sum() int64 Time(func()) Update(time.Duration) @@ -28,6 +29,8 @@ type Timer interface { // GetOrRegisterTimer returns an existing Timer or constructs and registers a // new StandardTimer. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. func GetOrRegisterTimer(name string, r Registry) Timer { if nil == r { r = DefaultRegistry @@ -36,6 +39,7 @@ func GetOrRegisterTimer(name string, r Registry) Timer { } // NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter. +// Be sure to call Stop() once the timer is of no use to allow for garbage collection. func NewCustomTimer(h Histogram, m Meter) Timer { if UseNilMetrics { return NilTimer{} @@ -47,6 +51,8 @@ func NewCustomTimer(h Histogram, m Meter) Timer { } // NewRegisteredTimer constructs and registers a new StandardTimer. +// Be sure to unregister the meter from the registry once it is of no use to +// allow for garbage collection. func NewRegisteredTimer(name string, r Registry) Timer { c := NewTimer() if nil == r { @@ -58,6 +64,7 @@ func NewRegisteredTimer(name string, r Registry) Timer { // NewTimer constructs a new StandardTimer using an exponentially-decaying // sample with the same reservoir size and alpha as UNIX load averages. +// Be sure to call Stop() once the timer is of no use to allow for garbage collection. func NewTimer() Timer { if UseNilMetrics { return NilTimer{} @@ -112,6 +119,9 @@ func (NilTimer) Snapshot() Timer { return NilTimer{} } // StdDev is a no-op. func (NilTimer) StdDev() float64 { return 0.0 } +// Stop is a no-op. +func (NilTimer) Stop() {} + // Sum is a no-op. func (NilTimer) Sum() int64 { return 0 } @@ -201,6 +211,11 @@ func (t *StandardTimer) StdDev() float64 { return t.histogram.StdDev() } +// Stop stops the meter. +func (t *StandardTimer) Stop() { + t.meter.Stop() +} + // Sum returns the sum in the sample. func (t *StandardTimer) Sum() int64 { return t.histogram.Sum() @@ -288,6 +303,9 @@ func (t *TimerSnapshot) Snapshot() Timer { return t } // was taken. func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() } +// Stop is a no-op. +func (t *TimerSnapshot) Stop() {} + // Sum returns the sum at the time the snapshot was taken. 
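RegisterRuntimeMemStats in the hunk above is now wrapped in a sync.Once, so repeated or concurrent calls register the runtime gauges only once. A reduced sketch of that idiom with a hypothetical setup function:

package main

import (
	"fmt"
	"sync"
)

var registerOnce sync.Once

// registerAll stands in for RegisterRuntimeMemStats: setup that must not run
// twice even if several callers race to trigger it.
func registerAll() {
	registerOnce.Do(func() {
		fmt.Println("registering runtime metrics")
	})
}

func main() {
	registerAll()
	registerAll() // second call is a no-op
}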
func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() } diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/vendor/github.com/rcrowley/go-metrics/validate.sh index f6499982..c4ae91e6 100644 --- a/vendor/github.com/rcrowley/go-metrics/validate.sh +++ b/vendor/github.com/rcrowley/go-metrics/validate.sh @@ -7,4 +7,4 @@ GOFMT_LINES=`gofmt -l . | wc -l | xargs` test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues" # run the tests for the root package -go test . +go test -race . diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS new file mode 100644 index 00000000..2b00ddba --- /dev/null +++ b/vendor/golang.org/x/crypto/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS new file mode 100644 index 00000000..1fbd3e97 --- /dev/null +++ b/vendor/golang.org/x/crypto/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/md4/md4.go b/vendor/golang.org/x/crypto/md4/md4.go new file mode 100644 index 00000000..59d34806 --- /dev/null +++ b/vendor/golang.org/x/crypto/md4/md4.go @@ -0,0 +1,122 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package md4 implements the MD4 hash algorithm as defined in RFC 1320. +// +// Deprecated: MD4 is cryptographically broken and should should only be used +// where compatibility with legacy systems, not security, is the goal. Instead, +// use a secure hash like SHA-256 (from crypto/sha256). +package md4 // import "golang.org/x/crypto/md4" + +import ( + "crypto" + "hash" +) + +func init() { + crypto.RegisterHash(crypto.MD4, New) +} + +// The size of an MD4 checksum in bytes. +const Size = 16 + +// The blocksize of MD4 in bytes. +const BlockSize = 64 + +const ( + _Chunk = 64 + _Init0 = 0x67452301 + _Init1 = 0xEFCDAB89 + _Init2 = 0x98BADCFE + _Init3 = 0x10325476 +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + s [4]uint32 + x [_Chunk]byte + nx int + len uint64 +} + +func (d *digest) Reset() { + d.s[0] = _Init0 + d.s[1] = _Init1 + d.s[2] = _Init2 + d.s[3] = _Init3 + d.nx = 0 + d.len = 0 +} + +// New returns a new hash.Hash computing the MD4 checksum. +func New() hash.Hash { + d := new(digest) + d.Reset() + return d +} + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := len(p) + if n > _Chunk-d.nx { + n = _Chunk - d.nx + } + for i := 0; i < n; i++ { + d.x[d.nx+i] = p[i] + } + d.nx += n + if d.nx == _Chunk { + _Block(d, d.x[0:]) + d.nx = 0 + } + p = p[n:] + } + n := _Block(d, p) + p = p[n:] + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d0 *digest) Sum(in []byte) []byte { + // Make a copy of d0, so that caller can keep writing and summing. + d := new(digest) + *d = *d0 + + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + len := d.len + var tmp [64]byte + tmp[0] = 0x80 + if len%64 < 56 { + d.Write(tmp[0 : 56-len%64]) + } else { + d.Write(tmp[0 : 64+56-len%64]) + } + + // Length in bits. 
+ len <<= 3 + for i := uint(0); i < 8; i++ { + tmp[i] = byte(len >> (8 * i)) + } + d.Write(tmp[0:8]) + + if d.nx != 0 { + panic("d.nx != 0") + } + + for _, s := range d.s { + in = append(in, byte(s>>0)) + in = append(in, byte(s>>8)) + in = append(in, byte(s>>16)) + in = append(in, byte(s>>24)) + } + return in +} diff --git a/vendor/golang.org/x/crypto/md4/md4block.go b/vendor/golang.org/x/crypto/md4/md4block.go new file mode 100644 index 00000000..3fed475f --- /dev/null +++ b/vendor/golang.org/x/crypto/md4/md4block.go @@ -0,0 +1,89 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// MD4 block step. +// In its own file so that a faster assembly or C version +// can be substituted easily. + +package md4 + +var shift1 = []uint{3, 7, 11, 19} +var shift2 = []uint{3, 5, 9, 13} +var shift3 = []uint{3, 9, 11, 15} + +var xIndex2 = []uint{0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15} +var xIndex3 = []uint{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15} + +func _Block(dig *digest, p []byte) int { + a := dig.s[0] + b := dig.s[1] + c := dig.s[2] + d := dig.s[3] + n := 0 + var X [16]uint32 + for len(p) >= _Chunk { + aa, bb, cc, dd := a, b, c, d + + j := 0 + for i := 0; i < 16; i++ { + X[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24 + j += 4 + } + + // If this needs to be made faster in the future, + // the usual trick is to unroll each of these + // loops by a factor of 4; that lets you replace + // the shift[] lookups with constants and, + // with suitable variable renaming in each + // unrolled body, delete the a, b, c, d = d, a, b, c + // (or you can let the optimizer do the renaming). + // + // The index variables are uint so that % by a power + // of two can be optimized easily by a compiler. + + // Round 1. + for i := uint(0); i < 16; i++ { + x := i + s := shift1[i%4] + f := ((c ^ d) & b) ^ d + a += f + X[x] + a = a<<s | a>>(32-s) + a, b, c, d = d, a, b, c + } + + // Round 2. + for i := uint(0); i < 16; i++ { + x := xIndex2[i] + s := shift2[i%4] + g := (b & c) | (b & d) | (c & d) + a += g + X[x] + 0x5a827999 + a = a<<s | a>>(32-s) + a, b, c, d = d, a, b, c + } + + // Round 3. + for i := uint(0); i < 16; i++ { + x := xIndex3[i] + s := shift3[i%4] + h := b ^ c ^ d + a += h + X[x] + 0x6ed9eba1 + a = a<<s | a>>(32-s) + a, b, c, d = d, a, b, c + } + + a += aa + b += bb + c += cc + d += dd + + p = p[_Chunk:] + n += _Chunk + } + + dig.s[0] = a + dig.s[1] = b + dig.s[2] = c + dig.s[3] = d + return n +} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 00000000..593f6530 --- /dev/null +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. + +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC.
To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 // import "golang.org/x/crypto/pbkdf2" + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := prf.Size() + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := 1; block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[len(dk)-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := 2; n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +} diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/vendor/golang.org/x/net/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go new file mode 100644 index 00000000..3d6f516a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/client.go @@ -0,0 +1,168 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" + "time" +) + +var ( + noDeadline = time.Time{} + aLongTimeAgo = time.Unix(1, 0) +) + +func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) { + host, port, err := splitHostPort(address) + if err != nil { + return nil, err + } + if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() { + c.SetDeadline(deadline) + defer c.SetDeadline(noDeadline) + } + if ctx != context.Background() { + errCh := make(chan error, 1) + done := make(chan struct{}) + defer func() { + close(done) + if ctxErr == nil { + ctxErr = <-errCh + } + }() + go func() { + select { + case <-ctx.Done(): + c.SetDeadline(aLongTimeAgo) + errCh <- ctx.Err() + case <-done: + errCh <- nil + } + }() + } + + b := make([]byte, 0, 6+len(host)) // the size here is just an estimate + b = append(b, Version5) + if len(d.AuthMethods) == 0 || d.Authenticate == nil { + b = append(b, 1, byte(AuthMethodNotRequired)) + } else { + ams := d.AuthMethods + if len(ams) > 255 { + return nil, errors.New("too many authentication methods") + } + b = append(b, byte(len(ams))) + for _, am := range ams { + b = append(b, byte(am)) + } + } + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + am := AuthMethod(b[1]) + if am == AuthMethodNoAcceptableMethods { + return nil, errors.New("no acceptable authentication methods") + } + if d.Authenticate != nil { + if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil { + return + } + } + + b = b[:0] + b = append(b, Version5, byte(d.cmd), 0) + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + b = append(b, AddrTypeIPv4) + b = append(b, ip4...) + } else if ip6 := ip.To16(); ip6 != nil { + b = append(b, AddrTypeIPv6) + b = append(b, ip6...) + } else { + return nil, errors.New("unknown address type") + } + } else { + if len(host) > 255 { + return nil, errors.New("FQDN too long") + } + b = append(b, AddrTypeFQDN) + b = append(b, byte(len(host))) + b = append(b, host...) 
+ } + b = append(b, byte(port>>8), byte(port)) + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded { + return nil, errors.New("unknown error " + cmdErr.String()) + } + if b[2] != 0 { + return nil, errors.New("non-zero reserved field") + } + l := 2 + var a Addr + switch b[3] { + case AddrTypeIPv4: + l += net.IPv4len + a.IP = make(net.IP, net.IPv4len) + case AddrTypeIPv6: + l += net.IPv6len + a.IP = make(net.IP, net.IPv6len) + case AddrTypeFQDN: + if _, err := io.ReadFull(c, b[:1]); err != nil { + return nil, err + } + l += int(b[0]) + default: + return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3]))) + } + if cap(b) < l { + b = make([]byte, l) + } else { + b = b[:l] + } + if _, ctxErr = io.ReadFull(c, b); ctxErr != nil { + return + } + if a.IP != nil { + copy(a.IP, b) + } else { + a.Name = string(b[:len(b)-2]) + } + a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1]) + return &a, nil +} + +func splitHostPort(address string) (string, int, error) { + host, port, err := net.SplitHostPort(address) + if err != nil { + return "", 0, err + } + portnum, err := strconv.Atoi(port) + if err != nil { + return "", 0, err + } + if 1 > portnum || portnum > 0xffff { + return "", 0, errors.New("port number out of range " + port) + } + return host, portnum, nil +} diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go new file mode 100644 index 00000000..97db2340 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/socks.go @@ -0,0 +1,317 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socks provides a SOCKS version 5 client implementation. +// +// SOCKS protocol version 5 is defined in RFC 1928. +// Username/Password authentication for SOCKS version 5 is defined in +// RFC 1929. +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" +) + +// A Command represents a SOCKS command. +type Command int + +func (cmd Command) String() string { + switch cmd { + case CmdConnect: + return "socks connect" + case cmdBind: + return "socks bind" + default: + return "socks " + strconv.Itoa(int(cmd)) + } +} + +// An AuthMethod represents a SOCKS authentication method. +type AuthMethod int + +// A Reply represents a SOCKS command reply code. +type Reply int + +func (code Reply) String() string { + switch code { + case StatusSucceeded: + return "succeeded" + case 0x01: + return "general SOCKS server failure" + case 0x02: + return "connection not allowed by ruleset" + case 0x03: + return "network unreachable" + case 0x04: + return "host unreachable" + case 0x05: + return "connection refused" + case 0x06: + return "TTL expired" + case 0x07: + return "command not supported" + case 0x08: + return "address type not supported" + default: + return "unknown code: " + strconv.Itoa(int(code)) + } +} + +// Wire protocol constants. 
+const ( + Version5 = 0x05 + + AddrTypeIPv4 = 0x01 + AddrTypeFQDN = 0x03 + AddrTypeIPv6 = 0x04 + + CmdConnect Command = 0x01 // establishes an active-open forward proxy connection + cmdBind Command = 0x02 // establishes a passive-open forward proxy connection + + AuthMethodNotRequired AuthMethod = 0x00 // no authentication required + AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password + AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods + + StatusSucceeded Reply = 0x00 +) + +// An Addr represents a SOCKS-specific address. +// Either Name or IP is used exclusively. +type Addr struct { + Name string // fully-qualified domain name + IP net.IP + Port int +} + +func (a *Addr) Network() string { return "socks" } + +func (a *Addr) String() string { + if a == nil { + return "" + } + port := strconv.Itoa(a.Port) + if a.IP == nil { + return net.JoinHostPort(a.Name, port) + } + return net.JoinHostPort(a.IP.String(), port) +} + +// A Conn represents a forward proxy connection. +type Conn struct { + net.Conn + + boundAddr net.Addr +} + +// BoundAddr returns the address assigned by the proxy server for +// connecting to the command target address from the proxy server. +func (c *Conn) BoundAddr() net.Addr { + if c == nil { + return nil + } + return c.boundAddr +} + +// A Dialer holds SOCKS-specific options. +type Dialer struct { + cmd Command // either CmdConnect or cmdBind + proxyNetwork string // network between a proxy server and a client + proxyAddress string // proxy server address + + // ProxyDial specifies the optional dial function for + // establishing the transport connection. + ProxyDial func(context.Context, string, string) (net.Conn, error) + + // AuthMethods specifies the list of request authentication + // methods. + // If empty, SOCKS client requests only AuthMethodNotRequired. + AuthMethods []AuthMethod + + // Authenticate specifies the optional authentication + // function. It must be non-nil when AuthMethods is not empty. + // It must return an error when the authentication is failed. + Authenticate func(context.Context, io.ReadWriter, AuthMethod) error +} + +// DialContext connects to the provided address on the provided +// network. +// +// The returned error value may be a net.OpError. When the Op field of +// net.OpError contains "socks", the Source field contains a proxy +// server address and the Addr field contains a command target +// address. +// +// See func Dial of the net package of standard library for a +// description of the network and address parameters. 
+func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress) + } else { + var dd net.Dialer + c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + a, err := d.connect(ctx, c, address) + if err != nil { + c.Close() + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return &Conn{Conn: c, boundAddr: a}, nil +} + +// DialWithConn initiates a connection from SOCKS server to the target +// network and address using the connection c that is already +// connected to the SOCKS server. +// +// It returns the connection's local address assigned by the SOCKS +// server. +func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + a, err := d.connect(ctx, c, address) + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return a, nil +} + +// Dial connects to the provided address on the provided network. +// +// Unlike DialContext, it returns a raw transport connection instead +// of a forward proxy connection. +// +// Deprecated: Use DialContext or DialWithConn instead. 
+func (d *Dialer) Dial(network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress) + } else { + c, err = net.Dial(d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil { + c.Close() + return nil, err + } + return c, nil +} + +func (d *Dialer) validateTarget(network, address string) error { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return errors.New("network not implemented") + } + switch d.cmd { + case CmdConnect, cmdBind: + default: + return errors.New("command not implemented") + } + return nil +} + +func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) { + for i, s := range []string{d.proxyAddress, address} { + host, port, err := splitHostPort(s) + if err != nil { + return nil, nil, err + } + a := &Addr{Port: port} + a.IP = net.ParseIP(host) + if a.IP == nil { + a.Name = host + } + if i == 0 { + proxy = a + } else { + dst = a + } + } + return +} + +// NewDialer returns a new Dialer that dials through the provided +// proxy server's network and address. +func NewDialer(network, address string) *Dialer { + return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect} +} + +const ( + authUsernamePasswordVersion = 0x01 + authStatusSucceeded = 0x00 +) + +// UsernamePassword are the credentials for the username/password +// authentication method. +type UsernamePassword struct { + Username string + Password string +} + +// Authenticate authenticates a pair of username and password with the +// proxy server. +func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error { + switch auth { + case AuthMethodNotRequired: + return nil + case AuthMethodUsernamePassword: + if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) == 0 || len(up.Password) > 255 { + return errors.New("invalid username/password") + } + b := []byte{authUsernamePasswordVersion} + b = append(b, byte(len(up.Username))) + b = append(b, up.Username...) + b = append(b, byte(len(up.Password))) + b = append(b, up.Password...) + // TODO(mikio): handle IO deadlines and cancelation if + // necessary + if _, err := rw.Write(b); err != nil { + return err + } + if _, err := io.ReadFull(rw, b[:2]); err != nil { + return err + } + if b[0] != authUsernamePasswordVersion { + return errors.New("invalid username/password version") + } + if b[1] != authStatusSucceeded { + return errors.New("username/password authentication failed") + } + return nil + } + return errors.New("unsupported authentication method " + strconv.Itoa(int(auth))) +} diff --git a/vendor/golang.org/x/net/proxy/dial.go b/vendor/golang.org/x/net/proxy/dial.go new file mode 100644 index 00000000..811c2e4e --- /dev/null +++ b/vendor/golang.org/x/net/proxy/dial.go @@ -0,0 +1,54 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proxy + +import ( + "context" + "net" +) + +// A ContextDialer dials using a context. +type ContextDialer interface { + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment. +// +// The passed ctx is only used for returning the Conn, not the lifetime of the Conn. +// +// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer +// can leak a goroutine for as long as it takes the underlying Dialer implementation to timeout. +// +// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. +func Dial(ctx context.Context, network, address string) (net.Conn, error) { + d := FromEnvironment() + if xd, ok := d.(ContextDialer); ok { + return xd.DialContext(ctx, network, address) + } + return dialContext(ctx, d, network, address) +} + +// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to timeout +// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. +func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) { + var ( + conn net.Conn + done = make(chan struct{}, 1) + err error + ) + go func() { + conn, err = d.Dial(network, address) + close(done) + if conn != nil && ctx.Err() != nil { + conn.Close() + } + }() + select { + case <-ctx.Done(): + err = ctx.Err() + case <-done: + } + return conn, err +} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go new file mode 100644 index 00000000..3d66bdef --- /dev/null +++ b/vendor/golang.org/x/net/proxy/direct.go @@ -0,0 +1,31 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" +) + +type direct struct{} + +// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext. +var Direct = direct{} + +var ( + _ Dialer = Direct + _ ContextDialer = Direct +) + +// Dial directly invokes net.Dial with the supplied parameters. +func (direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// DialContext instantiates a net.Dialer and invokes its DialContext receiver with the supplied parameters. +func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, network, addr) +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go new file mode 100644 index 00000000..573fe79e --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -0,0 +1,155 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" + "strings" +) + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type PerHost struct { + def, bypass Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. 
+func NewPerHost(defaultDialer, bypass Dialer) *PerHost { + return &PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +// DialContext connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + d := p.dialerForRequest(host) + if x, ok := d.(ContextDialer); ok { + return x.DialContext(ctx, network, addr) + } + return dialContext(ctx, d, network, addr) +} + +func (p *PerHost) dialerForRequest(host string) Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. 
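PerHost, shown above, routes each dial through either the default dialer or the bypass dialer based on IP, CIDR, zone, and host rules parsed by AddFromString. A usage sketch; the proxy address and host names are illustrative only:

package main

import (
	"log"

	"golang.org/x/net/proxy"
)

func main() {
	// Forward everything through a SOCKS5 proxy (illustrative address)...
	socksDialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}

	// ...except local and internal destinations, which bypass it.
	ph := proxy.NewPerHost(socksDialer, proxy.Direct)
	ph.AddFromString("localhost,10.0.0.0/8,*.internal.example") // same format FromEnvironment feeds from NO_PROXY

	conn, err := ph.Dial("tcp", "example.com:80") // matches no bypass rule, so it goes through the proxy
	if err != nil {
		log.Fatal(err)
	}
	conn.Close()
}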
+func (p *PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go new file mode 100644 index 00000000..9ff4b9a7 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy.go @@ -0,0 +1,149 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proxy provides support for a variety of protocols to proxy network +// data. +package proxy // import "golang.org/x/net/proxy" + +import ( + "errors" + "net" + "net/url" + "os" + "sync" +) + +// A Dialer is a means to establish a connection. +// Custom dialers should also implement ContextDialer. +type Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy-related +// variables in the environment and makes underlying connections +// directly. +func FromEnvironment() Dialer { + return FromEnvironmentUsing(Direct) +} + +// FromEnvironmentUsing returns the dialer specify by the proxy-related +// variables in the environment and makes underlying connections +// using the provided forwarding Dialer (for instance, a *net.Dialer +// with desired configuration). +func FromEnvironmentUsing(forward Dialer) Dialer { + allProxy := allProxyEnv.Get() + if len(allProxy) == 0 { + return forward + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return forward + } + proxy, err := FromURL(proxyURL, forward) + if err != nil { + return forward + } + + noProxy := noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := NewPerHost(proxy, forward) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) { + if proxySchemes == nil { + proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error)) + } + proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func FromURL(u *url.URL, forward Dialer) (Dialer, error) { + var auth *Auth + if u.User != nil { + auth = new(Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5", "socks5h": + addr := u.Hostname() + port := u.Port() + if port == "" { + port = "1080" + } + return SOCKS5("tcp", net.JoinHostPort(addr, port), auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. 
+ if proxySchemes != nil { + if f, ok := proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + allProxyEnv = &envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + noProxyEnv = &envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type envOnce struct { + names []string + once sync.Once + val string +} + +func (e *envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// reset is used by tests +func (e *envOnce) reset() { + e.once = sync.Once{} + e.val = "" +} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go new file mode 100644 index 00000000..c91651f9 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -0,0 +1,42 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" + + "golang.org/x/net/internal/socks" +) + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given +// address with an optional username and password. +// See RFC 1928 and RFC 1929. +func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) { + d := socks.NewDialer(network, address) + if forward != nil { + if f, ok := forward.(ContextDialer); ok { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return f.DialContext(ctx, network, address) + } + } else { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return dialContext(ctx, forward, network, address) + } + } + } + if auth != nil { + up := socks.UsernamePassword{ + Username: auth.User, + Password: auth.Password, + } + d.AuthMethods = []socks.AuthMethod{ + socks.AuthMethodNotRequired, + socks.AuthMethodUsernamePassword, + } + d.Authenticate = up.Authenticate + } + return d, nil +} diff --git a/vendor/gopkg.in/jcmturner/aescts.v1/.gitignore b/vendor/gopkg.in/jcmturner/aescts.v1/.gitignore new file mode 100644 index 00000000..a1338d68 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/aescts.v1/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/gopkg.in/jcmturner/aescts.v1/LICENSE b/vendor/gopkg.in/jcmturner/aescts.v1/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/gopkg.in/jcmturner/aescts.v1/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
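Note (usage sketch, not part of the patch): the gopkg.in/jcmturner/aescts.v1 files added next expose a two-function API, Encrypt and Decrypt, for AES CBC ciphertext stealing; the signatures appear in aescts.go below. The following is a minimal round-trip sketch based only on those signatures. The random key/IV, sample payload, and `main` wrapper are illustrative assumptions.

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	aescts "gopkg.in/jcmturner/aescts.v1"
)

func main() {
	key := make([]byte, 32) // any valid AES key size (16/24/32 bytes)
	iv := make([]byte, 16)  // one AES block
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	if _, err := rand.Read(iv); err != nil {
		panic(err)
	}

	// Plaintext longer than one block: with ciphertext stealing there is no
	// padding, so the ciphertext comes back the same length as the plaintext.
	plaintext := []byte("an example payload that is longer than a single AES block")

	// Encrypt returns the next IV (for chained encryptions) and the ciphertext.
	_, ct, err := aescts.Encrypt(key, iv, plaintext)
	if err != nil {
		panic(err)
	}

	pt, err := aescts.Decrypt(key, iv, ct)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(pt, plaintext)) // true
}
```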
diff --git a/vendor/gopkg.in/jcmturner/aescts.v1/README.md b/vendor/gopkg.in/jcmturner/aescts.v1/README.md new file mode 100644 index 00000000..d1fddf3a --- /dev/null +++ b/vendor/gopkg.in/jcmturner/aescts.v1/README.md @@ -0,0 +1,16 @@ +# AES CBC Ciphertext Stealing +[![GoDoc](https://godoc.org/gopkg.in/jcmturner/aescts.v1?status.svg)](https://godoc.org/gopkg.in/jcmturner/aescts.v1) [![Go Report Card](https://goreportcard.com/badge/gopkg.in/jcmturner/aescts.v1)](https://goreportcard.com/report/gopkg.in/jcmturner/aescts.v1) + +Encrypt and decrypt data using AES CBC Ciphertext stealing mode. + +Reference: https://en.wikipedia.org/wiki/Ciphertext_stealing#CBC_ciphertext_stealing + +To get the package, execute: +``` +go get gopkg.in/jcmturner/aescts.v1 +``` +To import this package, add the following line to your code: +```go +import "gopkg.in/jcmturner/aescts.v1" + +``` \ No newline at end of file diff --git a/vendor/gopkg.in/jcmturner/aescts.v1/aescts.go b/vendor/gopkg.in/jcmturner/aescts.v1/aescts.go new file mode 100644 index 00000000..278713ea --- /dev/null +++ b/vendor/gopkg.in/jcmturner/aescts.v1/aescts.go @@ -0,0 +1,186 @@ +// Package aescts provides AES CBC CipherText Stealing encryption and decryption methods +package aescts + +import ( + "crypto/aes" + "crypto/cipher" + "errors" + "fmt" +) + +// Encrypt the message with the key and the initial vector. +// Returns: next iv, ciphertext bytes, error +func Encrypt(key, iv, plaintext []byte) ([]byte, []byte, error) { + l := len(plaintext) + + block, err := aes.NewCipher(key) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("Error creating cipher: %v", err) + } + mode := cipher.NewCBCEncrypter(block, iv) + + m := make([]byte, len(plaintext)) + copy(m, plaintext) + + /*For consistency, ciphertext stealing is always used for the last two + blocks of the data to be encrypted, as in [RC5]. If the data length + is a multiple of the block size, this is equivalent to plain CBC mode + with the last two ciphertext blocks swapped.*/ + /*The initial vector carried out from one encryption for use in a + subsequent encryption is the next-to-last block of the encryption + output; this is the encrypted form of the last plaintext block.*/ + if l <= aes.BlockSize { + m, _ = zeroPad(m, aes.BlockSize) + mode.CryptBlocks(m, m) + return m, m, nil + } + if l%aes.BlockSize == 0 { + mode.CryptBlocks(m, m) + iv = m[len(m)-aes.BlockSize:] + rb, _ := swapLastTwoBlocks(m, aes.BlockSize) + return iv, rb, nil + } + m, _ = zeroPad(m, aes.BlockSize) + rb, pb, lb, err := tailBlocks(m, aes.BlockSize) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("Error tailing blocks: %v", err) + } + var ct []byte + if rb != nil { + // Encrpt all but the lats 2 blocks and update the rolling iv + mode.CryptBlocks(rb, rb) + iv = rb[len(rb)-aes.BlockSize:] + mode = cipher.NewCBCEncrypter(block, iv) + ct = append(ct, rb...) + } + mode.CryptBlocks(pb, pb) + mode = cipher.NewCBCEncrypter(block, pb) + mode.CryptBlocks(lb, lb) + // Cipher Text Stealing (CTS) - Ref: https://en.wikipedia.org/wiki/Ciphertext_stealing#CBC_ciphertext_stealing + // Swap the last two cipher blocks + // Truncate the ciphertext to the length of the original plaintext + ct = append(ct, lb...) + ct = append(ct, pb...) + return lb, ct[:l], nil +} + +// Decrypt the ciphertext with the key and the initial vector. 
+func Decrypt(key, iv, ciphertext []byte) ([]byte, error) { + // Copy the cipher text as golang slices even when passed by value to this method can result in the backing arrays of the calling code value being updated. + ct := make([]byte, len(ciphertext)) + copy(ct, ciphertext) + if len(ct) < aes.BlockSize { + return []byte{}, fmt.Errorf("Ciphertext is not large enough. It is less that one block size. Blocksize:%v; Ciphertext:%v", aes.BlockSize, len(ct)) + } + // Configure the CBC + block, err := aes.NewCipher(key) + if err != nil { + return nil, fmt.Errorf("Error creating cipher: %v", err) + } + var mode cipher.BlockMode + + //If ciphertext is multiple of blocksize we just need to swap back the last two blocks and then do CBC + //If the ciphertext is just one block we can't swap so we just decrypt + if len(ct)%aes.BlockSize == 0 { + if len(ct) > aes.BlockSize { + ct, _ = swapLastTwoBlocks(ct, aes.BlockSize) + } + mode = cipher.NewCBCDecrypter(block, iv) + message := make([]byte, len(ct)) + mode.CryptBlocks(message, ct) + return message[:len(ct)], nil + } + + // Cipher Text Stealing (CTS) using CBC interface. Ref: https://en.wikipedia.org/wiki/Ciphertext_stealing#CBC_ciphertext_stealing + // Get ciphertext of the 2nd to last (penultimate) block (cpb), the last block (clb) and the rest (crb) + crb, cpb, clb, _ := tailBlocks(ct, aes.BlockSize) + v := make([]byte, len(iv), len(iv)) + copy(v, iv) + var message []byte + if crb != nil { + //If there is more than just the last and the penultimate block we decrypt it and the last bloc of this becomes the iv for later + rb := make([]byte, len(crb)) + mode = cipher.NewCBCDecrypter(block, v) + v = crb[len(crb)-aes.BlockSize:] + mode.CryptBlocks(rb, crb) + message = append(message, rb...) + } + + // We need to modify the cipher text + // Decryt the 2nd to last (penultimate) block with a the original iv + pb := make([]byte, aes.BlockSize) + mode = cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(pb, cpb) + // number of byte needed to pad + npb := aes.BlockSize - len(ct)%aes.BlockSize + //pad last block using the number of bytes needed from the tail of the plaintext 2nd to last (penultimate) block + clb = append(clb, pb[len(pb)-npb:]...) + + // Now decrypt the last block in the penultimate position (iv will be from the crb, if the is no crb it's zeros) + // iv for the penultimate block decrypted in the last position becomes the modified last block + lb := make([]byte, aes.BlockSize) + mode = cipher.NewCBCDecrypter(block, v) + v = clb + mode.CryptBlocks(lb, clb) + message = append(message, lb...) + + // Now decrypt the penultimate block in the last position (iv will be from the modified last block) + mode = cipher.NewCBCDecrypter(block, v) + mode.CryptBlocks(cpb, cpb) + message = append(message, cpb...) 
+ + // Truncate to the size of the original cipher text + return message[:len(ct)], nil +} + +func tailBlocks(b []byte, c int) ([]byte, []byte, []byte, error) { + if len(b) <= c { + return []byte{}, []byte{}, []byte{}, errors.New("bytes slice is not larger than one block so cannot tail") + } + // Get size of last block + var lbs int + if l := len(b) % aes.BlockSize; l == 0 { + lbs = aes.BlockSize + } else { + lbs = l + } + // Get last block + lb := b[len(b)-lbs:] + // Get 2nd to last (penultimate) block + pb := b[len(b)-lbs-c : len(b)-lbs] + if len(b) > 2*c { + rb := b[:len(b)-lbs-c] + return rb, pb, lb, nil + } + return nil, pb, lb, nil +} + +func swapLastTwoBlocks(b []byte, c int) ([]byte, error) { + rb, pb, lb, err := tailBlocks(b, c) + if err != nil { + return nil, err + } + var out []byte + if rb != nil { + out = append(out, rb...) + } + out = append(out, lb...) + out = append(out, pb...) + return out, nil +} + +// zeroPad pads bytes with zeros to nearest multiple of message size m. +func zeroPad(b []byte, m int) ([]byte, error) { + if m <= 0 { + return nil, errors.New("Invalid message block size when padding") + } + if b == nil || len(b) == 0 { + return nil, errors.New("Data not valid to pad: Zero size") + } + if l := len(b) % m; l != 0 { + n := m - l + z := make([]byte, n) + b = append(b, z...) + } + return b, nil +} diff --git a/vendor/gopkg.in/jcmturner/dnsutils.v1/.gitignore b/vendor/gopkg.in/jcmturner/dnsutils.v1/.gitignore new file mode 100644 index 00000000..a1338d68 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/dnsutils.v1/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/gopkg.in/jcmturner/dnsutils.v1/.travis.yml b/vendor/gopkg.in/jcmturner/dnsutils.v1/.travis.yml new file mode 100644 index 00000000..cab4f7b8 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/dnsutils.v1/.travis.yml @@ -0,0 +1,24 @@ +language: go + +go: + - 1.7.x + - 1.8.x + - 1.9.x + - master + +gobuild_args: -tags=integration -race + +sudo: required + +services: + - docker + +before_install: + - docker pull jcmturner/gokrb5:dns + - docker run -d -h kdc.test.gokrb5 -v /etc/localtime:/etc/localtime:ro -e "TEST_KDC_ADDR=127.0.0.1" -p 53:53 -p 53:53/udp --name dns jcmturner/gokrb5:dns + +before_script: + - sudo sed -i 's/nameserver .*/nameserver 127.0.0.1/g' /etc/resolv.conf + +env: + - DNSUTILS_OVERRIDE_NS="127.0.0.1:53" \ No newline at end of file diff --git a/vendor/gopkg.in/jcmturner/dnsutils.v1/LICENSE b/vendor/gopkg.in/jcmturner/dnsutils.v1/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/gopkg.in/jcmturner/dnsutils.v1/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
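Note (usage sketch, not part of the patch): the dnsutils srv.go added next orders SRV records by priority and then by weighted-random selection within each priority, returning a count and a map keyed 1..count. A hypothetical lookup sketch using only the OrderedSRV signature shown below; the realm name example.com and the `main` wrapper are illustrative assumptions.

```go
package main

import (
	"fmt"

	dnsutils "gopkg.in/jcmturner/dnsutils.v1"
)

func main() {
	// Hypothetical lookup of the _kerberos._udp SRV records for a realm.
	count, ordered, err := dnsutils.OrderedSRV("kerberos", "udp", "example.com")
	if err != nil {
		fmt.Println("SRV lookup failed:", err)
		return
	}
	// Entries are keyed 1..count in the order they should be tried:
	// lowest priority first, weighted-random order within equal priorities.
	for i := 1; i <= count; i++ {
		srv := ordered[i]
		fmt.Printf("%d: %s:%d (priority %d, weight %d)\n", i, srv.Target, srv.Port, srv.Priority, srv.Weight)
	}
}
```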
diff --git a/vendor/gopkg.in/jcmturner/dnsutils.v1/srv.go b/vendor/gopkg.in/jcmturner/dnsutils.v1/srv.go new file mode 100644 index 00000000..15ea912d --- /dev/null +++ b/vendor/gopkg.in/jcmturner/dnsutils.v1/srv.go @@ -0,0 +1,95 @@ +package dnsutils + +import ( + "math/rand" + "net" + "sort" +) + +// OrderedSRV returns a count of the results and a map keyed on the order they should be used. +// This based on the records' priority and randomised selection based on their relative weighting. +// The function's inputs are the same as those for net.LookupSRV +// To use in the correct order: +// +// count, orderedSRV, err := OrderedSRV(service, proto, name) +// i := 1 +// for i <= count { +// srv := orderedSRV[i] +// // Do something such as dial this SRV. If fails move on the the next or break if it succeeds. +// i += 1 +// } +func OrderedSRV(service, proto, name string) (int, map[int]*net.SRV, error) { + _, addrs, err := net.LookupSRV(service, proto, name) + if err != nil { + return 0, make(map[int]*net.SRV), err + } + index, osrv := orderSRV(addrs) + return index, osrv, nil +} + +func orderSRV(addrs []*net.SRV) (int, map[int]*net.SRV) { + // Initialise the ordered map + var o int + osrv := make(map[int]*net.SRV) + + prioMap := make(map[int][]*net.SRV, 0) + for _, srv := range addrs { + prioMap[int(srv.Priority)] = append(prioMap[int(srv.Priority)], srv) + } + + priorities := make([]int, 0) + for p := range prioMap { + priorities = append(priorities, p) + } + + var count int + sort.Ints(priorities) + for _, p := range priorities { + tos := weightedOrder(prioMap[p]) + for i, s := range tos { + count += 1 + osrv[o+i] = s + } + o += len(tos) + } + return count, osrv +} + +func weightedOrder(srvs []*net.SRV) map[int]*net.SRV { + // Get the total weight + var tw int + for _, s := range srvs { + tw += int(s.Weight) + } + + // Initialise the ordered map + o := 1 + osrv := make(map[int]*net.SRV) + + // Whilst there are still entries to be ordered + l := len(srvs) + for l > 0 { + i := rand.Intn(l) + s := srvs[i] + var rw int + if tw > 0 { + // Greater the weight the more likely this will be zero or less + rw = rand.Intn(tw) - int(s.Weight) + } + if rw <= 0 { + // Put entry in position + osrv[o] = s + if len(srvs) > 1 { + // Remove the entry from the source slice by swapping with the last entry and truncating + srvs[len(srvs)-1], srvs[i] = srvs[i], srvs[len(srvs)-1] + srvs = srvs[:len(srvs)-1] + l = len(srvs) + } else { + l = 0 + } + o += 1 + tw = tw - int(s.Weight) + } + } + return osrv +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/LICENSE b/vendor/gopkg.in/jcmturner/gokrb5.v7/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
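Note (usage sketch, not part of the patch): the gokrb5.v7 files vendored from here on begin with asn1tools, whose doc comments below describe ASN.1 short-form and long-form length encoding. A small sketch of that encoding using the MarshalLengthBytes and GetLengthFromASN functions shown below; the sample values and `main` wrapper are assumptions.

```go
package main

import (
	"fmt"

	"gopkg.in/jcmturner/gokrb5.v7/asn1tools"
)

func main() {
	// Short form: lengths up to 127 fit in a single octet.
	fmt.Printf("% x\n", asn1tools.MarshalLengthBytes(5)) // 05

	// Long form: first octet is 0x80 plus the number of length octets,
	// followed by the length in base 256, most significant byte first.
	fmt.Printf("% x\n", asn1tools.MarshalLengthBytes(300)) // 82 01 2c

	// GetLengthFromASN reads the length back out of a TLV header
	// (byte 0 is the identifier octet, the length octets follow).
	hdr := append([]byte{0x30}, asn1tools.MarshalLengthBytes(300)...)
	fmt.Println(asn1tools.GetLengthFromASN(hdr)) // 300
}
```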
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/asn1tools/tools.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/asn1tools/tools.go new file mode 100644 index 00000000..f27740b9 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/asn1tools/tools.go @@ -0,0 +1,86 @@ +// Package asn1tools provides tools for managing ASN1 marshaled data. +package asn1tools + +import ( + "github.com/jcmturner/gofork/encoding/asn1" +) + +// MarshalLengthBytes returns the ASN1 encoded bytes for the length 'l' +// +// There are two forms: short (for lengths between 0 and 127), and long definite (for lengths between 0 and 2^1008 -1). +// +// Short form: One octet. Bit 8 has value "0" and bits 7-1 give the length. +// +// Long form: Two to 127 octets. Bit 8 of first octet has value "1" and bits 7-1 give the number of additional length octets. Second and following octets give the length, base 256, most significant digit first. +func MarshalLengthBytes(l int) []byte { + if l <= 127 { + return []byte{byte(l)} + } + var b []byte + p := 1 + for i := 1; i < 127; { + b = append([]byte{byte((l % (p * 256)) / p)}, b...) + p = p * 256 + l = l - l%p + if l <= 0 { + break + } + } + return append([]byte{byte(128 + len(b))}, b...) +} + +// GetLengthFromASN returns the length of a slice of ASN1 encoded bytes from the ASN1 length header it contains. +func GetLengthFromASN(b []byte) int { + if int(b[1]) <= 127 { + return int(b[1]) + } + // The bytes that indicate the length + lb := b[2 : 2+int(b[1])-128] + base := 1 + l := 0 + for i := len(lb) - 1; i >= 0; i-- { + l += int(lb[i]) * base + base = base * 256 + } + return l +} + +// GetNumberBytesInLengthHeader returns the number of bytes in the ASn1 header that indicate the length. +func GetNumberBytesInLengthHeader(b []byte) int { + if int(b[1]) <= 127 { + return 1 + } + // The bytes that indicate the length + return 1 + int(b[1]) - 128 +} + +// AddASNAppTag adds an ASN1 encoding application tag value to the raw bytes provided. +func AddASNAppTag(b []byte, tag int) []byte { + r := asn1.RawValue{ + Class: asn1.ClassApplication, + IsCompound: true, + Tag: tag, + Bytes: b, + } + ab, _ := asn1.Marshal(r) + return ab +} + +/* +// The Marshal method of golang's asn1 package does not enable you to define wrapping the output in an application tag. +// This method adds that wrapping tag. +func AddASNAppTag(b []byte, tag int) []byte { + // The ASN1 wrapping consists of 2 bytes: + // 1st byte -> Identifier Octet - Application Tag + // 2nd byte -> The length (this will be the size indicated in the input bytes + 2 for the additional bytes we add here. + // Application Tag: + //| Bit: | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | + //| Value: | 0 | 1 | 1 | From the RFC spec 4120 | + //| Explanation | Defined by the ASN1 encoding rules for an application tag | A value of 1 indicates a constructed type | The ASN Application tag value | + // Therefore the value of the byte is an integer = ( Application tag value + 96 ) + //b = append(MarshalLengthBytes(int(b[1])+2), b...) + b = append(MarshalLengthBytes(len(b)), b...) + b = append([]byte{byte(96 + tag)}, b...) 
+ return b +} +*/ diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/ASExchange.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/ASExchange.go new file mode 100644 index 00000000..9d1a2f3f --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/ASExchange.go @@ -0,0 +1,189 @@ +package client + +import ( + "gopkg.in/jcmturner/gokrb5.v7/crypto" + "gopkg.in/jcmturner/gokrb5.v7/crypto/etype" + "gopkg.in/jcmturner/gokrb5.v7/iana/errorcode" + "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage" + "gopkg.in/jcmturner/gokrb5.v7/iana/patype" + "gopkg.in/jcmturner/gokrb5.v7/krberror" + "gopkg.in/jcmturner/gokrb5.v7/messages" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +// ASExchange performs an AS exchange for the client to retrieve a TGT. +func (cl *Client) ASExchange(realm string, ASReq messages.ASReq, referral int) (messages.ASRep, error) { + if ok, err := cl.IsConfigured(); !ok { + return messages.ASRep{}, krberror.Errorf(err, krberror.ConfigError, "AS Exchange cannot be performed") + } + + // Set PAData if required + err := setPAData(cl, nil, &ASReq) + if err != nil { + return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "AS Exchange Error: issue with setting PAData on AS_REQ") + } + + b, err := ASReq.Marshal() + if err != nil { + return messages.ASRep{}, krberror.Errorf(err, krberror.EncodingError, "AS Exchange Error: failed marshaling AS_REQ") + } + var ASRep messages.ASRep + + rb, err := cl.sendToKDC(b, realm) + if err != nil { + if e, ok := err.(messages.KRBError); ok { + switch e.ErrorCode { + case errorcode.KDC_ERR_PREAUTH_REQUIRED, errorcode.KDC_ERR_PREAUTH_FAILED: + // From now on assume this client will need to do this pre-auth and set the PAData + cl.settings.assumePreAuthentication = true + err = setPAData(cl, &e, &ASReq) + if err != nil { + return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "AS Exchange Error: failed setting AS_REQ PAData for pre-authentication required") + } + b, err := ASReq.Marshal() + if err != nil { + return messages.ASRep{}, krberror.Errorf(err, krberror.EncodingError, "AS Exchange Error: failed marshaling AS_REQ with PAData") + } + rb, err = cl.sendToKDC(b, realm) + if err != nil { + if _, ok := err.(messages.KRBError); ok { + return messages.ASRep{}, krberror.Errorf(err, krberror.KDCError, "AS Exchange Error: kerberos error response from KDC") + } + return messages.ASRep{}, krberror.Errorf(err, krberror.NetworkingError, "AS Exchange Error: failed sending AS_REQ to KDC") + } + case errorcode.KDC_ERR_WRONG_REALM: + // Client referral https://tools.ietf.org/html/rfc6806.html#section-7 + if referral > 5 { + return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "maximum number of client referrals exceeded") + } + referral++ + return cl.ASExchange(e.CRealm, ASReq, referral) + default: + return messages.ASRep{}, krberror.Errorf(err, krberror.KDCError, "AS Exchange Error: kerberos error response from KDC") + } + } else { + return messages.ASRep{}, krberror.Errorf(err, krberror.NetworkingError, "AS Exchange Error: failed sending AS_REQ to KDC") + } + } + err = ASRep.Unmarshal(rb) + if err != nil { + return messages.ASRep{}, krberror.Errorf(err, krberror.EncodingError, "AS Exchange Error: failed to process the AS_REP") + } + if ok, err := ASRep.Verify(cl.Config, cl.Credentials, ASReq); !ok { + return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "AS Exchange Error: AS_REP is not valid or client password/keytab incorrect") + } + return ASRep, nil +} + +// setPAData adds pre-authentication data to the 
AS_REQ. +func setPAData(cl *Client, krberr *messages.KRBError, ASReq *messages.ASReq) error { + if !cl.settings.DisablePAFXFAST() { + pa := types.PAData{PADataType: patype.PA_REQ_ENC_PA_REP} + ASReq.PAData = append(ASReq.PAData, pa) + } + if cl.settings.AssumePreAuthentication() { + // Identify the etype to use to encrypt the PA Data + var et etype.EType + var err error + var key types.EncryptionKey + if krberr == nil { + // This is not in response to an error from the KDC. It is preemptive or renewal + // There is no KRB Error that tells us the etype to use + etn := cl.settings.preAuthEType // Use the etype that may have previously been negotiated + if etn == 0 { + etn = int32(cl.Config.LibDefaults.PreferredPreauthTypes[0]) // Resort to config + } + et, err = crypto.GetEtype(etn) + if err != nil { + return krberror.Errorf(err, krberror.EncryptingError, "error getting etype for pre-auth encryption") + } + key, err = cl.Key(et, nil) + if err != nil { + return krberror.Errorf(err, krberror.EncryptingError, "error getting key from credentials") + } + } else { + // Get the etype to use from the PA data in the KRBError e-data + et, err = preAuthEType(krberr) + if err != nil { + return krberror.Errorf(err, krberror.EncryptingError, "error getting etype for pre-auth encryption") + } + cl.settings.preAuthEType = et.GetETypeID() // Set the etype that has been defined for potential future use + key, err = cl.Key(et, krberr) + if err != nil { + return krberror.Errorf(err, krberror.EncryptingError, "error getting key from credentials") + } + } + // Generate the PA data + paTSb, err := types.GetPAEncTSEncAsnMarshalled() + if err != nil { + return krberror.Errorf(err, krberror.KRBMsgError, "error creating PAEncTSEnc for Pre-Authentication") + } + //TODO (theme: KVNO from keytab) the kvno should not be hard coded to 1 as this hampers troubleshooting. + paEncTS, err := crypto.GetEncryptedData(paTSb, key, keyusage.AS_REQ_PA_ENC_TIMESTAMP, 1) + if err != nil { + return krberror.Errorf(err, krberror.EncryptingError, "error encrypting pre-authentication timestamp") + } + pb, err := paEncTS.Marshal() + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error marshaling the PAEncTSEnc encrypted data") + } + pa := types.PAData{ + PADataType: patype.PA_ENC_TIMESTAMP, + PADataValue: pb, + } + // Look for and delete any exiting patype.PA_ENC_TIMESTAMP + for i, pa := range ASReq.PAData { + if pa.PADataType == patype.PA_ENC_TIMESTAMP { + ASReq.PAData[i] = ASReq.PAData[len(ASReq.PAData)-1] + ASReq.PAData = ASReq.PAData[:len(ASReq.PAData)-1] + } + } + ASReq.PAData = append(ASReq.PAData, pa) + } + return nil +} + +// preAuthEType establishes what encryption type to use for pre-authentication from the KRBError returned from the KDC. +func preAuthEType(krberr *messages.KRBError) (etype etype.EType, err error) { + //The preferred ordering of the "hint" pre-authentication data that + //affect client key selection is: ETYPE-INFO2, followed by ETYPE-INFO, + //followed by PW-SALT. + //A KDC SHOULD NOT send PA-PW-SALT when issuing a KRB-ERROR message + //that requests additional pre-authentication. Implementation note: + //Some KDC implementations issue an erroneous PA-PW-SALT when issuing a + //KRB-ERROR message that requests additional pre-authentication. + //Therefore, clients SHOULD ignore a PA-PW-SALT accompanying a + //KRB-ERROR message that requests additional pre-authentication. 
+ var etypeID int32 + var pas types.PADataSequence + e := pas.Unmarshal(krberr.EData) + if e != nil { + err = krberror.Errorf(e, krberror.EncodingError, "error unmashalling KRBError data") + return + } + for _, pa := range pas { + switch pa.PADataType { + case patype.PA_ETYPE_INFO2: + info, e := pa.GetETypeInfo2() + if e != nil { + err = krberror.Errorf(e, krberror.EncodingError, "error unmashalling ETYPE-INFO2 data") + return + } + etypeID = info[0].EType + break + case patype.PA_ETYPE_INFO: + info, e := pa.GetETypeInfo() + if e != nil { + err = krberror.Errorf(e, krberror.EncodingError, "error unmashalling ETYPE-INFO data") + return + } + etypeID = info[0].EType + } + } + etype, e = crypto.GetEtype(etypeID) + if e != nil { + err = krberror.Errorf(e, krberror.EncryptingError, "error creating etype") + return + } + return etype, nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/TGSExchange.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/TGSExchange.go new file mode 100644 index 00000000..93ff1dba --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/TGSExchange.go @@ -0,0 +1,103 @@ +package client + +import ( + "gopkg.in/jcmturner/gokrb5.v7/iana/flags" + "gopkg.in/jcmturner/gokrb5.v7/iana/nametype" + "gopkg.in/jcmturner/gokrb5.v7/krberror" + "gopkg.in/jcmturner/gokrb5.v7/messages" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +// TGSREQGenerateAndExchange generates the TGS_REQ and performs a TGS exchange to retrieve a ticket to the specified SPN. +func (cl *Client) TGSREQGenerateAndExchange(spn types.PrincipalName, kdcRealm string, tgt messages.Ticket, sessionKey types.EncryptionKey, renewal bool) (tgsReq messages.TGSReq, tgsRep messages.TGSRep, err error) { + tgsReq, err = messages.NewTGSReq(cl.Credentials.CName(), kdcRealm, cl.Config, tgt, sessionKey, spn, renewal) + if err != nil { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.KRBMsgError, "TGS Exchange Error: failed to generate a new TGS_REQ") + } + return cl.TGSExchange(tgsReq, kdcRealm, tgsRep.Ticket, sessionKey, 0) +} + +// TGSExchange exchanges the provided TGS_REQ with the KDC to retrieve a TGS_REP. +// Referrals are automatically handled. +// The client's cache is updated with the ticket received. 
+func (cl *Client) TGSExchange(tgsReq messages.TGSReq, kdcRealm string, tgt messages.Ticket, sessionKey types.EncryptionKey, referral int) (messages.TGSReq, messages.TGSRep, error) { + var tgsRep messages.TGSRep + b, err := tgsReq.Marshal() + if err != nil { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: failed to marshal TGS_REQ") + } + r, err := cl.sendToKDC(b, kdcRealm) + if err != nil { + if _, ok := err.(messages.KRBError); ok { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.KDCError, "TGS Exchange Error: kerberos error response from KDC when requesting for %s", tgsReq.ReqBody.SName.PrincipalNameString()) + } + return tgsReq, tgsRep, krberror.Errorf(err, krberror.NetworkingError, "TGS Exchange Error: issue sending TGS_REQ to KDC") + } + err = tgsRep.Unmarshal(r) + if err != nil { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: failed to process the TGS_REP") + } + err = tgsRep.DecryptEncPart(sessionKey) + if err != nil { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: failed to process the TGS_REP") + } + if ok, err := tgsRep.Verify(cl.Config, tgsReq); !ok { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: TGS_REP is not valid") + } + + if tgsRep.Ticket.SName.NameString[0] == "krbtgt" && !tgsRep.Ticket.SName.Equal(tgsReq.ReqBody.SName) { + if referral > 5 { + return tgsReq, tgsRep, krberror.Errorf(err, krberror.KRBMsgError, "TGS Exchange Error: maximum number of referrals exceeded") + } + // Server referral https://tools.ietf.org/html/rfc6806.html#section-8 + // The TGS Rep contains a TGT for another domain as the service resides in that domain. + cl.addSession(tgsRep.Ticket, tgsRep.DecryptedEncPart) + realm := tgsRep.Ticket.SName.NameString[len(tgsRep.Ticket.SName.NameString)-1] + referral++ + if types.IsFlagSet(&tgsReq.ReqBody.KDCOptions, flags.EncTktInSkey) && len(tgsReq.ReqBody.AdditionalTickets) > 0 { + tgsReq, err = messages.NewUser2UserTGSReq(cl.Credentials.CName(), kdcRealm, cl.Config, tgt, sessionKey, tgsReq.ReqBody.SName, tgsReq.Renewal, tgsReq.ReqBody.AdditionalTickets[0]) + if err != nil { + return tgsReq, tgsRep, err + } + } + tgsReq, err = messages.NewTGSReq(cl.Credentials.CName(), realm, cl.Config, tgsRep.Ticket, tgsRep.DecryptedEncPart.Key, tgsReq.ReqBody.SName, tgsReq.Renewal) + if err != nil { + return tgsReq, tgsRep, err + } + return cl.TGSExchange(tgsReq, realm, tgsRep.Ticket, tgsRep.DecryptedEncPart.Key, referral) + } + cl.cache.addEntry( + tgsRep.Ticket, + tgsRep.DecryptedEncPart.AuthTime, + tgsRep.DecryptedEncPart.StartTime, + tgsRep.DecryptedEncPart.EndTime, + tgsRep.DecryptedEncPart.RenewTill, + tgsRep.DecryptedEncPart.Key, + ) + cl.Log("ticket added to cache for %s (EndTime: %v)", tgsRep.Ticket.SName.PrincipalNameString(), tgsRep.DecryptedEncPart.EndTime) + return tgsReq, tgsRep, err +} + +// GetServiceTicket makes a request to get a service ticket for the SPN specified +// SPN format: / Eg. 
HTTP/www.example.com +// The ticket will be added to the client's ticket cache +func (cl *Client) GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) { + var tkt messages.Ticket + var skey types.EncryptionKey + if tkt, skey, ok := cl.GetCachedTicket(spn); ok { + // Already a valid ticket in the cache + return tkt, skey, nil + } + princ := types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, spn) + realm := cl.Config.ResolveRealm(princ.NameString[len(princ.NameString)-1]) + + tgt, skey, err := cl.sessionTGT(realm) + if err != nil { + return tkt, skey, err + } + _, tgsRep, err := cl.TGSREQGenerateAndExchange(princ, realm, tgt, skey, false) + if err != nil { + return tkt, skey, err + } + return tgsRep.Ticket, tgsRep.DecryptedEncPart.Key, nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/cache.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/cache.go new file mode 100644 index 00000000..07b4a01d --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/cache.go @@ -0,0 +1,110 @@ +package client + +import ( + "errors" + "sync" + "time" + + "gopkg.in/jcmturner/gokrb5.v7/messages" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +// Cache for service tickets held by the client. +type Cache struct { + Entries map[string]CacheEntry + mux sync.RWMutex +} + +// CacheEntry holds details for a cache entry. +type CacheEntry struct { + Ticket messages.Ticket + AuthTime time.Time + StartTime time.Time + EndTime time.Time + RenewTill time.Time + SessionKey types.EncryptionKey +} + +// NewCache creates a new client ticket cache instance. +func NewCache() *Cache { + return &Cache{ + Entries: map[string]CacheEntry{}, + } +} + +// getEntry returns a cache entry that matches the SPN. +func (c *Cache) getEntry(spn string) (CacheEntry, bool) { + c.mux.RLock() + defer c.mux.RUnlock() + e, ok := (*c).Entries[spn] + return e, ok +} + +// addEntry adds a ticket to the cache. +func (c *Cache) addEntry(tkt messages.Ticket, authTime, startTime, endTime, renewTill time.Time, sessionKey types.EncryptionKey) CacheEntry { + spn := tkt.SName.PrincipalNameString() + c.mux.Lock() + defer c.mux.Unlock() + (*c).Entries[spn] = CacheEntry{ + Ticket: tkt, + AuthTime: authTime, + StartTime: startTime, + EndTime: endTime, + RenewTill: renewTill, + SessionKey: sessionKey, + } + return c.Entries[spn] +} + +// clear deletes all the cache entries +func (c *Cache) clear() { + c.mux.Lock() + defer c.mux.Unlock() + for k := range c.Entries { + delete(c.Entries, k) + } +} + +// RemoveEntry removes the cache entry for the defined SPN. +func (c *Cache) RemoveEntry(spn string) { + c.mux.Lock() + defer c.mux.Unlock() + delete(c.Entries, spn) +} + +// GetCachedTicket returns a ticket from the cache for the SPN. +// Only a ticket that is currently valid will be returned. +func (cl *Client) GetCachedTicket(spn string) (messages.Ticket, types.EncryptionKey, bool) { + if e, ok := cl.cache.getEntry(spn); ok { + //If within time window of ticket return it + if time.Now().UTC().After(e.StartTime) && time.Now().UTC().Before(e.EndTime) { + cl.Log("ticket received from cache for %s", spn) + return e.Ticket, e.SessionKey, true + } else if time.Now().UTC().Before(e.RenewTill) { + e, err := cl.renewTicket(e) + if err != nil { + return e.Ticket, e.SessionKey, false + } + return e.Ticket, e.SessionKey, true + } + } + var tkt messages.Ticket + var key types.EncryptionKey + return tkt, key, false +} + +// renewTicket renews a cache entry ticket. 
+// To renew from outside the client package use GetCachedTicket +func (cl *Client) renewTicket(e CacheEntry) (CacheEntry, error) { + spn := e.Ticket.SName + _, _, err := cl.TGSREQGenerateAndExchange(spn, e.Ticket.Realm, e.Ticket, e.SessionKey, true) + if err != nil { + return e, err + } + e, ok := cl.cache.getEntry(e.Ticket.SName.PrincipalNameString()) + if !ok { + return e, errors.New("ticket was not added to cache") + } + cl.Log("ticket renewed for %s (EndTime: %v)", spn.PrincipalNameString(), e.EndTime) + return e, nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/client.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/client.go new file mode 100644 index 00000000..cc931748 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/client.go @@ -0,0 +1,241 @@ +// Package client provides a client library and methods for Kerberos 5 authentication. +package client + +import ( + "errors" + "fmt" + "time" + + "gopkg.in/jcmturner/gokrb5.v7/config" + "gopkg.in/jcmturner/gokrb5.v7/credentials" + "gopkg.in/jcmturner/gokrb5.v7/crypto" + "gopkg.in/jcmturner/gokrb5.v7/crypto/etype" + "gopkg.in/jcmturner/gokrb5.v7/iana/errorcode" + "gopkg.in/jcmturner/gokrb5.v7/iana/nametype" + "gopkg.in/jcmturner/gokrb5.v7/keytab" + "gopkg.in/jcmturner/gokrb5.v7/krberror" + "gopkg.in/jcmturner/gokrb5.v7/messages" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +// Client side configuration and state. +type Client struct { + Credentials *credentials.Credentials + Config *config.Config + settings *Settings + sessions *sessions + cache *Cache +} + +// NewClientWithPassword creates a new client from a password credential. +// Set the realm to empty string to use the default realm from config. +func NewClientWithPassword(username, realm, password string, krb5conf *config.Config, settings ...func(*Settings)) *Client { + creds := credentials.New(username, realm) + return &Client{ + Credentials: creds.WithPassword(password), + Config: krb5conf, + settings: NewSettings(settings...), + sessions: &sessions{ + Entries: make(map[string]*session), + }, + cache: NewCache(), + } +} + +// NewClientWithKeytab creates a new client from a keytab credential. +func NewClientWithKeytab(username, realm string, kt *keytab.Keytab, krb5conf *config.Config, settings ...func(*Settings)) *Client { + creds := credentials.New(username, realm) + return &Client{ + Credentials: creds.WithKeytab(kt), + Config: krb5conf, + settings: NewSettings(settings...), + sessions: &sessions{ + Entries: make(map[string]*session), + }, + cache: NewCache(), + } +} + +// NewClientFromCCache create a client from a populated client cache. +// +// WARNING: A client created from CCache does not automatically renew TGTs and a failure will occur after the TGT expires. 
+func NewClientFromCCache(c *credentials.CCache, krb5conf *config.Config, settings ...func(*Settings)) (*Client, error) { + cl := &Client{ + Credentials: c.GetClientCredentials(), + Config: krb5conf, + settings: NewSettings(settings...), + sessions: &sessions{ + Entries: make(map[string]*session), + }, + cache: NewCache(), + } + spn := types.PrincipalName{ + NameType: nametype.KRB_NT_SRV_INST, + NameString: []string{"krbtgt", c.DefaultPrincipal.Realm}, + } + cred, ok := c.GetEntry(spn) + if !ok { + return cl, errors.New("TGT not found in CCache") + } + var tgt messages.Ticket + err := tgt.Unmarshal(cred.Ticket) + if err != nil { + return cl, fmt.Errorf("TGT bytes in cache are not valid: %v", err) + } + cl.sessions.Entries[c.DefaultPrincipal.Realm] = &session{ + realm: c.DefaultPrincipal.Realm, + authTime: cred.AuthTime, + endTime: cred.EndTime, + renewTill: cred.RenewTill, + tgt: tgt, + sessionKey: cred.Key, + } + for _, cred := range c.GetEntries() { + var tkt messages.Ticket + err = tkt.Unmarshal(cred.Ticket) + if err != nil { + return cl, fmt.Errorf("cache entry ticket bytes are not valid: %v", err) + } + cl.cache.addEntry( + tkt, + cred.AuthTime, + cred.StartTime, + cred.EndTime, + cred.RenewTill, + cred.Key, + ) + } + return cl, nil +} + +// Key returns the client's encryption key for the specified encryption type. +// The key can be retrieved either from the keytab or generated from the client's password. +// If the client has both a keytab and a password defined the keytab is favoured as the source for the key +// A KRBError can be passed in the event the KDC returns one of type KDC_ERR_PREAUTH_REQUIRED and is required to derive +// the key for pre-authentication from the client's password. If a KRBError is not available, pass nil to this argument. +func (cl *Client) Key(etype etype.EType, krberr *messages.KRBError) (types.EncryptionKey, error) { + if cl.Credentials.HasKeytab() && etype != nil { + return cl.Credentials.Keytab().GetEncryptionKey(cl.Credentials.CName(), cl.Credentials.Domain(), 0, etype.GetETypeID()) + } else if cl.Credentials.HasPassword() { + if krberr != nil && krberr.ErrorCode == errorcode.KDC_ERR_PREAUTH_REQUIRED { + var pas types.PADataSequence + err := pas.Unmarshal(krberr.EData) + if err != nil { + return types.EncryptionKey{}, fmt.Errorf("could not get PAData from KRBError to generate key from password: %v", err) + } + key, _, err := crypto.GetKeyFromPassword(cl.Credentials.Password(), krberr.CName, krberr.CRealm, etype.GetETypeID(), pas) + return key, err + } + key, _, err := crypto.GetKeyFromPassword(cl.Credentials.Password(), cl.Credentials.CName(), cl.Credentials.Domain(), etype.GetETypeID(), types.PADataSequence{}) + return key, err + } + return types.EncryptionKey{}, errors.New("credential has neither keytab or password to generate key") +} + +// IsConfigured indicates if the client has the values required set. 
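NewClientFromCCache pairs with the credential cache loader added in credentials/ccache.go further down in this diff. A minimal sketch of building a client from an existing cache file; the cache path is a placeholder (the library's LibDefaults comments note /tmp/krb5cc_%{uid} as the conventional default), and the client, credentials and config package imports are assumed:

// newCCacheClient builds a client from a previously obtained credential cache.
// cfg is assumed to be a *config.Config parsed from krb5.conf.
func newCCacheClient(cfg *config.Config) (*client.Client, error) {
	cc, err := credentials.LoadCCache("/tmp/krb5cc_1000") // placeholder ccache path
	if err != nil {
		return nil, err
	}
	// Per the warning above, a ccache-backed client cannot renew its TGT,
	// so calls will start failing once the cached TGT expires.
	return client.NewClientFromCCache(cc, cfg)
}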
+func (cl *Client) IsConfigured() (bool, error) { + if cl.Credentials.UserName() == "" { + return false, errors.New("client does not have a username") + } + if cl.Credentials.Domain() == "" { + return false, errors.New("client does not have a define realm") + } + // Client needs to have either a password, keytab or a session already (later when loading from CCache) + if !cl.Credentials.HasPassword() && !cl.Credentials.HasKeytab() { + authTime, _, _, _, err := cl.sessionTimes(cl.Credentials.Domain()) + if err != nil || authTime.IsZero() { + return false, errors.New("client has neither a keytab nor a password set and no session") + } + } + if !cl.Config.LibDefaults.DNSLookupKDC { + for _, r := range cl.Config.Realms { + if r.Realm == cl.Credentials.Domain() { + if len(r.KDC) > 0 { + return true, nil + } + return false, errors.New("client krb5 config does not have any defined KDCs for the default realm") + } + } + } + return true, nil +} + +// Login the client with the KDC via an AS exchange. +func (cl *Client) Login() error { + if ok, err := cl.IsConfigured(); !ok { + return err + } + if !cl.Credentials.HasPassword() && !cl.Credentials.HasKeytab() { + _, endTime, _, _, err := cl.sessionTimes(cl.Credentials.Domain()) + if err != nil { + return krberror.Errorf(err, krberror.KRBMsgError, "no user credentials available and error getting any existing session") + } + if time.Now().UTC().After(endTime) { + return krberror.NewKrberror(krberror.KRBMsgError, "cannot login, no user credentials available and no valid existing session") + } + // no credentials but there is a session with tgt already + return nil + } + ASReq, err := messages.NewASReqForTGT(cl.Credentials.Domain(), cl.Config, cl.Credentials.CName()) + if err != nil { + return krberror.Errorf(err, krberror.KRBMsgError, "error generating new AS_REQ") + } + ASRep, err := cl.ASExchange(cl.Credentials.Domain(), ASReq, 0) + if err != nil { + return err + } + cl.addSession(ASRep.Ticket, ASRep.DecryptedEncPart) + return nil +} + +// AffirmLogin will only perform an AS exchange with the KDC if the client does not already have a TGT. +func (cl *Client) AffirmLogin() error { + _, endTime, _, _, err := cl.sessionTimes(cl.Credentials.Domain()) + if err != nil || time.Now().UTC().After(endTime) { + err := cl.Login() + if err != nil { + return fmt.Errorf("could not get valid TGT for client's realm: %v", err) + } + } + return nil +} + +// realmLogin obtains or renews a TGT and establishes a session for the realm specified. +func (cl *Client) realmLogin(realm string) error { + if realm == cl.Credentials.Domain() { + return cl.Login() + } + _, endTime, _, _, err := cl.sessionTimes(cl.Credentials.Domain()) + if err != nil || time.Now().UTC().After(endTime) { + err := cl.Login() + if err != nil { + return fmt.Errorf("could not get valid TGT for client's realm: %v", err) + } + } + tgt, skey, err := cl.sessionTGT(cl.Credentials.Domain()) + if err != nil { + return err + } + + spn := types.PrincipalName{ + NameType: nametype.KRB_NT_SRV_INST, + NameString: []string{"krbtgt", realm}, + } + + _, tgsRep, err := cl.TGSREQGenerateAndExchange(spn, cl.Credentials.Domain(), tgt, skey, false) + if err != nil { + return err + } + cl.addSession(tgsRep.Ticket, tgsRep.DecryptedEncPart) + + return nil +} + +// Destroy stops the auto-renewal of all sessions and removes the sessions and cache entries from the client. 
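Login and GetServiceTicket together cover the common client flow: an AS exchange for the TGT followed by a TGS exchange for the requested SPN, with the resulting ticket added to the client's cache. A minimal sketch using a keytab; the config path, keytab path, principal and SPN are placeholders, and keytab.Load is assumed from the vendored keytab package (not shown in this hunk):

package main

import (
	"log"

	"gopkg.in/jcmturner/gokrb5.v7/client"
	"gopkg.in/jcmturner/gokrb5.v7/config"
	"gopkg.in/jcmturner/gokrb5.v7/keytab"
)

func main() {
	cfg, err := config.Load("/etc/krb5.conf") // conventional path, adjust as needed
	if err != nil {
		log.Fatal(err)
	}
	kt, err := keytab.Load("/etc/svc.keytab") // placeholder keytab path
	if err != nil {
		log.Fatal(err)
	}
	cl := client.NewClientWithKeytab("svc-account", "EXAMPLE.COM", kt, cfg)
	if err := cl.Login(); err != nil { // AS exchange for the TGT
		log.Fatal(err)
	}
	// TGS exchange; the service ticket is also added to the client's cache.
	tkt, key, err := cl.GetServiceTicket("HTTP/www.example.com")
	if err != nil {
		log.Fatal(err)
	}
	_ = key
	log.Printf("service ticket obtained for realm %s", tkt.Realm)
}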
+func (cl *Client) Destroy() { + creds := credentials.New("", "") + cl.sessions.destroy() + cl.cache.clear() + cl.Credentials = creds + cl.Log("client destroyed") +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/network.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/network.go new file mode 100644 index 00000000..493fb2f3 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/network.go @@ -0,0 +1,224 @@ +package client + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "time" + + "gopkg.in/jcmturner/gokrb5.v7/iana/errorcode" + "gopkg.in/jcmturner/gokrb5.v7/messages" +) + +// SendToKDC performs network actions to send data to the KDC. +func (cl *Client) sendToKDC(b []byte, realm string) ([]byte, error) { + var rb []byte + if cl.Config.LibDefaults.UDPPreferenceLimit == 1 { + //1 means we should always use TCP + rb, errtcp := cl.sendKDCTCP(realm, b) + if errtcp != nil { + if e, ok := errtcp.(messages.KRBError); ok { + return rb, e + } + return rb, fmt.Errorf("communication error with KDC via TCP: %v", errtcp) + } + return rb, nil + } + if len(b) <= cl.Config.LibDefaults.UDPPreferenceLimit { + //Try UDP first, TCP second + rb, errudp := cl.sendKDCUDP(realm, b) + if errudp != nil { + if e, ok := errudp.(messages.KRBError); ok && e.ErrorCode != errorcode.KRB_ERR_RESPONSE_TOO_BIG { + // Got a KRBError from KDC + // If this is not a KRB_ERR_RESPONSE_TOO_BIG we will return immediately otherwise will try TCP. + return rb, e + } + // Try TCP + r, errtcp := cl.sendKDCTCP(realm, b) + if errtcp != nil { + if e, ok := errtcp.(messages.KRBError); ok { + // Got a KRBError + return r, e + } + return r, fmt.Errorf("failed to communicate with KDC. Attempts made with UDP (%v) and then TCP (%v)", errudp, errtcp) + } + rb = r + } + return rb, nil + } + //Try TCP first, UDP second + rb, errtcp := cl.sendKDCTCP(realm, b) + if errtcp != nil { + if e, ok := errtcp.(messages.KRBError); ok { + // Got a KRBError from KDC so returning and not trying UDP. + return rb, e + } + rb, errudp := cl.sendKDCUDP(realm, b) + if errudp != nil { + if e, ok := errudp.(messages.KRBError); ok { + // Got a KRBError + return rb, e + } + return rb, fmt.Errorf("failed to communicate with KDC. Attempts made with TCP (%v) and then UDP (%v)", errtcp, errudp) + } + } + return rb, nil +} + +// dialKDCTCP establishes a UDP connection to a KDC. +func dialKDCUDP(count int, kdcs map[int]string) (*net.UDPConn, error) { + i := 1 + for i <= count { + udpAddr, err := net.ResolveUDPAddr("udp", kdcs[i]) + if err != nil { + return nil, fmt.Errorf("error resolving KDC address: %v", err) + } + + conn, err := net.DialTimeout("udp", udpAddr.String(), 5*time.Second) + if err == nil { + if err := conn.SetDeadline(time.Now().Add(5 * time.Second)); err != nil { + return nil, err + } + // conn is guaranteed to be a UDPConn + return conn.(*net.UDPConn), nil + } + i++ + } + return nil, errors.New("error in getting a UDP connection to any of the KDCs") +} + +// dialKDCTCP establishes a TCP connection to a KDC. 
+func dialKDCTCP(count int, kdcs map[int]string) (*net.TCPConn, error) { + i := 1 + for i <= count { + tcpAddr, err := net.ResolveTCPAddr("tcp", kdcs[i]) + if err != nil { + return nil, fmt.Errorf("error resolving KDC address: %v", err) + } + + conn, err := net.DialTimeout("tcp", tcpAddr.String(), 5*time.Second) + if err == nil { + if err := conn.SetDeadline(time.Now().Add(5 * time.Second)); err != nil { + return nil, err + } + // conn is guaranteed to be a TCPConn + return conn.(*net.TCPConn), nil + } + i++ + } + return nil, errors.New("error in getting a TCP connection to any of the KDCs") +} + +// sendKDCUDP sends bytes to the KDC via UDP. +func (cl *Client) sendKDCUDP(realm string, b []byte) ([]byte, error) { + var r []byte + count, kdcs, err := cl.Config.GetKDCs(realm, false) + if err != nil { + return r, err + } + conn, err := dialKDCUDP(count, kdcs) + if err != nil { + return r, err + } + r, err = cl.sendUDP(conn, b) + if err != nil { + return r, err + } + return checkForKRBError(r) +} + +// sendKDCTCP sends bytes to the KDC via TCP. +func (cl *Client) sendKDCTCP(realm string, b []byte) ([]byte, error) { + var r []byte + count, kdcs, err := cl.Config.GetKDCs(realm, true) + if err != nil { + return r, err + } + conn, err := dialKDCTCP(count, kdcs) + if err != nil { + return r, err + } + rb, err := cl.sendTCP(conn, b) + if err != nil { + return r, err + } + return checkForKRBError(rb) +} + +// sendUDP sends bytes to connection over UDP. +func (cl *Client) sendUDP(conn *net.UDPConn, b []byte) ([]byte, error) { + var r []byte + defer conn.Close() + _, err := conn.Write(b) + if err != nil { + return r, fmt.Errorf("error sending to (%s): %v", conn.RemoteAddr().String(), err) + } + udpbuf := make([]byte, 4096) + n, _, err := conn.ReadFrom(udpbuf) + r = udpbuf[:n] + if err != nil { + return r, fmt.Errorf("sending over UDP failed to %s: %v", conn.RemoteAddr().String(), err) + } + if len(r) < 1 { + return r, fmt.Errorf("no response data from %s", conn.RemoteAddr().String()) + } + return r, nil +} + +// sendTCP sends bytes to connection over TCP. +func (cl *Client) sendTCP(conn *net.TCPConn, b []byte) ([]byte, error) { + defer conn.Close() + var r []byte + /* + RFC https://tools.ietf.org/html/rfc4120#section-7.2.2 + Each request (KRB_KDC_REQ) and response (KRB_KDC_REP or KRB_ERROR) + sent over the TCP stream is preceded by the length of the request as + 4 octets in network byte order. The high bit of the length is + reserved for future expansion and MUST currently be set to zero. If + a KDC that does not understand how to interpret a set high bit of the + length encoding receives a request with the high order bit of the + length set, it MUST return a KRB-ERROR message with the error + KRB_ERR_FIELD_TOOLONG and MUST close the TCP stream. + NB: network byte order == big endian + */ + var buf bytes.Buffer + err := binary.Write(&buf, binary.BigEndian, uint32(len(b))) + if err != nil { + return r, err + } + b = append(buf.Bytes(), b...) 
+ + _, err = conn.Write(b) + if err != nil { + return r, fmt.Errorf("error sending to KDC (%s): %v", conn.RemoteAddr().String(), err) + } + + sh := make([]byte, 4, 4) + _, err = conn.Read(sh) + if err != nil { + return r, fmt.Errorf("error reading response size header: %v", err) + } + s := binary.BigEndian.Uint32(sh) + + rb := make([]byte, s, s) + _, err = io.ReadFull(conn, rb) + if err != nil { + return r, fmt.Errorf("error reading response: %v", err) + } + if len(rb) < 1 { + return r, fmt.Errorf("no response data from KDC %s", conn.RemoteAddr().String()) + } + return rb, nil +} + +// checkForKRBError checks if the response bytes from the KDC are a KRBError. +func checkForKRBError(b []byte) ([]byte, error) { + var KRBErr messages.KRBError + if err := KRBErr.Unmarshal(b); err == nil { + return b, KRBErr + } + return b, nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/passwd.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/passwd.go new file mode 100644 index 00000000..da838edc --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/passwd.go @@ -0,0 +1,95 @@ +package client + +import ( + "fmt" + "net" + + "gopkg.in/jcmturner/gokrb5.v7/kadmin" + "gopkg.in/jcmturner/gokrb5.v7/messages" +) + +// Kpasswd server response codes. +const ( + KRB5_KPASSWD_SUCCESS = 0 + KRB5_KPASSWD_MALFORMED = 1 + KRB5_KPASSWD_HARDERROR = 2 + KRB5_KPASSWD_AUTHERROR = 3 + KRB5_KPASSWD_SOFTERROR = 4 + KRB5_KPASSWD_ACCESSDENIED = 5 + KRB5_KPASSWD_BAD_VERSION = 6 + KRB5_KPASSWD_INITIAL_FLAG_NEEDED = 7 +) + +// ChangePasswd changes the password of the client to the value provided. +func (cl *Client) ChangePasswd(newPasswd string) (bool, error) { + ASReq, err := messages.NewASReqForChgPasswd(cl.Credentials.Domain(), cl.Config, cl.Credentials.CName()) + if err != nil { + return false, err + } + ASRep, err := cl.ASExchange(cl.Credentials.Domain(), ASReq, 0) + if err != nil { + return false, err + } + + msg, key, err := kadmin.ChangePasswdMsg(cl.Credentials.CName(), cl.Credentials.Domain(), newPasswd, ASRep.Ticket, ASRep.DecryptedEncPart.Key) + if err != nil { + return false, err + } + r, err := cl.sendToKPasswd(msg) + if err != nil { + return false, err + } + err = r.Decrypt(key) + if err != nil { + return false, err + } + if r.ResultCode != KRB5_KPASSWD_SUCCESS { + return false, fmt.Errorf("error response from kadmin: code: %d; result: %s; krberror: %v", r.ResultCode, r.Result, r.KRBError) + } + cl.Credentials.WithPassword(newPasswd) + return true, nil +} + +func (cl *Client) sendToKPasswd(msg kadmin.Request) (r kadmin.Reply, err error) { + _, kps, err := cl.Config.GetKpasswdServers(cl.Credentials.Domain(), true) + if err != nil { + return + } + addr := kps[1] + b, err := msg.Marshal() + if err != nil { + return + } + if len(b) <= cl.Config.LibDefaults.UDPPreferenceLimit { + return cl.sendKPasswdUDP(b, addr) + } + return cl.sendKPasswdTCP(b, addr) +} + +func (cl *Client) sendKPasswdTCP(b []byte, kadmindAddr string) (r kadmin.Reply, err error) { + tcpAddr, err := net.ResolveTCPAddr("tcp", kadmindAddr) + if err != nil { + return + } + conn, err := net.DialTCP("tcp", nil, tcpAddr) + if err != nil { + return + } + rb, err := cl.sendTCP(conn, b) + err = r.Unmarshal(rb) + return +} + +func (cl *Client) sendKPasswdUDP(b []byte, kadmindAddr string) (r kadmin.Reply, err error) { + udpAddr, err := net.ResolveUDPAddr("udp", kadmindAddr) + if err != nil { + return + } + conn, err := net.DialUDP("udp", nil, udpAddr) + if err != nil { + return + } + rb, err := cl.sendUDP(conn, b) + err = r.Unmarshal(rb) + return +} 
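sendTCP above implements the RFC 4120 section 7.2.2 framing quoted in its comment: each request sent over the TCP stream is preceded by its length as four octets in network byte order. A standalone sketch of that framing and unframing, independent of the client code:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// frame prepends the 4-octet big-endian length header required by RFC 4120 7.2.2.
func frame(msg []byte) []byte {
	hdr := make([]byte, 4)
	binary.BigEndian.PutUint32(hdr, uint32(len(msg)))
	return append(hdr, msg...)
}

// unframe reads one length-prefixed message back from a stream.
func unframe(r io.Reader) ([]byte, error) {
	hdr := make([]byte, 4)
	if _, err := io.ReadFull(r, hdr); err != nil {
		return nil, err
	}
	msg := make([]byte, binary.BigEndian.Uint32(hdr))
	_, err := io.ReadFull(r, msg)
	return msg, err
}

func main() {
	buf := bytes.NewBuffer(frame([]byte("KRB_KDC_REQ bytes")))
	msg, err := unframe(buf)
	fmt.Println(string(msg), err)
}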
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/session.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/session.go new file mode 100644 index 00000000..ec6c513d --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/session.go @@ -0,0 +1,255 @@ +package client + +import ( + "fmt" + "strings" + "sync" + "time" + + "gopkg.in/jcmturner/gokrb5.v7/iana/nametype" + "gopkg.in/jcmturner/gokrb5.v7/krberror" + "gopkg.in/jcmturner/gokrb5.v7/messages" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +// sessions hold TGTs and are keyed on the realm name +type sessions struct { + Entries map[string]*session + mux sync.RWMutex +} + +// destroy erases all sessions +func (s *sessions) destroy() { + s.mux.Lock() + defer s.mux.Unlock() + for k, e := range s.Entries { + e.destroy() + delete(s.Entries, k) + } +} + +// update replaces a session with the one provided or adds it as a new one +func (s *sessions) update(sess *session) { + s.mux.Lock() + defer s.mux.Unlock() + // if a session already exists for this, cancel its auto renew. + if i, ok := s.Entries[sess.realm]; ok { + if i != sess { + // Session in the sessions cache is not the same as one provided. + // Cancel the one in the cache and add this one. + i.mux.Lock() + defer i.mux.Unlock() + i.cancel <- true + s.Entries[sess.realm] = sess + return + } + } + // No session for this realm was found so just add it + s.Entries[sess.realm] = sess +} + +// get returns the session for the realm specified +func (s *sessions) get(realm string) (*session, bool) { + s.mux.RLock() + defer s.mux.RUnlock() + sess, ok := s.Entries[realm] + return sess, ok +} + +// session holds the TGT details for a realm +type session struct { + realm string + authTime time.Time + endTime time.Time + renewTill time.Time + tgt messages.Ticket + sessionKey types.EncryptionKey + sessionKeyExpiration time.Time + cancel chan bool + mux sync.RWMutex +} + +// AddSession adds a session for a realm with a TGT to the client's session cache. +// A goroutine is started to automatically renew the TGT before expiry. 
+func (cl *Client) addSession(tgt messages.Ticket, dep messages.EncKDCRepPart) { + if strings.ToLower(tgt.SName.NameString[0]) != "krbtgt" { + // Not a TGT + return + } + realm := tgt.SName.NameString[len(tgt.SName.NameString)-1] + s := &session{ + realm: realm, + authTime: dep.AuthTime, + endTime: dep.EndTime, + renewTill: dep.RenewTill, + tgt: tgt, + sessionKey: dep.Key, + sessionKeyExpiration: dep.KeyExpiration, + } + cl.sessions.update(s) + cl.enableAutoSessionRenewal(s) + cl.Log("TGT session added for %s (EndTime: %v)", realm, dep.EndTime) +} + +// update overwrites the session details with those from the TGT and decrypted encPart +func (s *session) update(tgt messages.Ticket, dep messages.EncKDCRepPart) { + s.mux.Lock() + defer s.mux.Unlock() + s.authTime = dep.AuthTime + s.endTime = dep.EndTime + s.renewTill = dep.RenewTill + s.tgt = tgt + s.sessionKey = dep.Key + s.sessionKeyExpiration = dep.KeyExpiration +} + +// destroy will cancel any auto renewal of the session and set the expiration times to the current time +func (s *session) destroy() { + s.mux.Lock() + defer s.mux.Unlock() + if s.cancel != nil { + s.cancel <- true + } + s.endTime = time.Now().UTC() + s.renewTill = s.endTime + s.sessionKeyExpiration = s.endTime +} + +// valid informs if the TGT is still within the valid time window +func (s *session) valid() bool { + s.mux.RLock() + defer s.mux.RUnlock() + t := time.Now().UTC() + if t.Before(s.endTime) && s.authTime.Before(t) { + return true + } + return false +} + +// tgtDetails is a thread safe way to get the session's realm, TGT and session key values +func (s *session) tgtDetails() (string, messages.Ticket, types.EncryptionKey) { + s.mux.RLock() + defer s.mux.RUnlock() + return s.realm, s.tgt, s.sessionKey +} + +// timeDetails is a thread safe way to get the session's validity time values +func (s *session) timeDetails() (string, time.Time, time.Time, time.Time, time.Time) { + s.mux.RLock() + defer s.mux.RUnlock() + return s.realm, s.authTime, s.endTime, s.renewTill, s.sessionKeyExpiration +} + +// enableAutoSessionRenewal turns on the automatic renewal for the client's TGT session. +func (cl *Client) enableAutoSessionRenewal(s *session) { + var timer *time.Timer + s.mux.Lock() + s.cancel = make(chan bool, 1) + s.mux.Unlock() + go func(s *session) { + for { + s.mux.RLock() + w := (s.endTime.Sub(time.Now().UTC()) * 5) / 6 + s.mux.RUnlock() + if w < 0 { + return + } + timer = time.NewTimer(w) + select { + case <-timer.C: + renewal, err := cl.refreshSession(s) + if err != nil { + cl.Log("error refreshing session: %v", err) + } + if !renewal && err == nil { + // end this goroutine as there will have been a new login and new auto renewal goroutine created. + return + } + case <-s.cancel: + // cancel has been called. Stop the timer and exit. + timer.Stop() + return + } + } + }(s) +} + +// renewTGT renews the client's TGT session. 
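enableAutoSessionRenewal above waits for five sixths of the time remaining on the TGT before attempting a refresh. A small worked sketch of that arithmetic; the ten hour lifetime is an arbitrary example:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Same computation as enableAutoSessionRenewal: wait (endTime - now) * 5/6.
	endTime := time.Now().UTC().Add(10 * time.Hour) // e.g. a 10 hour TGT
	w := (endTime.Sub(time.Now().UTC()) * 5) / 6
	fmt.Println(w.Round(time.Minute)) // ~8h20m: renewal fires well before expiry
}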
+func (cl *Client) renewTGT(s *session) error { + realm, tgt, skey := s.tgtDetails() + spn := types.PrincipalName{ + NameType: nametype.KRB_NT_SRV_INST, + NameString: []string{"krbtgt", realm}, + } + _, tgsRep, err := cl.TGSREQGenerateAndExchange(spn, cl.Credentials.Domain(), tgt, skey, true) + if err != nil { + return krberror.Errorf(err, krberror.KRBMsgError, "error renewing TGT for %s", realm) + } + s.update(tgsRep.Ticket, tgsRep.DecryptedEncPart) + cl.sessions.update(s) + cl.Log("TGT session renewed for %s (EndTime: %v)", realm, tgsRep.DecryptedEncPart.EndTime) + return nil +} + +// refreshSession updates either through renewal or creating a new login. +// The boolean indicates if the update was a renewal. +func (cl *Client) refreshSession(s *session) (bool, error) { + s.mux.RLock() + realm := s.realm + renewTill := s.renewTill + s.mux.RUnlock() + cl.Log("refreshing TGT session for %s", realm) + if time.Now().UTC().Before(renewTill) { + err := cl.renewTGT(s) + return true, err + } + err := cl.realmLogin(realm) + return false, err +} + +// ensureValidSession makes sure there is a valid session for the realm +func (cl *Client) ensureValidSession(realm string) error { + s, ok := cl.sessions.get(realm) + if ok { + s.mux.RLock() + d := s.endTime.Sub(s.authTime) / 6 + if s.endTime.Sub(time.Now().UTC()) > d { + s.mux.RUnlock() + return nil + } + s.mux.RUnlock() + _, err := cl.refreshSession(s) + return err + } + return cl.realmLogin(realm) +} + +// sessionTGTDetails is a thread safe way to get the TGT and session key values for a realm +func (cl *Client) sessionTGT(realm string) (tgt messages.Ticket, sessionKey types.EncryptionKey, err error) { + err = cl.ensureValidSession(realm) + if err != nil { + return + } + s, ok := cl.sessions.get(realm) + if !ok { + err = fmt.Errorf("could not find TGT session for %s", realm) + return + } + _, tgt, sessionKey = s.tgtDetails() + return +} + +func (cl *Client) sessionTimes(realm string) (authTime, endTime, renewTime, sessionExp time.Time, err error) { + s, ok := cl.sessions.get(realm) + if !ok { + err = fmt.Errorf("could not find TGT session for %s", realm) + return + } + _, authTime, endTime, renewTime, sessionExp = s.timeDetails() + return +} + +// spnRealm resolves the realm name of a service principal name +func (cl *Client) spnRealm(spn types.PrincipalName) string { + return cl.Config.ResolveRealm(spn.NameString[len(spn.NameString)-1]) +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/settings.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/settings.go new file mode 100644 index 00000000..12c04c45 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/settings.go @@ -0,0 +1,69 @@ +package client + +import "log" + +// Settings holds optional client settings. +type Settings struct { + disablePAFXFast bool + assumePreAuthentication bool + preAuthEType int32 + logger *log.Logger +} + +// NewSettings creates a new client settings struct. +func NewSettings(settings ...func(*Settings)) *Settings { + s := new(Settings) + for _, set := range settings { + set(s) + } + return s +} + +// DisablePAFXFAST used to configure the client to not use PA_FX_FAST. +// +// s := NewSettings(DisablePAFXFAST(true)) +func DisablePAFXFAST(b bool) func(*Settings) { + return func(s *Settings) { + s.disablePAFXFast = b + } +} + +// DisablePAFXFAST indicates is the client should disable the use of PA_FX_FAST. 
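Alongside DisablePAFXFAST, settings.go defines AssumePreAuthentication and Logger options further down; attaching a logger is what makes the cl.Log calls scattered through the client visible. A minimal sketch, assuming cfg is a parsed *config.Config and the client, log and os packages are imported; credentials are placeholders:

l := log.New(os.Stderr, "gokrb5 client: ", log.Ldate|log.Ltime)
cl := client.NewClientWithPassword("user", "EXAMPLE.COM", "password", cfg,
	client.Logger(l), // client events such as ticket caching and TGT renewal are now logged
)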
+func (s *Settings) DisablePAFXFAST() bool { + return s.disablePAFXFast +} + +// AssumePreAuthentication used to configure the client to assume pre-authentication is required. +// +// s := NewSettings(AssumePreAuthentication(true)) +func AssumePreAuthentication(b bool) func(*Settings) { + return func(s *Settings) { + s.assumePreAuthentication = b + } +} + +// AssumePreAuthentication indicates if the client should proactively assume using pre-authentication. +func (s *Settings) AssumePreAuthentication() bool { + return s.assumePreAuthentication +} + +// Logger used to configure client with a logger. +// +// s := NewSettings(kt, Logger(l)) +func Logger(l *log.Logger) func(*Settings) { + return func(s *Settings) { + s.logger = l + } +} + +// Logger returns the client logger instance. +func (s *Settings) Logger() *log.Logger { + return s.logger +} + +// Log will write to the service's logger if it is configured. +func (cl *Client) Log(format string, v ...interface{}) { + if cl.settings.Logger() != nil { + cl.settings.Logger().Printf(format, v...) + } +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/config/error.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/config/error.go new file mode 100644 index 00000000..1fbda51f --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/config/error.go @@ -0,0 +1,30 @@ +package config + +import "fmt" + +// UnsupportedDirective error. +type UnsupportedDirective struct { + text string +} + +// Error implements the error interface for unsupported directives. +func (e UnsupportedDirective) Error() string { + return e.text +} + +// Invalid config error. +type Invalid struct { + text string +} + +// Error implements the error interface for invalid config error. +func (e Invalid) Error() string { + return e.text +} + +// InvalidErrorf creates a new Invalid error. +func InvalidErrorf(format string, a ...interface{}) Invalid { + return Invalid{ + text: fmt.Sprintf("invalid krb5 config "+format, a...), + } +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/config/hosts.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/config/hosts.go new file mode 100644 index 00000000..a67989f9 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/config/hosts.go @@ -0,0 +1,141 @@ +package config + +import ( + "fmt" + "math/rand" + "net" + "strconv" + "strings" + + "gopkg.in/jcmturner/dnsutils.v1" +) + +// GetKDCs returns the count of KDCs available and a map of KDC host names keyed on preference order. +func (c *Config) GetKDCs(realm string, tcp bool) (int, map[int]string, error) { + if realm == "" { + realm = c.LibDefaults.DefaultRealm + } + kdcs := make(map[int]string) + var count int + + // Get the KDCs from the krb5.conf. + var ks []string + for _, r := range c.Realms { + if r.Realm != realm { + continue + } + ks = r.KDC + } + count = len(ks) + + if count > 0 { + // Order the kdcs randomly for preference. + kdcs = randServOrder(ks) + return count, kdcs, nil + } + + if !c.LibDefaults.DNSLookupKDC { + return count, kdcs, fmt.Errorf("no KDCs defined in configuration for realm %s", realm) + } + + // Use DNS to resolve kerberos SRV records. 
+ proto := "udp" + if tcp { + proto = "tcp" + } + index, addrs, err := dnsutils.OrderedSRV("kerberos", proto, realm) + if err != nil { + return count, kdcs, err + } + if len(addrs) < 1 { + return count, kdcs, fmt.Errorf("no KDC SRV records found for realm %s", realm) + } + count = index + for k, v := range addrs { + kdcs[k] = strings.TrimRight(v.Target, ".") + ":" + strconv.Itoa(int(v.Port)) + } + return count, kdcs, nil +} + +// GetKpasswdServers returns the count of kpasswd servers available and a map of kpasswd host names keyed on preference order. +// https://web.mit.edu/kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html#realms - see kpasswd_server section +func (c *Config) GetKpasswdServers(realm string, tcp bool) (int, map[int]string, error) { + kdcs := make(map[int]string) + var count int + + // Use DNS to resolve kerberos SRV records if configured to do so in krb5.conf. + if c.LibDefaults.DNSLookupKDC { + proto := "udp" + if tcp { + proto = "tcp" + } + c, addrs, err := dnsutils.OrderedSRV("kpasswd", proto, realm) + if err != nil { + return count, kdcs, err + } + if c < 1 { + c, addrs, err = dnsutils.OrderedSRV("kerberos-adm", proto, realm) + if err != nil { + return count, kdcs, err + } + } + if len(addrs) < 1 { + return count, kdcs, fmt.Errorf("no kpasswd or kadmin SRV records found for realm %s", realm) + } + count = c + for k, v := range addrs { + kdcs[k] = strings.TrimRight(v.Target, ".") + ":" + strconv.Itoa(int(v.Port)) + } + } else { + // Get the KDCs from the krb5.conf an order them randomly for preference. + var ks []string + var ka []string + for _, r := range c.Realms { + if r.Realm == realm { + ks = r.KPasswdServer + ka = r.AdminServer + break + } + } + if len(ks) < 1 { + for _, k := range ka { + h, _, err := net.SplitHostPort(k) + if err != nil { + continue + } + ks = append(ks, h+":464") + } + } + count = len(ks) + if count < 1 { + return count, kdcs, fmt.Errorf("no kpasswd or kadmin defined in configuration for realm %s", realm) + } + kdcs = randServOrder(ks) + } + return count, kdcs, nil +} + +func randServOrder(ks []string) map[int]string { + kdcs := make(map[int]string) + count := len(ks) + i := 1 + if count > 1 { + l := len(ks) + for l > 0 { + ri := rand.Intn(l) + kdcs[i] = ks[ri] + if l > 1 { + // Remove the entry from the source slice by swapping with the last entry and truncating + ks[len(ks)-1], ks[ri] = ks[ri], ks[len(ks)-1] + ks = ks[:len(ks)-1] + l = len(ks) + } else { + l = 0 + } + i++ + } + } else { + kdcs[i] = ks[0] + } + return kdcs +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/config/krb5conf.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/config/krb5conf.go new file mode 100644 index 00000000..8efe92d8 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/config/krb5conf.go @@ -0,0 +1,726 @@ +// Package config implements KRB5 client and service configuration as described at https://web.mit.edu/kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html +package config + +import ( + "bufio" + "encoding/hex" + "errors" + "fmt" + "io" + "net" + "os" + "os/user" + "regexp" + "strconv" + "strings" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "gopkg.in/jcmturner/gokrb5.v7/iana/etypeID" +) + +// Config represents the KRB5 configuration. +type Config struct { + LibDefaults LibDefaults + Realms []Realm + DomainRealm DomainRealm + //CaPaths + //AppDefaults + //Plugins +} + +// WeakETypeList is a list of encryption types that have been deemed weak. 
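GetKDCs above returns both a count and a map of KDC addresses keyed on preference order starting at 1, which is how the dial helpers in client/network.go iterate them. A minimal sketch, assuming cfg is a parsed *config.Config and the fmt and config packages are imported; the printed address is only an example:

// printKDCs lists the KDCs for a realm in preference order.
func printKDCs(cfg *config.Config, realm string) error {
	count, kdcs, err := cfg.GetKDCs(realm, true) // true selects TCP SRV records when DNS lookup is enabled
	if err != nil {
		return err
	}
	for i := 1; i <= count; i++ {
		fmt.Printf("preference %d: %s\n", i, kdcs[i]) // e.g. kdc1.example.com:88
	}
	return nil
}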
+const WeakETypeList = "des-cbc-crc des-cbc-md4 des-cbc-md5 des-cbc-raw des3-cbc-raw des-hmac-sha1 arcfour-hmac-exp rc4-hmac-exp arcfour-hmac-md5-exp des" + +// NewConfig creates a new config struct instance. +func NewConfig() *Config { + d := make(DomainRealm) + return &Config{ + LibDefaults: newLibDefaults(), + DomainRealm: d, + } +} + +// LibDefaults represents the [libdefaults] section of the configuration. +type LibDefaults struct { + AllowWeakCrypto bool //default false + // ap_req_checksum_type int //unlikely to support this + Canonicalize bool //default false + CCacheType int //default is 4. unlikely to implement older + Clockskew time.Duration //max allowed skew in seconds, default 300 + //Default_ccache_name string // default /tmp/krb5cc_%{uid} //Not implementing as will hold in memory + DefaultClientKeytabName string //default /usr/local/var/krb5/user/%{euid}/client.keytab + DefaultKeytabName string //default /etc/krb5.keytab + DefaultRealm string + DefaultTGSEnctypes []string //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 + DefaultTktEnctypes []string //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 + DefaultTGSEnctypeIDs []int32 //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 + DefaultTktEnctypeIDs []int32 //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 + DNSCanonicalizeHostname bool //default true + DNSLookupKDC bool //default false + DNSLookupRealm bool + ExtraAddresses []net.IP //Not implementing yet + Forwardable bool //default false + IgnoreAcceptorHostname bool //default false + K5LoginAuthoritative bool //default false + K5LoginDirectory string //default user's home directory. Must be owned by the user or root + KDCDefaultOptions asn1.BitString //default 0x00000010 (KDC_OPT_RENEWABLE_OK) + KDCTimeSync int //default 1 + //kdc_req_checksum_type int //unlikely to implement as for very old KDCs + NoAddresses bool //default true + PermittedEnctypes []string //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 + PermittedEnctypeIDs []int32 + //plugin_base_dir string //not supporting plugins + PreferredPreauthTypes []int //default “17, 16, 15, 14”, which forces libkrb5 to attempt to use PKINIT if it is supported + Proxiable bool //default false + RDNS bool //default true + RealmTryDomains int //default -1 + RenewLifetime time.Duration //default 0 + SafeChecksumType int //default 8 + TicketLifetime time.Duration //default 1 day + UDPPreferenceLimit int // 1 means to always use tcp. MIT krb5 has a default value of 1465, and it prevents user setting more than 32700. + VerifyAPReqNofail bool //default false +} + +// Create a new LibDefaults struct. 
+func newLibDefaults() LibDefaults { + uid := "0" + var hdir string + usr, _ := user.Current() + if usr != nil { + uid = usr.Uid + hdir = usr.HomeDir + } + opts := asn1.BitString{} + opts.Bytes, _ = hex.DecodeString("00000010") + opts.BitLength = len(opts.Bytes) * 8 + return LibDefaults{ + CCacheType: 4, + Clockskew: time.Duration(300) * time.Second, + DefaultClientKeytabName: fmt.Sprintf("/usr/local/var/krb5/user/%s/client.keytab", uid), + DefaultKeytabName: "/etc/krb5.keytab", + DefaultTGSEnctypes: []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96", "des3-cbc-sha1", "arcfour-hmac-md5", "camellia256-cts-cmac", "camellia128-cts-cmac", "des-cbc-crc", "des-cbc-md5", "des-cbc-md4"}, + DefaultTktEnctypes: []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96", "des3-cbc-sha1", "arcfour-hmac-md5", "camellia256-cts-cmac", "camellia128-cts-cmac", "des-cbc-crc", "des-cbc-md5", "des-cbc-md4"}, + DNSCanonicalizeHostname: true, + K5LoginDirectory: hdir, + KDCDefaultOptions: opts, + KDCTimeSync: 1, + NoAddresses: true, + PermittedEnctypes: []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96", "des3-cbc-sha1", "arcfour-hmac-md5", "camellia256-cts-cmac", "camellia128-cts-cmac", "des-cbc-crc", "des-cbc-md5", "des-cbc-md4"}, + RDNS: true, + RealmTryDomains: -1, + SafeChecksumType: 8, + TicketLifetime: time.Duration(24) * time.Hour, + UDPPreferenceLimit: 1465, + PreferredPreauthTypes: []int{17, 16, 15, 14}, + } +} + +// Parse the lines of the [libdefaults] section of the configuration into the LibDefaults struct. +func (l *LibDefaults) parseLines(lines []string) error { + for _, line := range lines { + //Remove comments after the values + if idx := strings.IndexAny(line, "#;"); idx != -1 { + line = line[:idx] + } + line = strings.TrimSpace(line) + if line == "" { + continue + } + if !strings.Contains(line, "=") { + return InvalidErrorf("libdefaults section line (%s)", line) + } + + p := strings.Split(line, "=") + key := strings.TrimSpace(strings.ToLower(p[0])) + switch key { + case "allow_weak_crypto": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.AllowWeakCrypto = v + case "canonicalize": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.Canonicalize = v + case "ccache_type": + p[1] = strings.TrimSpace(p[1]) + v, err := strconv.ParseUint(p[1], 10, 32) + if err != nil || v < 0 || v > 4 { + return InvalidErrorf("libdefaults section line (%s)", line) + } + l.CCacheType = int(v) + case "clockskew": + d, err := parseDuration(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.Clockskew = d + case "default_client_keytab_name": + l.DefaultClientKeytabName = strings.TrimSpace(p[1]) + case "default_keytab_name": + l.DefaultKeytabName = strings.TrimSpace(p[1]) + case "default_realm": + l.DefaultRealm = strings.TrimSpace(p[1]) + case "default_tgs_enctypes": + l.DefaultTGSEnctypes = strings.Fields(p[1]) + case "default_tkt_enctypes": + l.DefaultTktEnctypes = strings.Fields(p[1]) + case "dns_canonicalize_hostname": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.DNSCanonicalizeHostname = v + case "dns_lookup_kdc": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.DNSLookupKDC = v + case "dns_lookup_realm": + v, err := 
parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.DNSLookupRealm = v + case "extra_addresses": + ipStr := strings.TrimSpace(p[1]) + for _, ip := range strings.Split(ipStr, ",") { + if eip := net.ParseIP(ip); eip != nil { + l.ExtraAddresses = append(l.ExtraAddresses, eip) + } + } + case "forwardable": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.Forwardable = v + case "ignore_acceptor_hostname": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.IgnoreAcceptorHostname = v + case "k5login_authoritative": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.K5LoginAuthoritative = v + case "k5login_directory": + l.K5LoginDirectory = strings.TrimSpace(p[1]) + case "kdc_default_options": + v := strings.TrimSpace(p[1]) + v = strings.Replace(v, "0x", "", -1) + b, err := hex.DecodeString(v) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.KDCDefaultOptions.Bytes = b + l.KDCDefaultOptions.BitLength = len(b) * 8 + case "kdc_timesync": + p[1] = strings.TrimSpace(p[1]) + v, err := strconv.ParseInt(p[1], 10, 32) + if err != nil || v < 0 { + return InvalidErrorf("libdefaults section line (%s)", line) + } + l.KDCTimeSync = int(v) + case "noaddresses": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.NoAddresses = v + case "permitted_enctypes": + l.PermittedEnctypes = strings.Fields(p[1]) + case "preferred_preauth_types": + p[1] = strings.TrimSpace(p[1]) + t := strings.Split(p[1], ",") + var v []int + for _, s := range t { + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + v = append(v, int(i)) + } + l.PreferredPreauthTypes = v + case "proxiable": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.Proxiable = v + case "rdns": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.RDNS = v + case "realm_try_domains": + p[1] = strings.TrimSpace(p[1]) + v, err := strconv.ParseInt(p[1], 10, 32) + if err != nil || v < -1 { + return InvalidErrorf("libdefaults section line (%s)", line) + } + l.RealmTryDomains = int(v) + case "renew_lifetime": + d, err := parseDuration(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.RenewLifetime = d + case "safe_checksum_type": + p[1] = strings.TrimSpace(p[1]) + v, err := strconv.ParseInt(p[1], 10, 32) + if err != nil || v < 0 { + return InvalidErrorf("libdefaults section line (%s)", line) + } + l.SafeChecksumType = int(v) + case "ticket_lifetime": + d, err := parseDuration(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + } + l.TicketLifetime = d + case "udp_preference_limit": + p[1] = strings.TrimSpace(p[1]) + v, err := strconv.ParseUint(p[1], 10, 32) + if err != nil || v > 32700 { + return InvalidErrorf("libdefaults section line (%s)", line) + } + l.UDPPreferenceLimit = int(v) + case "verify_ap_req_nofail": + v, err := parseBoolean(p[1]) + if err != nil { + return InvalidErrorf("libdefaults section line (%s): %v", line, err) + 
} + l.VerifyAPReqNofail = v + default: + //Ignore the line + continue + } + } + l.DefaultTGSEnctypeIDs = parseETypes(l.DefaultTGSEnctypes, l.AllowWeakCrypto) + l.DefaultTktEnctypeIDs = parseETypes(l.DefaultTktEnctypes, l.AllowWeakCrypto) + l.PermittedEnctypeIDs = parseETypes(l.PermittedEnctypes, l.AllowWeakCrypto) + return nil +} + +// Realm represents an entry in the [realms] section of the configuration. +type Realm struct { + Realm string + AdminServer []string + //auth_to_local //Not implementing for now + //auth_to_local_names //Not implementing for now + DefaultDomain string + KDC []string + KPasswdServer []string //default admin_server:464 + MasterKDC []string +} + +// Parse the lines of a [realms] entry into the Realm struct. +func (r *Realm) parseLines(name string, lines []string) (err error) { + r.Realm = name + var adminServerFinal bool + var KDCFinal bool + var kpasswdServerFinal bool + var masterKDCFinal bool + var ignore bool + var c int // counts the depth of blocks within brackets { } + for _, line := range lines { + if ignore && c > 0 && !strings.Contains(line, "{") && !strings.Contains(line, "}") { + continue + } + //Remove comments after the values + if idx := strings.IndexAny(line, "#;"); idx != -1 { + line = line[:idx] + } + line = strings.TrimSpace(line) + if line == "" { + continue + } + if !strings.Contains(line, "=") && !strings.Contains(line, "}") { + return InvalidErrorf("realms section line (%s)", line) + } + if strings.Contains(line, "v4_") { + ignore = true + err = UnsupportedDirective{"v4 configurations are not supported"} + } + if strings.Contains(line, "{") { + c++ + if ignore { + continue + } + } + if strings.Contains(line, "}") { + c-- + if c < 0 { + return InvalidErrorf("unpaired curly brackets") + } + if ignore { + if c < 1 { + c = 0 + ignore = false + } + continue + } + } + + p := strings.Split(line, "=") + key := strings.TrimSpace(strings.ToLower(p[0])) + v := strings.TrimSpace(p[1]) + switch key { + case "admin_server": + appendUntilFinal(&r.AdminServer, v, &adminServerFinal) + case "default_domain": + r.DefaultDomain = v + case "kdc": + if !strings.Contains(v, ":") { + // No port number specified default to 88 + if strings.HasSuffix(v, `*`) { + v = strings.TrimSpace(strings.TrimSuffix(v, `*`)) + ":88*" + } else { + v = strings.TrimSpace(v) + ":88" + } + } + appendUntilFinal(&r.KDC, v, &KDCFinal) + case "kpasswd_server": + appendUntilFinal(&r.KPasswdServer, v, &kpasswdServerFinal) + case "master_kdc": + appendUntilFinal(&r.MasterKDC, v, &masterKDCFinal) + default: + //Ignore the line + continue + } + } + //default for Kpasswd_server = admin_server:464 + if len(r.KPasswdServer) < 1 { + for _, a := range r.AdminServer { + s := strings.Split(a, ":") + r.KPasswdServer = append(r.KPasswdServer, s[0]+":464") + } + } + return +} + +// Parse the lines of the [realms] section of the configuration into an slice of Realm structs. 
+func parseRealms(lines []string) (realms []Realm, err error) { + var name string + var start int + var c int + for i, l := range lines { + //Remove comments after the values + if idx := strings.IndexAny(l, "#;"); idx != -1 { + l = l[:idx] + } + l = strings.TrimSpace(l) + if l == "" { + continue + } + //if strings.Contains(l, "v4_") { + // return nil, errors.New("v4 configurations are not supported in Realms section") + //} + if strings.Contains(l, "{") { + c++ + if !strings.Contains(l, "=") { + return nil, fmt.Errorf("realm configuration line invalid: %s", l) + } + if c == 1 { + start = i + p := strings.Split(l, "=") + name = strings.TrimSpace(p[0]) + } + } + if strings.Contains(l, "}") { + if c < 1 { + // but not started a block!!! + return nil, errors.New("invalid Realms section in configuration") + } + c-- + if c == 0 { + var r Realm + e := r.parseLines(name, lines[start+1:i]) + if e != nil { + if _, ok := e.(UnsupportedDirective); !ok { + err = e + return + } + err = e + } + realms = append(realms, r) + } + } + } + return +} + +// DomainRealm maps the domains to realms representing the [domain_realm] section of the configuration. +type DomainRealm map[string]string + +// Parse the lines of the [domain_realm] section of the configuration and add to the mapping. +func (d *DomainRealm) parseLines(lines []string) error { + for _, line := range lines { + //Remove comments after the values + if idx := strings.IndexAny(line, "#;"); idx != -1 { + line = line[:idx] + } + if strings.TrimSpace(line) == "" { + continue + } + if !strings.Contains(line, "=") { + return InvalidErrorf("realm line (%s)", line) + } + p := strings.Split(line, "=") + domain := strings.TrimSpace(strings.ToLower(p[0])) + realm := strings.TrimSpace(p[1]) + d.addMapping(domain, realm) + } + return nil +} + +// Add a domain to realm mapping. +func (d *DomainRealm) addMapping(domain, realm string) { + (*d)[domain] = realm +} + +// Delete a domain to realm mapping. +func (d *DomainRealm) deleteMapping(domain, realm string) { + delete(*d, domain) +} + +// ResolveRealm resolves the kerberos realm for the specified domain name from the domain to realm mapping. +// The most specific mapping is returned. +func (c *Config) ResolveRealm(domainName string) string { + domainName = strings.TrimSuffix(domainName, ".") + + // Try to match the entire hostname first + if r, ok := c.DomainRealm[domainName]; ok { + return r + } + + // Try to match all DNS domain parts + periods := strings.Count(domainName, ".") + 1 + for i := 2; i <= periods; i++ { + z := strings.SplitN(domainName, ".", i) + if r, ok := c.DomainRealm["."+z[len(z)-1]]; ok { + return r + } + } + return c.LibDefaults.DefaultRealm +} + +// Load the KRB5 configuration from the specified file path. +func Load(cfgPath string) (*Config, error) { + fh, err := os.Open(cfgPath) + if err != nil { + return nil, errors.New("configuration file could not be opened: " + cfgPath + " " + err.Error()) + } + defer fh.Close() + scanner := bufio.NewScanner(fh) + return NewConfigFromScanner(scanner) +} + +// NewConfigFromString creates a new Config struct from a string. +func NewConfigFromString(s string) (*Config, error) { + reader := strings.NewReader(s) + return NewConfigFromReader(reader) +} + +// NewConfigFromReader creates a new Config struct from an io.Reader. +func NewConfigFromReader(r io.Reader) (*Config, error) { + scanner := bufio.NewScanner(r) + return NewConfigFromScanner(scanner) +} + +// NewConfigFromScanner creates a new Config struct from a bufio.Scanner. 
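The scanner-based parser below recognises the [libdefaults], [realms] and [domain_realm] section headers. A minimal sketch that feeds a configuration string to NewConfigFromString and resolves a realm from the domain_realm mapping; realm and host names are placeholders:

package main

import (
	"fmt"
	"log"

	"gopkg.in/jcmturner/gokrb5.v7/config"
)

const krb5Conf = `
[libdefaults]
  default_realm = EXAMPLE.COM
  dns_lookup_kdc = false
  udp_preference_limit = 1   ; 1 forces TCP to the KDC

[realms]
  EXAMPLE.COM = {
    kdc = kdc1.example.com         ; port 88 is appended by the parser when omitted
    kdc = kdc2.example.com:88
    admin_server = kdc1.example.com
  }

[domain_realm]
  .example.com = EXAMPLE.COM
`

func main() {
	cfg, err := config.NewConfigFromString(krb5Conf)
	if err != nil {
		log.Fatal(err)
	}
	// The most specific domain_realm mapping wins; here the .example.com suffix applies.
	fmt.Println(cfg.ResolveRealm("www.example.com")) // EXAMPLE.COM
}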
+func NewConfigFromScanner(scanner *bufio.Scanner) (*Config, error) { + c := NewConfig() + var e error + sections := make(map[int]string) + var sectionLineNum []int + var lines []string + for scanner.Scan() { + // Skip comments and blank lines + if matched, _ := regexp.MatchString(`^\s*(#|;|\n)`, scanner.Text()); matched { + continue + } + if matched, _ := regexp.MatchString(`^\s*\[libdefaults\]\s*`, scanner.Text()); matched { + sections[len(lines)] = "libdefaults" + sectionLineNum = append(sectionLineNum, len(lines)) + continue + } + if matched, _ := regexp.MatchString(`^\s*\[realms\]\s*`, scanner.Text()); matched { + sections[len(lines)] = "realms" + sectionLineNum = append(sectionLineNum, len(lines)) + continue + } + if matched, _ := regexp.MatchString(`^\s*\[domain_realm\]\s*`, scanner.Text()); matched { + sections[len(lines)] = "domain_realm" + sectionLineNum = append(sectionLineNum, len(lines)) + continue + } + if matched, _ := regexp.MatchString(`^\s*\[.*\]\s*`, scanner.Text()); matched { + sections[len(lines)] = "unknown_section" + sectionLineNum = append(sectionLineNum, len(lines)) + continue + } + lines = append(lines, scanner.Text()) + } + for i, start := range sectionLineNum { + var end int + if i+1 >= len(sectionLineNum) { + end = len(lines) + } else { + end = sectionLineNum[i+1] + } + switch section := sections[start]; section { + case "libdefaults": + err := c.LibDefaults.parseLines(lines[start:end]) + if err != nil { + if _, ok := err.(UnsupportedDirective); !ok { + return nil, fmt.Errorf("error processing libdefaults section: %v", err) + } + e = err + } + case "realms": + realms, err := parseRealms(lines[start:end]) + if err != nil { + if _, ok := err.(UnsupportedDirective); !ok { + return nil, fmt.Errorf("error processing realms section: %v", err) + } + e = err + } + c.Realms = realms + case "domain_realm": + err := c.DomainRealm.parseLines(lines[start:end]) + if err != nil { + if _, ok := err.(UnsupportedDirective); !ok { + return nil, fmt.Errorf("error processing domaain_realm section: %v", err) + } + e = err + } + default: + continue + } + } + return c, e +} + +// Parse a space delimited list of ETypes into a list of EType numbers optionally filtering out weak ETypes. +func parseETypes(s []string, w bool) []int32 { + var eti []int32 + for _, et := range s { + if !w { + var weak bool + for _, wet := range strings.Fields(WeakETypeList) { + if et == wet { + weak = true + break + } + } + if weak { + continue + } + } + i := etypeID.EtypeSupported(et) + if i != 0 { + eti = append(eti, i) + } + } + return eti +} + +// Parse a time duration string in the configuration to a golang time.Duration. 
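parseDuration below accepts several notations for duration-valued settings: plain seconds, h:m[:s], Go-style unit strings, and an Nd day prefix. Since it is unexported, the sketch exercises it through the public NewConfigFromString; the config and time packages are assumed imported:

// ticketLifetime parses a one-line [libdefaults] section and returns the resulting
// ticket_lifetime; value may be e.g. "36000", "10:00:00", "10h" or "1d".
func ticketLifetime(value string) (time.Duration, error) {
	cfg, err := config.NewConfigFromString("[libdefaults]\n  ticket_lifetime = " + value + "\n")
	if err != nil {
		return 0, err
	}
	return cfg.LibDefaults.TicketLifetime, nil
}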
+func parseDuration(s string) (time.Duration, error) { + s = strings.Replace(strings.TrimSpace(s), " ", "", -1) + + // handle Nd[NmNs] + if strings.Contains(s, "d") { + ds := strings.SplitN(s, "d", 2) + dn, err := strconv.ParseUint(ds[0], 10, 32) + if err != nil { + return time.Duration(0), errors.New("invalid time duration") + } + d := time.Duration(dn*24) * time.Hour + if ds[1] != "" { + dp, err := time.ParseDuration(ds[1]) + if err != nil { + return time.Duration(0), errors.New("invalid time duration") + } + d = d + dp + } + return d, nil + } + + // handle Nm[Ns] + d, err := time.ParseDuration(s) + if err == nil { + return d, nil + } + + // handle N + v, err := strconv.ParseUint(s, 10, 32) + if err == nil && v > 0 { + return time.Duration(v) * time.Second, nil + } + + // handle h:m[:s] + if strings.Contains(s, ":") { + t := strings.Split(s, ":") + if 2 > len(t) || len(t) > 3 { + return time.Duration(0), errors.New("invalid time duration value") + } + var i []int + for _, n := range t { + j, err := strconv.ParseInt(n, 10, 16) + if err != nil { + return time.Duration(0), errors.New("invalid time duration value") + } + i = append(i, int(j)) + } + d := time.Duration(i[0])*time.Hour + time.Duration(i[1])*time.Minute + if len(i) == 3 { + d = d + time.Duration(i[2])*time.Second + } + return d, nil + } + return time.Duration(0), errors.New("invalid time duration value") +} + +// Parse possible boolean values to golang bool. +func parseBoolean(s string) (bool, error) { + s = strings.TrimSpace(s) + v, err := strconv.ParseBool(s) + if err == nil { + return v, nil + } + switch strings.ToLower(s) { + case "yes": + return true, nil + case "y": + return true, nil + case "no": + return false, nil + case "n": + return false, nil + } + return false, errors.New("invalid boolean value") +} + +// Parse array of strings but stop if an asterisk is placed at the end of a line. +func appendUntilFinal(s *[]string, value string, final *bool) { + if *final { + return + } + if last := len(value) - 1; last >= 0 && value[last] == '*' { + *final = true + value = value[:len(value)-1] + } + *s = append(*s, value) +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/credentials/ccache.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/credentials/ccache.go new file mode 100644 index 00000000..98ec29b9 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/credentials/ccache.go @@ -0,0 +1,348 @@ +package credentials + +import ( + "bytes" + "encoding/binary" + "errors" + "io/ioutil" + "strings" + "time" + "unsafe" + + "github.com/jcmturner/gofork/encoding/asn1" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +const ( + headerFieldTagKDCOffset = 1 +) + +// The first byte of the file always has the value 5. +// The value of the second byte contains the version number (1 through 4) +// Versions 1 and 2 of the file format use native byte order for integer representations. 
+// Versions 3 and 4 always use big-endian byte order +// After the two-byte version indicator, the file has three parts: +// 1) the header (in version 4 only) +// 2) the default principal name +// 3) a sequence of credentials + +// CCache is the file credentials cache as define here: https://web.mit.edu/kerberos/krb5-latest/doc/formats/ccache_file_format.html +type CCache struct { + Version uint8 + Header header + DefaultPrincipal principal + Credentials []*Credential + Path string +} + +type header struct { + length uint16 + fields []headerField +} + +type headerField struct { + tag uint16 + length uint16 + value []byte +} + +// Credential cache entry principal struct. +type principal struct { + Realm string + PrincipalName types.PrincipalName +} + +// Credential holds a Kerberos client's ccache credential information. +type Credential struct { + Client principal + Server principal + Key types.EncryptionKey + AuthTime time.Time + StartTime time.Time + EndTime time.Time + RenewTill time.Time + IsSKey bool + TicketFlags asn1.BitString + Addresses []types.HostAddress + AuthData []types.AuthorizationDataEntry + Ticket []byte + SecondTicket []byte +} + +// LoadCCache loads a credential cache file into a CCache type. +func LoadCCache(cpath string) (*CCache, error) { + c := new(CCache) + b, err := ioutil.ReadFile(cpath) + if err != nil { + return c, err + } + err = c.Unmarshal(b) + return c, err +} + +// Unmarshal a byte slice of credential cache data into CCache type. +func (c *CCache) Unmarshal(b []byte) error { + p := 0 + //The first byte of the file always has the value 5 + if int8(b[p]) != 5 { + return errors.New("Invalid credential cache data. First byte does not equal 5") + } + p++ + //Get credential cache version + //The second byte contains the version number (1 to 4) + c.Version = b[p] + if c.Version < 1 || c.Version > 4 { + return errors.New("Invalid credential cache data. Keytab version is not within 1 to 4") + } + p++ + //Version 1 or 2 of the file format uses native byte order for integer representations. Versions 3 & 4 always uses big-endian byte order + var endian binary.ByteOrder + endian = binary.BigEndian + if (c.Version == 1 || c.Version == 2) && isNativeEndianLittle() { + endian = binary.LittleEndian + } + if c.Version == 4 { + err := parseHeader(b, &p, c, &endian) + if err != nil { + return err + } + } + c.DefaultPrincipal = parsePrincipal(b, &p, c, &endian) + for p < len(b) { + cred, err := parseCredential(b, &p, c, &endian) + if err != nil { + return err + } + c.Credentials = append(c.Credentials, cred) + } + return nil +} + +func parseHeader(b []byte, p *int, c *CCache, e *binary.ByteOrder) error { + if c.Version != 4 { + return errors.New("Credentials cache version is not 4 so there is no header to parse.") + } + h := header{} + h.length = uint16(readInt16(b, p, e)) + for *p <= int(h.length) { + f := headerField{} + f.tag = uint16(readInt16(b, p, e)) + f.length = uint16(readInt16(b, p, e)) + f.value = b[*p : *p+int(f.length)] + *p += int(f.length) + if !f.valid() { + return errors.New("Invalid credential cache header found") + } + h.fields = append(h.fields, f) + } + c.Header = h + return nil +} + +// Parse the Keytab bytes of a principal into a Keytab entry's principal. 
+func parsePrincipal(b []byte, p *int, c *CCache, e *binary.ByteOrder) (princ principal) { + if c.Version != 1 { + //Name Type is omitted in version 1 + princ.PrincipalName.NameType = readInt32(b, p, e) + } + nc := int(readInt32(b, p, e)) + if c.Version == 1 { + //In version 1 the number of components includes the realm. Minus 1 to make consistent with version 2 + nc-- + } + lenRealm := readInt32(b, p, e) + princ.Realm = string(readBytes(b, p, int(lenRealm), e)) + for i := 0; i < nc; i++ { + l := readInt32(b, p, e) + princ.PrincipalName.NameString = append(princ.PrincipalName.NameString, string(readBytes(b, p, int(l), e))) + } + return princ +} + +func parseCredential(b []byte, p *int, c *CCache, e *binary.ByteOrder) (cred *Credential, err error) { + cred = new(Credential) + cred.Client = parsePrincipal(b, p, c, e) + cred.Server = parsePrincipal(b, p, c, e) + key := types.EncryptionKey{} + key.KeyType = int32(readInt16(b, p, e)) + if c.Version == 3 { + //repeated twice in version 3 + key.KeyType = int32(readInt16(b, p, e)) + } + key.KeyValue = readData(b, p, e) + cred.Key = key + cred.AuthTime = readTimestamp(b, p, e) + cred.StartTime = readTimestamp(b, p, e) + cred.EndTime = readTimestamp(b, p, e) + cred.RenewTill = readTimestamp(b, p, e) + if ik := readInt8(b, p, e); ik == 0 { + cred.IsSKey = false + } else { + cred.IsSKey = true + } + cred.TicketFlags = types.NewKrbFlags() + cred.TicketFlags.Bytes = readBytes(b, p, 4, e) + l := int(readInt32(b, p, e)) + cred.Addresses = make([]types.HostAddress, l, l) + for i := range cred.Addresses { + cred.Addresses[i] = readAddress(b, p, e) + } + l = int(readInt32(b, p, e)) + cred.AuthData = make([]types.AuthorizationDataEntry, l, l) + for i := range cred.AuthData { + cred.AuthData[i] = readAuthDataEntry(b, p, e) + } + cred.Ticket = readData(b, p, e) + cred.SecondTicket = readData(b, p, e) + return +} + +// GetClientPrincipalName returns a PrincipalName type for the client the credentials cache is for. +func (c *CCache) GetClientPrincipalName() types.PrincipalName { + return c.DefaultPrincipal.PrincipalName +} + +// GetClientRealm returns the reals of the client the credentials cache is for. +func (c *CCache) GetClientRealm() string { + return c.DefaultPrincipal.Realm +} + +// GetClientCredentials returns a Credentials object representing the client of the credentials cache. +func (c *CCache) GetClientCredentials() *Credentials { + return &Credentials{ + username: c.DefaultPrincipal.PrincipalName.PrincipalNameString(), + realm: c.GetClientRealm(), + cname: c.DefaultPrincipal.PrincipalName, + } +} + +// Contains tests if the cache contains a credential for the provided server PrincipalName +func (c *CCache) Contains(p types.PrincipalName) bool { + for _, cred := range c.Credentials { + if cred.Server.PrincipalName.Equal(p) { + return true + } + } + return false +} + +// GetEntry returns a specific credential for the PrincipalName provided. +func (c *CCache) GetEntry(p types.PrincipalName) (*Credential, bool) { + cred := new(Credential) + var found bool + for i := range c.Credentials { + if c.Credentials[i].Server.PrincipalName.Equal(p) { + cred = c.Credentials[i] + found = true + break + } + } + if !found { + return cred, false + } + return cred, true +} + +// GetEntries filters out configuration entries an returns a slice of credentials. 
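// ---------------------------------------------------------------------------
// Illustrative usage sketch, separate from the vendored code above: load a
// credential cache with the parser in this file and list its non-config
// entries. The ccache path is a typical MIT-style location and is only an
// example.
package main

import (
    "fmt"

    "gopkg.in/jcmturner/gokrb5.v7/credentials"
)

func main() {
    cc, err := credentials.LoadCCache("/tmp/krb5cc_1000") // example path only
    if err != nil {
        panic(err)
    }
    fmt.Printf("ccache v%d for %s@%s\n",
        cc.Version, cc.GetClientPrincipalName().PrincipalNameString(), cc.GetClientRealm())
    for _, cred := range cc.GetEntries() {
        fmt.Printf("  %s expires %v\n",
            cred.Server.PrincipalName.PrincipalNameString(), cred.EndTime)
    }
}
// ---------------------------------------------------------------------------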
+func (c *CCache) GetEntries() []*Credential { + creds := make([]*Credential, 0) + for _, cred := range c.Credentials { + // Filter out configuration entries + if strings.HasPrefix(cred.Server.Realm, "X-CACHECONF") { + continue + } + creds = append(creds, cred) + } + return creds +} + +func (h *headerField) valid() bool { + // At this time there is only one defined header field. + // Its tag value is 1, its length is always 8. + // Its contents are two 32-bit integers giving the seconds and microseconds + // of the time offset of the KDC relative to the client. + // Adding this offset to the current time on the client should give the current time on the KDC, if that offset has not changed since the initial authentication. + + // Done as a switch in case other tag values are added in the future. + switch h.tag { + case headerFieldTagKDCOffset: + if h.length != 8 || len(h.value) != 8 { + return false + } + return true + } + return false +} + +func readData(b []byte, p *int, e *binary.ByteOrder) []byte { + l := readInt32(b, p, e) + return readBytes(b, p, int(l), e) +} + +func readAddress(b []byte, p *int, e *binary.ByteOrder) types.HostAddress { + a := types.HostAddress{} + a.AddrType = int32(readInt16(b, p, e)) + a.Address = readData(b, p, e) + return a +} + +func readAuthDataEntry(b []byte, p *int, e *binary.ByteOrder) types.AuthorizationDataEntry { + a := types.AuthorizationDataEntry{} + a.ADType = int32(readInt16(b, p, e)) + a.ADData = readData(b, p, e) + return a +} + +// Read bytes representing a timestamp. +func readTimestamp(b []byte, p *int, e *binary.ByteOrder) time.Time { + return time.Unix(int64(readInt32(b, p, e)), 0) +} + +// Read bytes representing an eight bit integer. +func readInt8(b []byte, p *int, e *binary.ByteOrder) (i int8) { + buf := bytes.NewBuffer(b[*p : *p+1]) + binary.Read(buf, *e, &i) + *p++ + return +} + +// Read bytes representing a sixteen bit integer. +func readInt16(b []byte, p *int, e *binary.ByteOrder) (i int16) { + buf := bytes.NewBuffer(b[*p : *p+2]) + binary.Read(buf, *e, &i) + *p += 2 + return +} + +// Read bytes representing a thirty two bit integer. +func readInt32(b []byte, p *int, e *binary.ByteOrder) (i int32) { + buf := bytes.NewBuffer(b[*p : *p+4]) + binary.Read(buf, *e, &i) + *p += 4 + return +} + +func readBytes(b []byte, p *int, s int, e *binary.ByteOrder) []byte { + buf := bytes.NewBuffer(b[*p : *p+s]) + r := make([]byte, s) + binary.Read(buf, *e, &r) + *p += s + return r +} + +func isNativeEndianLittle() bool { + var x = 0x012345678 + var p = unsafe.Pointer(&x) + var bp = (*[4]byte)(p) + + var endian bool + if 0x01 == bp[0] { + endian = false + } else if (0x78 & 0xff) == (bp[0] & 0xff) { + endian = true + } else { + // Default to big endian + endian = false + } + return endian +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/credentials/credentials.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/credentials/credentials.go new file mode 100644 index 00000000..beec0664 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/credentials/credentials.go @@ -0,0 +1,314 @@ +// Package credentials provides credentials management for Kerberos 5 authentication. +package credentials + +import ( + "time" + + "github.com/hashicorp/go-uuid" + "gopkg.in/jcmturner/gokrb5.v7/iana/nametype" + "gopkg.in/jcmturner/gokrb5.v7/keytab" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +const ( + // AttributeKeyADCredentials assigned number for AD credentials. + AttributeKeyADCredentials = "gokrb5AttributeKeyADCredentials" +) + +// Credentials struct for a user. 
+// Contains either a keytab, password or both. +// Keytabs are used over passwords if both are defined. +type Credentials struct { + username string + displayName string + realm string + cname types.PrincipalName + keytab *keytab.Keytab + password string + attributes map[string]interface{} + validUntil time.Time + + authenticated bool + human bool + authTime time.Time + groupMembership map[string]bool + sessionID string +} + +// ADCredentials contains information obtained from the PAC. +type ADCredentials struct { + EffectiveName string + FullName string + UserID int + PrimaryGroupID int + LogOnTime time.Time + LogOffTime time.Time + PasswordLastSet time.Time + GroupMembershipSIDs []string + LogonDomainName string + LogonDomainID string + LogonServer string +} + +// New creates a new Credentials instance. +func New(username string, realm string) *Credentials { + uid, err := uuid.GenerateUUID() + if err != nil { + uid = "00unique-sess-ions-uuid-unavailable0" + } + return &Credentials{ + username: username, + displayName: username, + realm: realm, + cname: types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, username), + keytab: keytab.New(), + attributes: make(map[string]interface{}), + groupMembership: make(map[string]bool), + sessionID: uid, + human: true, + } +} + +// NewFromPrincipalName creates a new Credentials instance with the user details provides as a PrincipalName type. +func NewFromPrincipalName(cname types.PrincipalName, realm string) *Credentials { + uid, err := uuid.GenerateUUID() + if err != nil { + uid = "00unique-sess-ions-uuid-unavailable0" + } + return &Credentials{ + username: cname.PrincipalNameString(), + displayName: cname.PrincipalNameString(), + realm: realm, + cname: cname, + keytab: keytab.New(), + attributes: make(map[string]interface{}), + groupMembership: make(map[string]bool), + sessionID: uid, + human: true, + } +} + +// WithKeytab sets the Keytab in the Credentials struct. +func (c *Credentials) WithKeytab(kt *keytab.Keytab) *Credentials { + c.keytab = kt + c.password = "" + return c +} + +// Keytab returns the credential's Keytab. +func (c *Credentials) Keytab() *keytab.Keytab { + return c.keytab +} + +// HasKeytab queries if the Credentials has a keytab defined. +func (c *Credentials) HasKeytab() bool { + if c.keytab != nil && len(c.keytab.Entries) > 0 { + return true + } + return false +} + +// WithPassword sets the password in the Credentials struct. +func (c *Credentials) WithPassword(password string) *Credentials { + c.password = password + c.keytab = keytab.New() // clear any keytab + return c +} + +// Password returns the credential's password. +func (c *Credentials) Password() string { + return c.password +} + +// HasPassword queries if the Credentials has a password defined. +func (c *Credentials) HasPassword() bool { + if c.password != "" { + return true + } + return false +} + +// SetValidUntil sets the expiry time of the credentials +func (c *Credentials) SetValidUntil(t time.Time) { + c.validUntil = t +} + +// SetADCredentials adds ADCredentials attributes to the credentials +func (c *Credentials) SetADCredentials(a ADCredentials) { + c.SetAttribute(AttributeKeyADCredentials, a) + if a.FullName != "" { + c.SetDisplayName(a.FullName) + } + if a.EffectiveName != "" { + c.SetUserName(a.EffectiveName) + } + for i := range a.GroupMembershipSIDs { + c.AddAuthzAttribute(a.GroupMembershipSIDs[i]) + } +} + +// Methods to implement goidentity.Identity interface + +// UserName returns the credential's username. 
+func (c *Credentials) UserName() string { + return c.username +} + +// SetUserName sets the username value on the credential. +func (c *Credentials) SetUserName(s string) { + c.username = s +} + +// CName returns the credential's client principal name. +func (c *Credentials) CName() types.PrincipalName { + return c.cname +} + +// SetCName sets the client principal name on the credential. +func (c *Credentials) SetCName(pn types.PrincipalName) { + c.cname = pn +} + +// Domain returns the credential's domain. +func (c *Credentials) Domain() string { + return c.realm +} + +// SetDomain sets the domain value on the credential. +func (c *Credentials) SetDomain(s string) { + c.realm = s +} + +// Realm returns the credential's realm. Same as the domain. +func (c *Credentials) Realm() string { + return c.Domain() +} + +// SetRealm sets the realm value on the credential. Same as the domain +func (c *Credentials) SetRealm(s string) { + c.SetDomain(s) +} + +// DisplayName returns the credential's display name. +func (c *Credentials) DisplayName() string { + return c.displayName +} + +// SetDisplayName sets the display name value on the credential. +func (c *Credentials) SetDisplayName(s string) { + c.displayName = s +} + +// Human returns if the credential represents a human or not. +func (c *Credentials) Human() bool { + return c.human +} + +// SetHuman sets the credential as human. +func (c *Credentials) SetHuman(b bool) { + c.human = b +} + +// AuthTime returns the time the credential was authenticated. +func (c *Credentials) AuthTime() time.Time { + return c.authTime +} + +// SetAuthTime sets the time the credential was authenticated. +func (c *Credentials) SetAuthTime(t time.Time) { + c.authTime = t +} + +// AuthzAttributes returns the credentials authorizing attributes. +func (c *Credentials) AuthzAttributes() []string { + s := make([]string, len(c.groupMembership)) + i := 0 + for a := range c.groupMembership { + s[i] = a + i++ + } + return s +} + +// Authenticated indicates if the credential has been successfully authenticated or not. +func (c *Credentials) Authenticated() bool { + return c.authenticated +} + +// SetAuthenticated sets the credential as having been successfully authenticated. +func (c *Credentials) SetAuthenticated(b bool) { + c.authenticated = b +} + +// AddAuthzAttribute adds an authorization attribute to the credential. +func (c *Credentials) AddAuthzAttribute(a string) { + c.groupMembership[a] = true +} + +// RemoveAuthzAttribute removes an authorization attribute from the credential. +func (c *Credentials) RemoveAuthzAttribute(a string) { + if _, ok := c.groupMembership[a]; !ok { + return + } + delete(c.groupMembership, a) +} + +// EnableAuthzAttribute toggles an authorization attribute to an enabled state on the credential. +func (c *Credentials) EnableAuthzAttribute(a string) { + if enabled, ok := c.groupMembership[a]; ok && !enabled { + c.groupMembership[a] = true + } +} + +// DisableAuthzAttribute toggles an authorization attribute to a disabled state on the credential. +func (c *Credentials) DisableAuthzAttribute(a string) { + if enabled, ok := c.groupMembership[a]; ok && enabled { + c.groupMembership[a] = false + } +} + +// Authorized indicates if the credential has the specified authorizing attribute. +func (c *Credentials) Authorized(a string) bool { + if enabled, ok := c.groupMembership[a]; ok && enabled { + return true + } + return false +} + +// SessionID returns the credential's session ID. 
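// ---------------------------------------------------------------------------
// Illustrative usage sketch, separate from the vendored code above: build a
// Credentials value with this API and show how PAC-derived AD attributes feed
// the identity accessors. The user name, password and SID are made up.
package main

import (
    "fmt"

    "gopkg.in/jcmturner/gokrb5.v7/credentials"
)

func main() {
    c := credentials.New("alice", "EXAMPLE.COM").WithPassword("hunter2")
    fmt.Println(c.HasPassword(), c.HasKeytab()) // true false

    c.SetADCredentials(credentials.ADCredentials{
        EffectiveName:       "alice",
        FullName:            "Alice Example",
        GroupMembershipSIDs: []string{"S-1-5-21-1111111111-222222222-3333333333-512"},
    })
    c.SetAuthenticated(true)

    fmt.Println(c.UserName(), c.DisplayName(), c.Realm())
    fmt.Println(c.Authenticated())
    fmt.Println(c.Authorized("S-1-5-21-1111111111-222222222-3333333333-512")) // true
    fmt.Println(c.AuthzAttributes())
}
// ---------------------------------------------------------------------------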
+func (c *Credentials) SessionID() string { + return c.sessionID +} + +// Expired indicates if the credential has expired. +func (c *Credentials) Expired() bool { + if !c.validUntil.IsZero() && time.Now().UTC().After(c.validUntil) { + return true + } + return false +} + +// ValidUntil returns the credential's valid until date +func (c *Credentials) ValidUntil() time.Time { + return c.validUntil +} + +// Attributes returns the Credentials' attributes map. +func (c *Credentials) Attributes() map[string]interface{} { + return c.attributes +} + +// SetAttribute sets the value of an attribute. +func (c *Credentials) SetAttribute(k string, v interface{}) { + c.attributes[k] = v +} + +// SetAttributes replaces the attributes map with the one provided. +func (c *Credentials) SetAttributes(a map[string]interface{}) { + c.attributes = a +} + +// RemoveAttribute deletes an attribute from the attribute map that has the key provided. +func (c *Credentials) RemoveAttribute(k string) { + delete(c.attributes, k) +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes128-cts-hmac-sha1-96.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes128-cts-hmac-sha1-96.go new file mode 100644 index 00000000..90b5df08 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes128-cts-hmac-sha1-96.go @@ -0,0 +1,173 @@ +package crypto + +import ( + "crypto/aes" + "crypto/hmac" + "crypto/sha1" + "hash" + + "gopkg.in/jcmturner/gokrb5.v7/crypto/common" + "gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961" + "gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962" + "gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype" + "gopkg.in/jcmturner/gokrb5.v7/iana/etypeID" +) + +// RFC 3962 +//+--------------------------------------------------------------------+ +//| protocol key format 128- or 256-bit string | +//| | +//| string-to-key function PBKDF2+DK with variable | +//| iteration count (see | +//| above) | +//| | +//| default string-to-key parameters 00 00 10 00 | +//| | +//| key-generation seed length key size | +//| | +//| random-to-key function identity function | +//| | +//| hash function, H SHA-1 | +//| | +//| HMAC output size, h 12 octets (96 bits) | +//| | +//| message block size, m 1 octet | +//| | +//| encryption/decryption functions, AES in CBC-CTS mode | +//| E and D (cipher block size 16 | +//| octets), with next-to- | +//| last block (last block | +//| if only one) as CBC-style | +//| ivec | +//+--------------------------------------------------------------------+ +// +//+--------------------------------------------------------------------+ +//| encryption types | +//+--------------------------------------------------------------------+ +//| type name etype value key size | +//+--------------------------------------------------------------------+ +//| aes128-cts-hmac-sha1-96 17 128 | +//| aes256-cts-hmac-sha1-96 18 256 | +//+--------------------------------------------------------------------+ +// +//+--------------------------------------------------------------------+ +//| checksum types | +//+--------------------------------------------------------------------+ +//| type name sumtype value length | +//+--------------------------------------------------------------------+ +//| hmac-sha1-96-aes128 15 96 | +//| hmac-sha1-96-aes256 16 96 | +//+--------------------------------------------------------------------+ + +// Aes128CtsHmacSha96 implements Kerberos encryption type aes128-cts-hmac-sha1-96 +type Aes128CtsHmacSha96 struct { +} + +// GetETypeID returns the EType ID number. 
+func (e Aes128CtsHmacSha96) GetETypeID() int32 { + return etypeID.AES128_CTS_HMAC_SHA1_96 +} + +// GetHashID returns the checksum type ID number. +func (e Aes128CtsHmacSha96) GetHashID() int32 { + return chksumtype.HMAC_SHA1_96_AES128 +} + +// GetKeyByteSize returns the number of bytes for key of this etype. +func (e Aes128CtsHmacSha96) GetKeyByteSize() int { + return 128 / 8 +} + +// GetKeySeedBitLength returns the number of bits for the seed for key generation. +func (e Aes128CtsHmacSha96) GetKeySeedBitLength() int { + return e.GetKeyByteSize() * 8 +} + +// GetHashFunc returns the hash function for this etype. +func (e Aes128CtsHmacSha96) GetHashFunc() func() hash.Hash { + return sha1.New +} + +// GetMessageBlockByteSize returns the block size for the etype's messages. +func (e Aes128CtsHmacSha96) GetMessageBlockByteSize() int { + return 1 +} + +// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. +func (e Aes128CtsHmacSha96) GetDefaultStringToKeyParams() string { + return "00001000" +} + +// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. +func (e Aes128CtsHmacSha96) GetConfounderByteSize() int { + return aes.BlockSize +} + +// GetHMACBitLength returns the bit count size of the integrity hash. +func (e Aes128CtsHmacSha96) GetHMACBitLength() int { + return 96 +} + +// GetCypherBlockBitLength returns the bit count size of the cypher block. +func (e Aes128CtsHmacSha96) GetCypherBlockBitLength() int { + return aes.BlockSize * 8 +} + +// StringToKey returns a key derived from the string provided. +func (e Aes128CtsHmacSha96) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { + return rfc3962.StringToKey(secret, salt, s2kparams, e) +} + +// RandomToKey returns a key from the bytes provided. +func (e Aes128CtsHmacSha96) RandomToKey(b []byte) []byte { + return rfc3961.RandomToKey(b) +} + +// EncryptData encrypts the data provided. +func (e Aes128CtsHmacSha96) EncryptData(key, data []byte) ([]byte, []byte, error) { + return rfc3962.EncryptData(key, data, e) +} + +// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. +func (e Aes128CtsHmacSha96) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { + return rfc3962.EncryptMessage(key, message, usage, e) +} + +// DecryptData decrypts the data provided. +func (e Aes128CtsHmacSha96) DecryptData(key, data []byte) ([]byte, error) { + return rfc3962.DecryptData(key, data, e) +} + +// DecryptMessage decrypts the message provided and verifies the integrity of the message. +func (e Aes128CtsHmacSha96) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { + return rfc3962.DecryptMessage(key, ciphertext, usage, e) +} + +// DeriveKey derives a key from the protocol key based on the usage value. +func (e Aes128CtsHmacSha96) DeriveKey(protocolKey, usage []byte) ([]byte, error) { + return rfc3961.DeriveKey(protocolKey, usage, e) +} + +// DeriveRandom generates data needed for key generation. +func (e Aes128CtsHmacSha96) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { + return rfc3961.DeriveRandom(protocolKey, usage, e) +} + +// VerifyIntegrity checks the integrity of the plaintext message. +func (e Aes128CtsHmacSha96) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { + return rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e) +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. 
+func (e Aes128CtsHmacSha96) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { + return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e) +} + +// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. +func (e Aes128CtsHmacSha96) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { + c, err := e.GetChecksumHash(protocolKey, data, usage) + if err != nil { + return false + } + return hmac.Equal(chksum, c) +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes128-cts-hmac-sha256-128.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes128-cts-hmac-sha256-128.go new file mode 100644 index 00000000..49a1b077 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes128-cts-hmac-sha256-128.go @@ -0,0 +1,135 @@ +package crypto + +import ( + "crypto/aes" + "crypto/hmac" + "crypto/sha256" + "hash" + + "gopkg.in/jcmturner/gokrb5.v7/crypto/common" + "gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009" + "gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype" + "gopkg.in/jcmturner/gokrb5.v7/iana/etypeID" +) + +// RFC https://tools.ietf.org/html/rfc8009 + +// Aes128CtsHmacSha256128 implements Kerberos encryption type aes128-cts-hmac-sha256-128 +type Aes128CtsHmacSha256128 struct { +} + +// GetETypeID returns the EType ID number. +func (e Aes128CtsHmacSha256128) GetETypeID() int32 { + return etypeID.AES128_CTS_HMAC_SHA256_128 +} + +// GetHashID returns the checksum type ID number. +func (e Aes128CtsHmacSha256128) GetHashID() int32 { + return chksumtype.HMAC_SHA256_128_AES128 +} + +// GetKeyByteSize returns the number of bytes for key of this etype. +func (e Aes128CtsHmacSha256128) GetKeyByteSize() int { + return 128 / 8 +} + +// GetKeySeedBitLength returns the number of bits for the seed for key generation. +func (e Aes128CtsHmacSha256128) GetKeySeedBitLength() int { + return e.GetKeyByteSize() * 8 +} + +// GetHashFunc returns the hash function for this etype. +func (e Aes128CtsHmacSha256128) GetHashFunc() func() hash.Hash { + return sha256.New +} + +// GetMessageBlockByteSize returns the block size for the etype's messages. +func (e Aes128CtsHmacSha256128) GetMessageBlockByteSize() int { + return 1 +} + +// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. +func (e Aes128CtsHmacSha256128) GetDefaultStringToKeyParams() string { + return "00008000" +} + +// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. +func (e Aes128CtsHmacSha256128) GetConfounderByteSize() int { + return aes.BlockSize +} + +// GetHMACBitLength returns the bit count size of the integrity hash. +func (e Aes128CtsHmacSha256128) GetHMACBitLength() int { + return 128 +} + +// GetCypherBlockBitLength returns the bit count size of the cypher block. +func (e Aes128CtsHmacSha256128) GetCypherBlockBitLength() int { + return aes.BlockSize * 8 +} + +// StringToKey returns a key derived from the string provided. +func (e Aes128CtsHmacSha256128) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { + saltp := rfc8009.GetSaltP(salt, "aes128-cts-hmac-sha256-128") + return rfc8009.StringToKey(secret, saltp, s2kparams, e) +} + +// RandomToKey returns a key from the bytes provided. +func (e Aes128CtsHmacSha256128) RandomToKey(b []byte) []byte { + return rfc8009.RandomToKey(b) +} + +// EncryptData encrypts the data provided. 
+func (e Aes128CtsHmacSha256128) EncryptData(key, data []byte) ([]byte, []byte, error) { + return rfc8009.EncryptData(key, data, e) +} + +// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. +func (e Aes128CtsHmacSha256128) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { + return rfc8009.EncryptMessage(key, message, usage, e) +} + +// DecryptData decrypts the data provided. +func (e Aes128CtsHmacSha256128) DecryptData(key, data []byte) ([]byte, error) { + return rfc8009.DecryptData(key, data, e) +} + +// DecryptMessage decrypts the message provided and verifies the integrity of the message. +func (e Aes128CtsHmacSha256128) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { + return rfc8009.DecryptMessage(key, ciphertext, usage, e) +} + +// DeriveKey derives a key from the protocol key based on the usage value. +func (e Aes128CtsHmacSha256128) DeriveKey(protocolKey, usage []byte) ([]byte, error) { + return rfc8009.DeriveKey(protocolKey, usage, e), nil +} + +// DeriveRandom generates data needed for key generation. +func (e Aes128CtsHmacSha256128) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { + return rfc8009.DeriveRandom(protocolKey, usage, e) +} + +// VerifyIntegrity checks the integrity of the ciphertext message. +// The HMAC is calculated over the cipher state concatenated with the +// AES output, instead of being calculated over the confounder and +// plaintext. This allows the message receiver to verify the +// integrity of the message before decrypting the message. +// Therefore the pt value to this interface method is not use. Pass any []byte. +func (e Aes128CtsHmacSha256128) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { + // We don't need ib just there for the interface + return rfc8009.VerifyIntegrity(protocolKey, ct, usage, e) +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func (e Aes128CtsHmacSha256128) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { + return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e) +} + +// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. 
+func (e Aes128CtsHmacSha256128) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { + c, err := e.GetChecksumHash(protocolKey, data, usage) + if err != nil { + return false + } + return hmac.Equal(chksum, c) +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes256-cts-hmac-sha1-96.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes256-cts-hmac-sha1-96.go new file mode 100644 index 00000000..0cdbb7ec --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes256-cts-hmac-sha1-96.go @@ -0,0 +1,173 @@ +package crypto + +import ( + "crypto/aes" + "crypto/hmac" + "crypto/sha1" + "hash" + + "gopkg.in/jcmturner/gokrb5.v7/crypto/common" + "gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961" + "gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962" + "gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype" + "gopkg.in/jcmturner/gokrb5.v7/iana/etypeID" +) + +// RFC 3962 +//+--------------------------------------------------------------------+ +//| protocol key format 128- or 256-bit string | +//| | +//| string-to-key function PBKDF2+DK with variable | +//| iteration count (see | +//| above) | +//| | +//| default string-to-key parameters 00 00 10 00 | +//| | +//| key-generation seed length key size | +//| | +//| random-to-key function identity function | +//| | +//| hash function, H SHA-1 | +//| | +//| HMAC output size, h 12 octets (96 bits) | +//| | +//| message block size, m 1 octet | +//| | +//| encryption/decryption functions, AES in CBC-CTS mode | +//| E and D (cipher block size 16 | +//| octets), with next-to- | +//| last block (last block | +//| if only one) as CBC-style | +//| ivec | +//+--------------------------------------------------------------------+ +// +//+--------------------------------------------------------------------+ +//| encryption types | +//+--------------------------------------------------------------------+ +//| type name etype value key size | +//+--------------------------------------------------------------------+ +//| aes128-cts-hmac-sha1-96 17 128 | +//| aes256-cts-hmac-sha1-96 18 256 | +//+--------------------------------------------------------------------+ +// +//+--------------------------------------------------------------------+ +//| checksum types | +//+--------------------------------------------------------------------+ +//| type name sumtype value length | +//+--------------------------------------------------------------------+ +//| hmac-sha1-96-aes128 15 96 | +//| hmac-sha1-96-aes256 16 96 | +//+--------------------------------------------------------------------+ + +// Aes256CtsHmacSha96 implements Kerberos encryption type aes256-cts-hmac-sha1-96 +type Aes256CtsHmacSha96 struct { +} + +// GetETypeID returns the EType ID number. +func (e Aes256CtsHmacSha96) GetETypeID() int32 { + return etypeID.AES256_CTS_HMAC_SHA1_96 +} + +// GetHashID returns the checksum type ID number. +func (e Aes256CtsHmacSha96) GetHashID() int32 { + return chksumtype.HMAC_SHA1_96_AES256 +} + +// GetKeyByteSize returns the number of bytes for key of this etype. +func (e Aes256CtsHmacSha96) GetKeyByteSize() int { + return 256 / 8 +} + +// GetKeySeedBitLength returns the number of bits for the seed for key generation. +func (e Aes256CtsHmacSha96) GetKeySeedBitLength() int { + return e.GetKeyByteSize() * 8 +} + +// GetHashFunc returns the hash function for this etype. +func (e Aes256CtsHmacSha96) GetHashFunc() func() hash.Hash { + return sha1.New +} + +// GetMessageBlockByteSize returns the block size for the etype's messages. 
+func (e Aes256CtsHmacSha96) GetMessageBlockByteSize() int { + return 1 +} + +// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. +func (e Aes256CtsHmacSha96) GetDefaultStringToKeyParams() string { + return "00001000" +} + +// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. +func (e Aes256CtsHmacSha96) GetConfounderByteSize() int { + return aes.BlockSize +} + +// GetHMACBitLength returns the bit count size of the integrity hash. +func (e Aes256CtsHmacSha96) GetHMACBitLength() int { + return 96 +} + +// GetCypherBlockBitLength returns the bit count size of the cypher block. +func (e Aes256CtsHmacSha96) GetCypherBlockBitLength() int { + return aes.BlockSize * 8 +} + +// StringToKey returns a key derived from the string provided. +func (e Aes256CtsHmacSha96) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { + return rfc3962.StringToKey(secret, salt, s2kparams, e) +} + +// RandomToKey returns a key from the bytes provided. +func (e Aes256CtsHmacSha96) RandomToKey(b []byte) []byte { + return rfc3961.RandomToKey(b) +} + +// EncryptData encrypts the data provided. +func (e Aes256CtsHmacSha96) EncryptData(key, data []byte) ([]byte, []byte, error) { + return rfc3962.EncryptData(key, data, e) +} + +// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. +func (e Aes256CtsHmacSha96) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { + return rfc3962.EncryptMessage(key, message, usage, e) +} + +// DecryptData decrypts the data provided. +func (e Aes256CtsHmacSha96) DecryptData(key, data []byte) ([]byte, error) { + return rfc3962.DecryptData(key, data, e) +} + +// DecryptMessage decrypts the message provided and verifies the integrity of the message. +func (e Aes256CtsHmacSha96) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { + return rfc3962.DecryptMessage(key, ciphertext, usage, e) +} + +// DeriveKey derives a key from the protocol key based on the usage value. +func (e Aes256CtsHmacSha96) DeriveKey(protocolKey, usage []byte) ([]byte, error) { + return rfc3961.DeriveKey(protocolKey, usage, e) +} + +// DeriveRandom generates data needed for key generation. +func (e Aes256CtsHmacSha96) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { + return rfc3961.DeriveRandom(protocolKey, usage, e) +} + +// VerifyIntegrity checks the integrity of the plaintext message. +func (e Aes256CtsHmacSha96) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { + return rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e) +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func (e Aes256CtsHmacSha96) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { + return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e) +} + +// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. 
+func (e Aes256CtsHmacSha96) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { + c, err := e.GetChecksumHash(protocolKey, data, usage) + if err != nil { + return false + } + return hmac.Equal(chksum, c) +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes256-cts-hmac-sha384-192.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes256-cts-hmac-sha384-192.go new file mode 100644 index 00000000..562b0786 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes256-cts-hmac-sha384-192.go @@ -0,0 +1,135 @@ +package crypto + +import ( + "crypto/aes" + "crypto/hmac" + "crypto/sha512" + "hash" + + "gopkg.in/jcmturner/gokrb5.v7/crypto/common" + "gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009" + "gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype" + "gopkg.in/jcmturner/gokrb5.v7/iana/etypeID" +) + +// RFC https://tools.ietf.org/html/rfc8009 + +// Aes256CtsHmacSha384192 implements Kerberos encryption type aes256-cts-hmac-sha384-192 +type Aes256CtsHmacSha384192 struct { +} + +// GetETypeID returns the EType ID number. +func (e Aes256CtsHmacSha384192) GetETypeID() int32 { + return etypeID.AES256_CTS_HMAC_SHA384_192 +} + +// GetHashID returns the checksum type ID number. +func (e Aes256CtsHmacSha384192) GetHashID() int32 { + return chksumtype.HMAC_SHA384_192_AES256 +} + +// GetKeyByteSize returns the number of bytes for key of this etype. +func (e Aes256CtsHmacSha384192) GetKeyByteSize() int { + return 192 / 8 +} + +// GetKeySeedBitLength returns the number of bits for the seed for key generation. +func (e Aes256CtsHmacSha384192) GetKeySeedBitLength() int { + return e.GetKeyByteSize() * 8 +} + +// GetHashFunc returns the hash function for this etype. +func (e Aes256CtsHmacSha384192) GetHashFunc() func() hash.Hash { + return sha512.New384 +} + +// GetMessageBlockByteSize returns the block size for the etype's messages. +func (e Aes256CtsHmacSha384192) GetMessageBlockByteSize() int { + return 1 +} + +// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. +func (e Aes256CtsHmacSha384192) GetDefaultStringToKeyParams() string { + return "00008000" +} + +// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. +func (e Aes256CtsHmacSha384192) GetConfounderByteSize() int { + return aes.BlockSize +} + +// GetHMACBitLength returns the bit count size of the integrity hash. +func (e Aes256CtsHmacSha384192) GetHMACBitLength() int { + return 192 +} + +// GetCypherBlockBitLength returns the bit count size of the cypher block. +func (e Aes256CtsHmacSha384192) GetCypherBlockBitLength() int { + return aes.BlockSize * 8 +} + +// StringToKey returns a key derived from the string provided. +func (e Aes256CtsHmacSha384192) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { + saltp := rfc8009.GetSaltP(salt, "aes256-cts-hmac-sha384-192") + return rfc8009.StringToKey(secret, saltp, s2kparams, e) +} + +// RandomToKey returns a key from the bytes provided. +func (e Aes256CtsHmacSha384192) RandomToKey(b []byte) []byte { + return rfc8009.RandomToKey(b) +} + +// EncryptData encrypts the data provided. +func (e Aes256CtsHmacSha384192) EncryptData(key, data []byte) ([]byte, []byte, error) { + return rfc8009.EncryptData(key, data, e) +} + +// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. 
+func (e Aes256CtsHmacSha384192) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { + return rfc8009.EncryptMessage(key, message, usage, e) +} + +// DecryptData decrypts the data provided. +func (e Aes256CtsHmacSha384192) DecryptData(key, data []byte) ([]byte, error) { + return rfc8009.DecryptData(key, data, e) +} + +// DecryptMessage decrypts the message provided and verifies the integrity of the message. +func (e Aes256CtsHmacSha384192) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { + return rfc8009.DecryptMessage(key, ciphertext, usage, e) +} + +// DeriveKey derives a key from the protocol key based on the usage value. +func (e Aes256CtsHmacSha384192) DeriveKey(protocolKey, usage []byte) ([]byte, error) { + return rfc8009.DeriveKey(protocolKey, usage, e), nil +} + +// DeriveRandom generates data needed for key generation. +func (e Aes256CtsHmacSha384192) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { + return rfc8009.DeriveRandom(protocolKey, usage, e) +} + +// VerifyIntegrity checks the integrity of the ciphertext message. +// The HMAC is calculated over the cipher state concatenated with the +// AES output, instead of being calculated over the confounder and +// plaintext. This allows the message receiver to verify the +// integrity of the message before decrypting the message. +// Therefore the pt value to this interface method is not use. Pass any []byte. +func (e Aes256CtsHmacSha384192) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { + // We don't need ib just there for the interface + return rfc8009.VerifyIntegrity(protocolKey, ct, usage, e) +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func (e Aes256CtsHmacSha384192) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { + return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e) +} + +// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. +func (e Aes256CtsHmacSha384192) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { + c, err := e.GetChecksumHash(protocolKey, data, usage) + if err != nil { + return false + } + return hmac.Equal(chksum, c) +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/common/common.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/common/common.go new file mode 100644 index 00000000..96ae5499 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/common/common.go @@ -0,0 +1,143 @@ +// Package common provides encryption methods common across encryption types +package common + +import ( + "bytes" + "crypto/hmac" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + + "gopkg.in/jcmturner/gokrb5.v7/crypto/etype" +) + +// ZeroPad pads bytes with zeros to nearest multiple of message size m. +func ZeroPad(b []byte, m int) ([]byte, error) { + if m <= 0 { + return nil, errors.New("Invalid message block size when padding") + } + if b == nil || len(b) == 0 { + return nil, errors.New("Data not valid to pad: Zero size") + } + if l := len(b) % m; l != 0 { + n := m - l + z := make([]byte, n) + b = append(b, z...) + } + return b, nil +} + +// PKCS7Pad pads bytes according to RFC 2315 to nearest multiple of message size m. 
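// ---------------------------------------------------------------------------
// Illustrative sketch, separate from the vendored code above, of the padding
// helpers in crypto/common: ZeroPad fills with zeros, PKCS7Pad appends n bytes
// of value n (a whole extra block when the input is already aligned), and
// PKCS7Unpad reverses it. The sample bytes are arbitrary.
package main

import (
    "fmt"

    "gopkg.in/jcmturner/gokrb5.v7/crypto/common"
)

func main() {
    zp, _ := common.ZeroPad([]byte("abc"), 8)
    fmt.Printf("%x\n", zp) // 6162630000000000

    pp, _ := common.PKCS7Pad([]byte("abc"), 8)
    fmt.Printf("%x\n", pp) // 6162630505050505

    up, _ := common.PKCS7Unpad(pp, 8)
    fmt.Printf("%s\n", up) // abc
}
// ---------------------------------------------------------------------------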
+func PKCS7Pad(b []byte, m int) ([]byte, error) { + if m <= 0 { + return nil, errors.New("Invalid message block size when padding") + } + if b == nil || len(b) == 0 { + return nil, errors.New("Data not valid to pad: Zero size") + } + n := m - (len(b) % m) + pb := make([]byte, len(b)+n) + copy(pb, b) + copy(pb[len(b):], bytes.Repeat([]byte{byte(n)}, n)) + return pb, nil +} + +// PKCS7Unpad removes RFC 2315 padding from byes where message size is m. +func PKCS7Unpad(b []byte, m int) ([]byte, error) { + if m <= 0 { + return nil, errors.New("invalid message block size when unpadding") + } + if b == nil || len(b) == 0 { + return nil, errors.New("padded data not valid: Zero size") + } + if len(b)%m != 0 { + return nil, errors.New("padded data not valid: Not multiple of message block size") + } + c := b[len(b)-1] + n := int(c) + if n == 0 || n > len(b) { + return nil, errors.New("padded data not valid: Data may not have been padded") + } + for i := 0; i < n; i++ { + if b[len(b)-n+i] != c { + return nil, errors.New("padded data not valid") + } + } + return b[:len(b)-n], nil +} + +// GetHash generates the keyed hash value according to the etype's hash function. +func GetHash(pt, key []byte, usage []byte, etype etype.EType) ([]byte, error) { + k, err := etype.DeriveKey(key, usage) + if err != nil { + return nil, fmt.Errorf("unable to derive key for checksum: %v", err) + } + mac := hmac.New(etype.GetHashFunc(), k) + p := make([]byte, len(pt)) + copy(p, pt) + mac.Write(p) + return mac.Sum(nil)[:etype.GetHMACBitLength()/8], nil +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func GetChecksumHash(b, key []byte, usage uint32, etype etype.EType) ([]byte, error) { + return GetHash(b, key, GetUsageKc(usage), etype) +} + +// GetIntegrityHash returns a keyed integrity hash of the bytes provided. +func GetIntegrityHash(b, key []byte, usage uint32, etype etype.EType) ([]byte, error) { + return GetHash(b, key, GetUsageKi(usage), etype) +} + +// VerifyChecksum compares the checksum of the msg bytes is the same as the checksum provided. +func VerifyChecksum(key, chksum, msg []byte, usage uint32, etype etype.EType) bool { + //The ciphertext output is the concatenation of the output of the basic + //encryption function E and a (possibly truncated) HMAC using the + //specified hash function H, both applied to the plaintext with a + //random confounder prefix and sufficient padding to bring it to a + //multiple of the message block size. When the HMAC is computed, the + //key is used in the protocol key form. + expectedMAC, _ := GetChecksumHash(msg, key, usage, etype) + return hmac.Equal(chksum, expectedMAC) +} + +// GetUsageKc returns the checksum key usage value for the usage number un. +// +// RFC 3961: The "well-known constant" used for the DK function is the key usage number, expressed as four octets in big-endian order, followed by one octet indicated below. +// +// Kc = DK(base-key, usage | 0x99); +func GetUsageKc(un uint32) []byte { + return getUsage(un, 0x99) +} + +// GetUsageKe returns the encryption key usage value for the usage number un +// +// RFC 3961: The "well-known constant" used for the DK function is the key usage number, expressed as four octets in big-endian order, followed by one octet indicated below. 
+// +// Ke = DK(base-key, usage | 0xAA); +func GetUsageKe(un uint32) []byte { + return getUsage(un, 0xAA) +} + +// GetUsageKi returns the integrity key usage value for the usage number un +// +// RFC 3961: The "well-known constant" used for the DK function is the key usage number, expressed as four octets in big-endian order, followed by one octet indicated below. +// +// Ki = DK(base-key, usage | 0x55); +func GetUsageKi(un uint32) []byte { + return getUsage(un, 0x55) +} + +func getUsage(un uint32, o byte) []byte { + var buf bytes.Buffer + binary.Write(&buf, binary.BigEndian, un) + return append(buf.Bytes(), o) +} + +// IterationsToS2Kparams converts the number of iterations as an integer to a string representation. +func IterationsToS2Kparams(i uint32) string { + b := make([]byte, 4, 4) + binary.BigEndian.PutUint32(b, i) + return hex.EncodeToString(b) +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/crypto.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/crypto.go new file mode 100644 index 00000000..e04e9681 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/crypto.go @@ -0,0 +1,175 @@ +// Package crypto implements cryptographic functions for Kerberos 5 implementation. +package crypto + +import ( + "encoding/hex" + "fmt" + + "gopkg.in/jcmturner/gokrb5.v7/crypto/etype" + "gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype" + "gopkg.in/jcmturner/gokrb5.v7/iana/etypeID" + "gopkg.in/jcmturner/gokrb5.v7/iana/patype" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +// GetEtype returns an instances of the required etype struct for the etype ID. +func GetEtype(id int32) (etype.EType, error) { + switch id { + case etypeID.AES128_CTS_HMAC_SHA1_96: + var et Aes128CtsHmacSha96 + return et, nil + case etypeID.AES256_CTS_HMAC_SHA1_96: + var et Aes256CtsHmacSha96 + return et, nil + case etypeID.AES128_CTS_HMAC_SHA256_128: + var et Aes128CtsHmacSha256128 + return et, nil + case etypeID.AES256_CTS_HMAC_SHA384_192: + var et Aes256CtsHmacSha384192 + return et, nil + case etypeID.DES3_CBC_SHA1_KD: + var et Des3CbcSha1Kd + return et, nil + case etypeID.RC4_HMAC: + var et RC4HMAC + return et, nil + default: + return nil, fmt.Errorf("unknown or unsupported EType: %d", id) + } +} + +// GetChksumEtype returns an instances of the required etype struct for the checksum ID. +func GetChksumEtype(id int32) (etype.EType, error) { + switch id { + case chksumtype.HMAC_SHA1_96_AES128: + var et Aes128CtsHmacSha96 + return et, nil + case chksumtype.HMAC_SHA1_96_AES256: + var et Aes256CtsHmacSha96 + return et, nil + case chksumtype.HMAC_SHA256_128_AES128: + var et Aes128CtsHmacSha256128 + return et, nil + case chksumtype.HMAC_SHA384_192_AES256: + var et Aes256CtsHmacSha384192 + return et, nil + case chksumtype.HMAC_SHA1_DES3_KD: + var et Des3CbcSha1Kd + return et, nil + case chksumtype.KERB_CHECKSUM_HMAC_MD5: + var et RC4HMAC + return et, nil + //case chksumtype.KERB_CHECKSUM_HMAC_MD5_UNSIGNED: + // var et RC4HMAC + // return et, nil + default: + return nil, fmt.Errorf("unknown or unsupported checksum type: %d", id) + } +} + +// GetKeyFromPassword generates an encryption key from the principal's password. 
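// ---------------------------------------------------------------------------
// Illustrative sketch, separate from the vendored code above, of the RFC 3961
// key-usage constants and keyed checksums implemented in crypto/common: the
// checksum key Kc is derived from usage|0x99, and the default s2kparams
// strings are big-endian iteration counts in hex. The password, salt and
// usage number are made-up example values.
package main

import (
    "fmt"

    "gopkg.in/jcmturner/gokrb5.v7/crypto"
    "gopkg.in/jcmturner/gokrb5.v7/crypto/common"
)

func main() {
    // Key usage 3 with the checksum constant 0x99 appended.
    fmt.Printf("Kc usage bytes: %x\n", common.GetUsageKc(3)) // 0000000399

    // 32768 iterations encodes to the "00008000" default used by the
    // aes*-cts-hmac-sha256/384 etypes above.
    fmt.Println(common.IterationsToS2Kparams(32768))

    et := crypto.Aes256CtsHmacSha96{}
    key, err := et.StringToKey("password", "EXAMPLE.COMalice", et.GetDefaultStringToKeyParams())
    if err != nil {
        panic(err)
    }
    data := []byte("krb5 application data")
    mac, err := common.GetChecksumHash(data, key, 3, et)
    if err != nil {
        panic(err)
    }
    fmt.Printf("hmac-sha1-96 checksum: %x\n", mac)            // 12 bytes (96 bits)
    fmt.Println(common.VerifyChecksum(key, mac, data, 3, et)) // true
}
// ---------------------------------------------------------------------------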
+func GetKeyFromPassword(passwd string, cname types.PrincipalName, realm string, etypeID int32, pas types.PADataSequence) (types.EncryptionKey, etype.EType, error) { + var key types.EncryptionKey + et, err := GetEtype(etypeID) + if err != nil { + return key, et, fmt.Errorf("error getting encryption type: %v", err) + } + sk2p := et.GetDefaultStringToKeyParams() + var salt string + var paID int32 + for _, pa := range pas { + switch pa.PADataType { + case patype.PA_PW_SALT: + if paID > pa.PADataType { + continue + } + salt = string(pa.PADataValue) + case patype.PA_ETYPE_INFO: + if paID > pa.PADataType { + continue + } + var eti types.ETypeInfo + err := eti.Unmarshal(pa.PADataValue) + if err != nil { + return key, et, fmt.Errorf("error unmashaling PA Data to PA-ETYPE-INFO2: %v", err) + } + if etypeID != eti[0].EType { + et, err = GetEtype(eti[0].EType) + if err != nil { + return key, et, fmt.Errorf("error getting encryption type: %v", err) + } + } + salt = string(eti[0].Salt) + case patype.PA_ETYPE_INFO2: + if paID > pa.PADataType { + continue + } + var et2 types.ETypeInfo2 + err := et2.Unmarshal(pa.PADataValue) + if err != nil { + return key, et, fmt.Errorf("error unmashalling PA Data to PA-ETYPE-INFO2: %v", err) + } + if etypeID != et2[0].EType { + et, err = GetEtype(et2[0].EType) + if err != nil { + return key, et, fmt.Errorf("error getting encryption type: %v", err) + } + } + if len(et2[0].S2KParams) == 4 { + sk2p = hex.EncodeToString(et2[0].S2KParams) + } + salt = et2[0].Salt + } + } + if salt == "" { + salt = cname.GetSalt(realm) + } + k, err := et.StringToKey(passwd, salt, sk2p) + if err != nil { + return key, et, fmt.Errorf("error deriving key from string: %+v", err) + } + key = types.EncryptionKey{ + KeyType: etypeID, + KeyValue: k, + } + return key, et, nil +} + +// GetEncryptedData encrypts the data provided and returns and EncryptedData type. +// Pass a usage value of zero to use the key provided directly rather than deriving one. +func GetEncryptedData(plainBytes []byte, key types.EncryptionKey, usage uint32, kvno int) (types.EncryptedData, error) { + var ed types.EncryptedData + et, err := GetEtype(key.KeyType) + if err != nil { + return ed, fmt.Errorf("error getting etype: %v", err) + } + _, b, err := et.EncryptMessage(key.KeyValue, plainBytes, usage) + if err != nil { + return ed, err + } + + ed = types.EncryptedData{ + EType: key.KeyType, + Cipher: b, + KVNO: kvno, + } + return ed, nil +} + +// DecryptEncPart decrypts the EncryptedData. +func DecryptEncPart(ed types.EncryptedData, key types.EncryptionKey, usage uint32) ([]byte, error) { + return DecryptMessage(ed.Cipher, key, usage) +} + +// DecryptMessage decrypts the ciphertext and verifies the integrity. 
+func DecryptMessage(ciphertext []byte, key types.EncryptionKey, usage uint32) ([]byte, error) { + et, err := GetEtype(key.KeyType) + if err != nil { + return []byte{}, fmt.Errorf("error decrypting: %v", err) + } + b, err := et.DecryptMessage(key.KeyValue, ciphertext, usage) + if err != nil { + return nil, fmt.Errorf("error decrypting: %v", err) + } + return b, nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/des3-cbc-sha1-kd.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/des3-cbc-sha1-kd.go new file mode 100644 index 00000000..db3a149a --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/des3-cbc-sha1-kd.go @@ -0,0 +1,174 @@ +package crypto + +import ( + "crypto/des" + "crypto/hmac" + "crypto/sha1" + "errors" + "hash" + + "gopkg.in/jcmturner/gokrb5.v7/crypto/common" + "gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961" + "gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype" + "gopkg.in/jcmturner/gokrb5.v7/iana/etypeID" +) + +//RFC: 3961 Section 6.3 + +/* + des3-cbc-hmac-sha1-kd, hmac-sha1-des3-kd + ------------------------------------------------ + protocol key format 24 bytes, parity in low + bit of each + + key-generation seed 21 bytes + length + + hash function SHA-1 + + HMAC output size 160 bits + + message block size 8 bytes + + default string-to-key empty string + params + + encryption and triple-DES encrypt and + decryption functions decrypt, in outer-CBC + mode (cipher block size + 8 octets) + + key generation functions: + + random-to-key DES3random-to-key (see + below) + + string-to-key DES3string-to-key (see + below) + + The des3-cbc-hmac-sha1-kd encryption type is assigned the value + sixteen (16). The hmac-sha1-des3-kd checksum algorithm is assigned a + checksum type number of twelve (12)*/ + +// Des3CbcSha1Kd implements Kerberos encryption type des3-cbc-hmac-sha1-kd +type Des3CbcSha1Kd struct { +} + +// GetETypeID returns the EType ID number. +func (e Des3CbcSha1Kd) GetETypeID() int32 { + return etypeID.DES3_CBC_SHA1_KD +} + +// GetHashID returns the checksum type ID number. +func (e Des3CbcSha1Kd) GetHashID() int32 { + return chksumtype.HMAC_SHA1_DES3_KD +} + +// GetKeyByteSize returns the number of bytes for key of this etype. +func (e Des3CbcSha1Kd) GetKeyByteSize() int { + return 24 +} + +// GetKeySeedBitLength returns the number of bits for the seed for key generation. +func (e Des3CbcSha1Kd) GetKeySeedBitLength() int { + return 21 * 8 +} + +// GetHashFunc returns the hash function for this etype. +func (e Des3CbcSha1Kd) GetHashFunc() func() hash.Hash { + return sha1.New +} + +// GetMessageBlockByteSize returns the block size for the etype's messages. +func (e Des3CbcSha1Kd) GetMessageBlockByteSize() int { + //For traditional CBC mode with padding, it would be the underlying cipher's block size + return des.BlockSize +} + +// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. +func (e Des3CbcSha1Kd) GetDefaultStringToKeyParams() string { + var s string + return s +} + +// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. +func (e Des3CbcSha1Kd) GetConfounderByteSize() int { + return des.BlockSize +} + +// GetHMACBitLength returns the bit count size of the integrity hash. +func (e Des3CbcSha1Kd) GetHMACBitLength() int { + return e.GetHashFunc()().Size() * 8 +} + +// GetCypherBlockBitLength returns the bit count size of the cypher block. 
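// ---------------------------------------------------------------------------
// Illustrative sketch, separate from the vendored code above: a round trip
// through the package-level helpers in crypto.go. It derives an AES256 key
// from a password, wraps a message in an EncryptedData, then decrypts it with
// the same key usage number. The password, salt, usage number (2) and kvno
// (1) are arbitrary example values.
package main

import (
    "fmt"

    "gopkg.in/jcmturner/gokrb5.v7/crypto"
    "gopkg.in/jcmturner/gokrb5.v7/iana/etypeID"
    "gopkg.in/jcmturner/gokrb5.v7/types"
)

func main() {
    et, err := crypto.GetEtype(etypeID.AES256_CTS_HMAC_SHA1_96)
    if err != nil {
        panic(err)
    }
    kv, err := et.StringToKey("password", "EXAMPLE.COMalice", et.GetDefaultStringToKeyParams())
    if err != nil {
        panic(err)
    }
    key := types.EncryptionKey{KeyType: et.GetETypeID(), KeyValue: kv}

    ed, err := crypto.GetEncryptedData([]byte("ticket payload"), key, 2, 1)
    if err != nil {
        panic(err)
    }
    pt, err := crypto.DecryptEncPart(ed, key, 2) // same usage number as encryption
    if err != nil {
        panic(err)
    }
    fmt.Printf("etype %d, kvno %d, plaintext %q\n", ed.EType, ed.KVNO, pt)
}
// ---------------------------------------------------------------------------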
+func (e Des3CbcSha1Kd) GetCypherBlockBitLength() int { + return des.BlockSize * 8 +} + +// StringToKey returns a key derived from the string provided. +func (e Des3CbcSha1Kd) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { + if s2kparams != "" { + return []byte{}, errors.New("s2kparams must be an empty string") + } + return rfc3961.DES3StringToKey(secret, salt, e) +} + +// RandomToKey returns a key from the bytes provided. +func (e Des3CbcSha1Kd) RandomToKey(b []byte) []byte { + return rfc3961.DES3RandomToKey(b) +} + +// DeriveRandom generates data needed for key generation. +func (e Des3CbcSha1Kd) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { + r, err := rfc3961.DeriveRandom(protocolKey, usage, e) + return r, err +} + +// DeriveKey derives a key from the protocol key based on the usage value. +func (e Des3CbcSha1Kd) DeriveKey(protocolKey, usage []byte) ([]byte, error) { + r, err := e.DeriveRandom(protocolKey, usage) + if err != nil { + return nil, err + } + return e.RandomToKey(r), nil +} + +// EncryptData encrypts the data provided. +func (e Des3CbcSha1Kd) EncryptData(key, data []byte) ([]byte, []byte, error) { + return rfc3961.DES3EncryptData(key, data, e) +} + +// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. +func (e Des3CbcSha1Kd) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { + return rfc3961.DES3EncryptMessage(key, message, usage, e) +} + +// DecryptData decrypts the data provided. +func (e Des3CbcSha1Kd) DecryptData(key, data []byte) ([]byte, error) { + return rfc3961.DES3DecryptData(key, data, e) +} + +// DecryptMessage decrypts the message provided and verifies the integrity of the message. +func (e Des3CbcSha1Kd) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { + return rfc3961.DES3DecryptMessage(key, ciphertext, usage, e) +} + +// VerifyIntegrity checks the integrity of the plaintext message. +func (e Des3CbcSha1Kd) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { + return rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e) +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func (e Des3CbcSha1Kd) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { + return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e) +} + +// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. +func (e Des3CbcSha1Kd) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { + c, err := e.GetChecksumHash(protocolKey, data, usage) + if err != nil { + return false + } + return hmac.Equal(chksum, c) +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/etype/etype.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/etype/etype.go new file mode 100644 index 00000000..ee7510e2 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/etype/etype.go @@ -0,0 +1,29 @@ +// Package etype provides the Kerberos Encryption Type interface +package etype + +import "hash" + +// EType is the interface defining the Encryption Type. 
+type EType interface { + GetETypeID() int32 + GetHashID() int32 + GetKeyByteSize() int + GetKeySeedBitLength() int // key-generation seed length, k + GetDefaultStringToKeyParams() string // default string-to-key parameters (s2kparams) + StringToKey(string, salt, s2kparams string) ([]byte, error) // string-to-key (UTF-8 string, UTF-8 string, opaque)->(protocol-key) + RandomToKey(b []byte) []byte // random-to-key (bitstring[K])->(protocol-key) + GetHMACBitLength() int // HMAC output size, h + GetMessageBlockByteSize() int // message block size, m + EncryptData(key, data []byte) ([]byte, []byte, error) // E function - encrypt (specific-key, state, octet string)->(state, octet string) + EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) + DecryptData(key, data []byte) ([]byte, error) // D function + DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) + GetCypherBlockBitLength() int // cipher block size, c + GetConfounderByteSize() int // This is the same as the cipher block size but in bytes. + DeriveKey(protocolKey, usage []byte) ([]byte, error) // DK key-derivation (protocol-key, integer)->(specific-key) + DeriveRandom(protocolKey, usage []byte) ([]byte, error) // DR pseudo-random (protocol-key, octet-string)->(octet-string) + VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool + GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) + VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool + GetHashFunc() func() hash.Hash +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rc4-hmac.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rc4-hmac.go new file mode 100644 index 00000000..9df55eed --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rc4-hmac.go @@ -0,0 +1,135 @@ +package crypto + +import ( + "bytes" + "crypto/hmac" + "crypto/md5" + "hash" + "io" + + "golang.org/x/crypto/md4" + "gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961" + "gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757" + "gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype" + "gopkg.in/jcmturner/gokrb5.v7/iana/etypeID" +) + +//http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/8u40-b25/sun/security/krb5/internal/crypto/dk/ArcFourCrypto.java#ArcFourCrypto.encrypt%28byte%5B%5D%2Cint%2Cbyte%5B%5D%2Cbyte%5B%5D%2Cbyte%5B%5D%2Cint%2Cint%29 + +// RC4HMAC implements Kerberos encryption type aes256-cts-hmac-sha1-96 +type RC4HMAC struct { +} + +// GetETypeID returns the EType ID number. +func (e RC4HMAC) GetETypeID() int32 { + return etypeID.RC4_HMAC +} + +// GetHashID returns the checksum type ID number. +func (e RC4HMAC) GetHashID() int32 { + return chksumtype.KERB_CHECKSUM_HMAC_MD5 +} + +// GetKeyByteSize returns the number of bytes for key of this etype. +func (e RC4HMAC) GetKeyByteSize() int { + return 16 +} + +// GetKeySeedBitLength returns the number of bits for the seed for key generation. +func (e RC4HMAC) GetKeySeedBitLength() int { + return e.GetKeyByteSize() * 8 +} + +// GetHashFunc returns the hash function for this etype. +func (e RC4HMAC) GetHashFunc() func() hash.Hash { + return md5.New +} + +// GetMessageBlockByteSize returns the block size for the etype's messages. +func (e RC4HMAC) GetMessageBlockByteSize() int { + return 1 +} + +// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. +func (e RC4HMAC) GetDefaultStringToKeyParams() string { + return "" +} + +// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. 
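Everything in this vendored crypto tree is written against this interface, so callers never need to know which enctype they hold. The Des3CbcSha1Kd type from the previous file satisfies it, and its getters return exactly the RFC 3961 parameter profile quoted in that file's header; a small sketch:

package main

import (
	"fmt"

	"gopkg.in/jcmturner/gokrb5.v7/crypto"
	"gopkg.in/jcmturner/gokrb5.v7/crypto/etype"
)

// Compile-time assertion that the DES3 enctype implements the interface.
var _ etype.EType = crypto.Des3CbcSha1Kd{}

// profile dumps the RFC 3961 parameters an EType exposes.
func profile(e etype.EType) {
	fmt.Println("etype id:   ", e.GetETypeID())             // 16
	fmt.Println("key bytes:  ", e.GetKeyByteSize())          // 24, parity in the low bit of each
	fmt.Println("seed bits:  ", e.GetKeySeedBitLength())     // 168 (21 bytes)
	fmt.Println("block bytes:", e.GetMessageBlockByteSize()) // 8
	fmt.Println("hmac bits:  ", e.GetHMACBitLength())        // 160 (SHA-1)
}

func main() {
	profile(crypto.Des3CbcSha1Kd{})
}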
+func (e RC4HMAC) GetConfounderByteSize() int { + return 8 +} + +// GetHMACBitLength returns the bit count size of the integrity hash. +func (e RC4HMAC) GetHMACBitLength() int { + return md5.Size * 8 +} + +// GetCypherBlockBitLength returns the bit count size of the cypher block. +func (e RC4HMAC) GetCypherBlockBitLength() int { + return 8 // doesn't really apply +} + +// StringToKey returns a key derived from the string provided. +func (e RC4HMAC) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { + return rfc4757.StringToKey(secret) +} + +// RandomToKey returns a key from the bytes provided. +func (e RC4HMAC) RandomToKey(b []byte) []byte { + r := bytes.NewReader(b) + h := md4.New() + io.Copy(h, r) + return h.Sum(nil) +} + +// EncryptData encrypts the data provided. +func (e RC4HMAC) EncryptData(key, data []byte) ([]byte, []byte, error) { + b, err := rfc4757.EncryptData(key, data, e) + return []byte{}, b, err +} + +// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. +func (e RC4HMAC) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { + b, err := rfc4757.EncryptMessage(key, message, usage, false, e) + return []byte{}, b, err +} + +// DecryptData decrypts the data provided. +func (e RC4HMAC) DecryptData(key, data []byte) ([]byte, error) { + return rfc4757.DecryptData(key, data, e) +} + +// DecryptMessage decrypts the message provided and verifies the integrity of the message. +func (e RC4HMAC) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { + return rfc4757.DecryptMessage(key, ciphertext, usage, false, e) +} + +// DeriveKey derives a key from the protocol key based on the usage value. +func (e RC4HMAC) DeriveKey(protocolKey, usage []byte) ([]byte, error) { + return rfc4757.HMAC(protocolKey, usage), nil +} + +// DeriveRandom generates data needed for key generation. +func (e RC4HMAC) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { + return rfc3961.DeriveRandom(protocolKey, usage, e) +} + +// VerifyIntegrity checks the integrity of the plaintext message. +func (e RC4HMAC) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { + return rfc4757.VerifyIntegrity(protocolKey, pt, ct, e) +} + +// GetChecksumHash returns a keyed checksum hash of the bytes provided. +func (e RC4HMAC) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { + return rfc4757.Checksum(protocolKey, usage, data) +} + +// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. +func (e RC4HMAC) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { + checksum, err := rfc4757.Checksum(protocolKey, usage, data) + if err != nil { + return false + } + return hmac.Equal(checksum, chksum) +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/encryption.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/encryption.go new file mode 100644 index 00000000..6f550fa8 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/encryption.go @@ -0,0 +1,125 @@ +// Package rfc3961 provides encryption and checksum methods as specified in RFC 3961 +package rfc3961 + +import ( + "crypto/cipher" + "crypto/des" + "crypto/hmac" + "crypto/rand" + "errors" + "fmt" + + "gopkg.in/jcmturner/gokrb5.v7/crypto/common" + "gopkg.in/jcmturner/gokrb5.v7/crypto/etype" +) + +// DES3EncryptData encrypts the data provided using DES3 and methods specific to the etype provided. 
+func DES3EncryptData(key, data []byte, e etype.EType) ([]byte, []byte, error) { + if len(key) != e.GetKeyByteSize() { + return nil, nil, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + data, _ = common.ZeroPad(data, e.GetMessageBlockByteSize()) + + block, err := des.NewTripleDESCipher(key) + if err != nil { + return nil, nil, fmt.Errorf("error creating cipher: %v", err) + } + + //RFC 3961: initial cipher state All bits zero + ivz := make([]byte, des.BlockSize) + + ct := make([]byte, len(data)) + mode := cipher.NewCBCEncrypter(block, ivz) + mode.CryptBlocks(ct, data) + return ct[len(ct)-e.GetMessageBlockByteSize():], ct, nil +} + +// DES3EncryptMessage encrypts the message provided using DES3 and methods specific to the etype provided. +// The encrypted data is concatenated with its integrity hash to create an encrypted message. +func DES3EncryptMessage(key, message []byte, usage uint32, e etype.EType) ([]byte, []byte, error) { + //confounder + c := make([]byte, e.GetConfounderByteSize()) + _, err := rand.Read(c) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("could not generate random confounder: %v", err) + } + plainBytes := append(c, message...) + plainBytes, _ = common.ZeroPad(plainBytes, e.GetMessageBlockByteSize()) + + // Derive key for encryption from usage + var k []byte + if usage != 0 { + k, err = e.DeriveKey(key, common.GetUsageKe(usage)) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("error deriving key for encryption: %v", err) + } + } + + iv, b, err := e.EncryptData(k, plainBytes) + if err != nil { + return iv, b, fmt.Errorf("error encrypting data: %v", err) + } + + // Generate and append integrity hash + ih, err := common.GetIntegrityHash(plainBytes, key, usage, e) + if err != nil { + return iv, b, fmt.Errorf("error encrypting data: %v", err) + } + b = append(b, ih...) + return iv, b, nil +} + +// DES3DecryptData decrypts the data provided using DES3 and methods specific to the etype provided. +func DES3DecryptData(key, data []byte, e etype.EType) ([]byte, error) { + if len(key) != e.GetKeyByteSize() { + return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + + if len(data) < des.BlockSize || len(data)%des.BlockSize != 0 { + return []byte{}, errors.New("ciphertext is not a multiple of the block size") + } + block, err := des.NewTripleDESCipher(key) + if err != nil { + return []byte{}, fmt.Errorf("error creating cipher: %v", err) + } + pt := make([]byte, len(data)) + ivz := make([]byte, des.BlockSize) + mode := cipher.NewCBCDecrypter(block, ivz) + mode.CryptBlocks(pt, data) + return pt, nil +} + +// DES3DecryptMessage decrypts the message provided using DES3 and methods specific to the etype provided. +// The integrity of the message is also verified. 
+func DES3DecryptMessage(key, ciphertext []byte, usage uint32, e etype.EType) ([]byte, error) { + //Derive the key + k, err := e.DeriveKey(key, common.GetUsageKe(usage)) + if err != nil { + return nil, fmt.Errorf("error deriving key: %v", err) + } + // Strip off the checksum from the end + b, err := e.DecryptData(k, ciphertext[:len(ciphertext)-e.GetHMACBitLength()/8]) + if err != nil { + return nil, fmt.Errorf("error decrypting: %v", err) + } + //Verify checksum + if !e.VerifyIntegrity(key, ciphertext, b, usage) { + return nil, errors.New("error decrypting: integrity verification failed") + } + //Remove the confounder bytes + return b[e.GetConfounderByteSize():], nil +} + +// VerifyIntegrity verifies the integrity of cipertext bytes ct. +func VerifyIntegrity(key, ct, pt []byte, usage uint32, etype etype.EType) bool { + //The ciphertext output is the concatenation of the output of the basic + //encryption function E and a (possibly truncated) HMAC using the + //specified hash function H, both applied to the plaintext with a + //random confounder prefix and sufficient padding to bring it to a + //multiple of the message block size. When the HMAC is computed, the + //key is used in the protocol key form. + h := make([]byte, etype.GetHMACBitLength()/8) + copy(h, ct[len(ct)-etype.GetHMACBitLength()/8:]) + expectedMAC, _ := common.GetIntegrityHash(pt, key, usage, etype) + return hmac.Equal(h, expectedMAC) +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/keyDerivation.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/keyDerivation.go new file mode 100644 index 00000000..8c637a22 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/keyDerivation.go @@ -0,0 +1,178 @@ +package rfc3961 + +import ( + "bytes" + + "gopkg.in/jcmturner/gokrb5.v7/crypto/etype" +) + +const ( + prfconstant = "prf" +) + +// DeriveRandom implements the RFC 3961 defined function: DR(Key, Constant) = k-truncate(E(Key, Constant, initial-cipher-state)). +// +// key: base key or protocol key. Likely to be a key from a keytab file. +// +// usage: a constant. +// +// n: block size in bits (not bytes) - note if you use something like aes.BlockSize this is in bytes. +// +// k: key length / key seed length in bits. Eg. for AES256 this value is 256. +// +// e: the encryption etype function to use. +func DeriveRandom(key, usage []byte, e etype.EType) ([]byte, error) { + n := e.GetCypherBlockBitLength() + k := e.GetKeySeedBitLength() + //Ensure the usage constant is at least the size of the cypher block size. Pass it through the nfold algorithm that will "stretch" it if needs be. + nFoldUsage := Nfold(usage, n) + //k-truncate implemented by creating a byte array the size of k (k is in bits hence /8) + out := make([]byte, k/8) + + /*If the output of E is shorter than k bits, it is fed back into the encryption as many times as necessary. + The construct is as follows (where | indicates concatenation): + + K1 = E(Key, n-fold(Constant), initial-cipher-state) + K2 = E(Key, K1, initial-cipher-state) + K3 = E(Key, K2, initial-cipher-state) + K4 = ... + + DR(Key, Constant) = k-truncate(K1 | K2 | K3 | K4 ...)*/ + _, K, err := e.EncryptData(key, nFoldUsage) + if err != nil { + return out, err + } + for i := copy(out, K); i < len(out); { + _, K, _ = e.EncryptData(key, K) + i = i + copy(out[i:], K) + } + return out, nil +} + +// DeriveKey derives a key from the protocol key based on the usage and the etype's specific methods. 
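In practice these RFC 3961 primitives are reached through the Des3CbcSha1Kd methods defined earlier in this diff: StringToKey (which only accepts an empty s2kparams), then EncryptMessage/DecryptMessage, which prepend a random 8-byte confounder, zero-pad to the DES block size and append the HMAC-SHA1 integrity hash. A round-trip sketch; the passphrase, salt and usage number are arbitrary test values:

package main

import (
	"fmt"

	"gopkg.in/jcmturner/gokrb5.v7/crypto"
)

func main() {
	e := crypto.Des3CbcSha1Kd{}

	// DES3 string-to-key: n-fold the passphrase+salt to the seed length,
	// then DK(tkey, "kerberos"); s2kparams must be empty for this etype.
	key, err := e.StringToKey("password", "ATHENA.MIT.EDUraeburn", "")
	if err != nil {
		panic(err)
	}

	// Message length is a multiple of the 8-byte block on purpose, so no
	// zero padding survives the round trip.
	msg := []byte("16 byte message!")

	_, ct, err := e.EncryptMessage(key, msg, 2)
	if err != nil {
		panic(err)
	}

	// DecryptMessage re-derives the usage key, verifies the HMAC and strips
	// the confounder before handing the plaintext back.
	pt, err := e.DecryptMessage(key, ct, 2)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", pt)
}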
+func DeriveKey(protocolKey, usage []byte, e etype.EType) ([]byte, error) { + r, err := e.DeriveRandom(protocolKey, usage) + if err != nil { + return nil, err + } + return e.RandomToKey(r), nil +} + +// RandomToKey returns a key from the bytes provided according to the definition in RFC 3961. +func RandomToKey(b []byte) []byte { + return b +} + +// DES3RandomToKey returns a key from the bytes provided according to the definition in RFC 3961 for DES3 etypes. +func DES3RandomToKey(b []byte) []byte { + r := fixWeakKey(stretch56Bits(b[:7])) + r2 := fixWeakKey(stretch56Bits(b[7:14])) + r = append(r, r2...) + r3 := fixWeakKey(stretch56Bits(b[14:21])) + r = append(r, r3...) + return r +} + +// DES3StringToKey returns a key derived from the string provided according to the definition in RFC 3961 for DES3 etypes. +func DES3StringToKey(secret, salt string, e etype.EType) ([]byte, error) { + s := secret + salt + tkey := e.RandomToKey(Nfold([]byte(s), e.GetKeySeedBitLength())) + return e.DeriveKey(tkey, []byte("kerberos")) +} + +// PseudoRandom function as defined in RFC 3961 +func PseudoRandom(key, b []byte, e etype.EType) ([]byte, error) { + h := e.GetHashFunc()() + h.Write(b) + tmp := h.Sum(nil)[:e.GetMessageBlockByteSize()] + k, err := e.DeriveKey(key, []byte(prfconstant)) + if err != nil { + return []byte{}, err + } + _, prf, err := e.EncryptData(k, tmp) + if err != nil { + return []byte{}, err + } + return prf, nil +} + +func stretch56Bits(b []byte) []byte { + d := make([]byte, len(b), len(b)) + copy(d, b) + var lb byte + for i, v := range d { + bv, nb := calcEvenParity(v) + d[i] = nb + if bv != 0 { + lb = lb | (1 << uint(i+1)) + } else { + lb = lb &^ (1 << uint(i+1)) + } + } + _, lb = calcEvenParity(lb) + d = append(d, lb) + return d +} + +func calcEvenParity(b byte) (uint8, uint8) { + lowestbit := b & 0x01 + // c counter of 1s in the first 7 bits of the byte + var c int + // Iterate over the highest 7 bits (hence p starts at 1 not zero) and count the 1s. 
+ for p := 1; p < 8; p++ { + v := b & (1 << uint(p)) + if v != 0 { + c++ + } + } + if c%2 == 0 { + //Even number of 1s so set parity to 1 + b = b | 1 + } else { + //Odd number of 1s so set parity to 0 + b = b &^ 1 + } + return lowestbit, b +} + +func fixWeakKey(b []byte) []byte { + if weak(b) { + b[7] ^= 0xF0 + } + return b +} + +func weak(b []byte) bool { + // weak keys from https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-67r1.pdf + weakKeys := [4][]byte{ + {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, + {0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE}, + {0xE0, 0xE0, 0xE0, 0xE0, 0xF1, 0xF1, 0xF1, 0xF1}, + {0x1F, 0x1F, 0x1F, 0x1F, 0x0E, 0x0E, 0x0E, 0x0E}, + } + semiWeakKeys := [12][]byte{ + {0x01, 0x1F, 0x01, 0x1F, 0x01, 0x0E, 0x01, 0x0E}, + {0x1F, 0x01, 0x1F, 0x01, 0x0E, 0x01, 0x0E, 0x01}, + {0x01, 0xE0, 0x01, 0xE0, 0x01, 0xF1, 0x01, 0xF1}, + {0xE0, 0x01, 0xE0, 0x01, 0xF1, 0x01, 0xF1, 0x01}, + {0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE}, + {0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01}, + {0x1F, 0xE0, 0x1F, 0xE0, 0x0E, 0xF1, 0x0E, 0xF1}, + {0xE0, 0x1F, 0xE0, 0x1F, 0xF1, 0x0E, 0xF1, 0x0E}, + {0x1F, 0xFE, 0x1F, 0xFE, 0x0E, 0xFE, 0x0E, 0xFE}, + {0xFE, 0x1F, 0xFE, 0x1F, 0xFE, 0x0E, 0xFE, 0x0E}, + {0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1, 0xFE}, + {0xFE, 0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1}, + } + for _, k := range weakKeys { + if bytes.Equal(b, k) { + return true + } + } + for _, k := range semiWeakKeys { + if bytes.Equal(b, k) { + return true + } + } + return false +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/nfold.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/nfold.go new file mode 100644 index 00000000..779d1c6e --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/nfold.go @@ -0,0 +1,128 @@ +package rfc3961 + +/* +Implementation of the n-fold algorithm as defined in RFC 3961. + +n-fold is an algorithm that takes m input bits and "stretches" them +to form n output bits with equal contribution from each input bit to +the output, as described in [Blumenthal96]: + +We first define a primitive called n-folding, which takes a +variable-length input block and produces a fixed-length output +sequence. The intent is to give each input bit approximately +equal weight in determining the value of each output bit. Note +that whenever we need to treat a string of octets as a number, the +assumed representation is Big-Endian -- Most Significant Byte +first. + +To n-fold a number X, replicate the input value to a length that +is the least common multiple of n and the length of X. Before +each repetition, the input is rotated to the right by 13 bit +positions. The successive n-bit chunks are added together using +1's-complement addition (that is, with end-around carry) to yield +a n-bit result.... +*/ + +/* Credits +This golang implementation of nfold used the following project for help with implementation detail. +Although their source is in java it was helpful as a reference implementation of the RFC. +You can find the source code of their open source project along with license information below. 
+We acknowledge and are grateful to these developers for their contributions to open source + +Project: Apache Directory (http://http://directory.apache.org/) +https://svn.apache.org/repos/asf/directory/apacheds/tags/1.5.1/kerberos-shared/src/main/java/org/apache/directory/server/kerberos/shared/crypto/encryption/NFold.java +License: http://www.apache.org/licenses/LICENSE-2.0 +*/ + +// Nfold expands the key to ensure it is not smaller than one cipher block. +// Defined in RFC 3961. +// +// m input bytes that will be "stretched" to the least common multiple of n bits and the bit length of m. +func Nfold(m []byte, n int) []byte { + k := len(m) * 8 + + //Get the lowest common multiple of the two bit sizes + lcm := lcm(n, k) + relicate := lcm / k + var sumBytes []byte + + for i := 0; i < relicate; i++ { + rotation := 13 * i + sumBytes = append(sumBytes, rotateRight(m, rotation)...) + } + + nfold := make([]byte, n/8) + sum := make([]byte, n/8) + for i := 0; i < lcm/n; i++ { + for j := 0; j < n/8; j++ { + sum[j] = sumBytes[j+(i*len(sum))] + } + nfold = onesComplementAddition(nfold, sum) + } + return nfold +} + +func onesComplementAddition(n1, n2 []byte) []byte { + numBits := len(n1) * 8 + out := make([]byte, numBits/8) + carry := 0 + for i := numBits - 1; i > -1; i-- { + n1b := getBit(&n1, i) + n2b := getBit(&n2, i) + s := n1b + n2b + carry + + if s == 0 || s == 1 { + setBit(&out, i, s) + carry = 0 + } else if s == 2 { + carry = 1 + } else if s == 3 { + setBit(&out, i, 1) + carry = 1 + } + } + if carry == 1 { + carryArray := make([]byte, len(n1)) + carryArray[len(carryArray)-1] = 1 + out = onesComplementAddition(out, carryArray) + } + return out +} + +func rotateRight(b []byte, step int) []byte { + out := make([]byte, len(b)) + bitLen := len(b) * 8 + for i := 0; i < bitLen; i++ { + v := getBit(&b, i) + setBit(&out, (i+step)%bitLen, v) + } + return out +} + +func lcm(x, y int) int { + return (x * y) / gcd(x, y) +} + +func gcd(x, y int) int { + for y != 0 { + x, y = y, x%y + } + return x +} + +func getBit(b *[]byte, p int) int { + pByte := p / 8 + pBit := uint(p % 8) + vByte := (*b)[pByte] + vInt := int(vByte >> (8 - (pBit + 1)) & 0x0001) + return vInt +} + +func setBit(b *[]byte, p, v int) { + pByte := p / 8 + pBit := uint(p % 8) + oldByte := (*b)[pByte] + var newByte byte + newByte = byte(v<<(8-(pBit+1))) | oldByte + (*b)[pByte] = newByte +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962/encryption.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962/encryption.go new file mode 100644 index 00000000..2be2fde0 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962/encryption.go @@ -0,0 +1,89 @@ +// Package rfc3962 provides encryption and checksum methods as specified in RFC 3962 +package rfc3962 + +import ( + "crypto/rand" + "errors" + "fmt" + + "gopkg.in/jcmturner/aescts.v1" + "gopkg.in/jcmturner/gokrb5.v7/crypto/common" + "gopkg.in/jcmturner/gokrb5.v7/crypto/etype" +) + +// EncryptData encrypts the data provided using methods specific to the etype provided as defined in RFC 3962. +func EncryptData(key, data []byte, e etype.EType) ([]byte, []byte, error) { + if len(key) != e.GetKeyByteSize() { + return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + ivz := make([]byte, e.GetCypherBlockBitLength()/8) + return aescts.Encrypt(key, ivz, data) +} + +// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 3962. 
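Two properties of the code above are worth spelling out. Nfold is the identity when the input is already exactly n bits long (the lcm equals the input length, so a single unrotated copy is folded onto zero), and DES3StringToKey earlier in this package is built directly on it: fold the passphrase+salt to the 168-bit seed, turn the seed into a parity-adjusted 24-byte key, then run DK over it. A short sketch:

package main

import (
	"fmt"

	"gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961"
)

func main() {
	// "kerberos" is exactly 64 bits, so 64-folding it is the identity:
	// lcm(64, 64) = 64, one unrotated copy, one chunk added to zero.
	fmt.Printf("%x\n", rfc3961.Nfold([]byte("kerberos"), 64)) // 6b65726265726f73

	// Stretching the same input to the 168-bit DES3 key seed length mixes
	// every input bit into the longer output.
	seed := rfc3961.Nfold([]byte("kerberos"), 168)
	fmt.Println(len(seed)) // 21 bytes

	// DES3RandomToKey then turns a 21-byte seed into a 24-byte triple-DES
	// key, writing the DES parity bit into the low bit of each byte.
	fmt.Println(len(rfc3961.DES3RandomToKey(seed))) // 24 bytes
}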
+// The encrypted data is concatenated with its integrity hash to create an encrypted message. +func EncryptMessage(key, message []byte, usage uint32, e etype.EType) ([]byte, []byte, error) { + if len(key) != e.GetKeyByteSize() { + return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + //confounder + c := make([]byte, e.GetConfounderByteSize()) + _, err := rand.Read(c) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("could not generate random confounder: %v", err) + } + plainBytes := append(c, message...) + + // Derive key for encryption from usage + var k []byte + if usage != 0 { + k, err = e.DeriveKey(key, common.GetUsageKe(usage)) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("error deriving key for encryption: %v", err) + } + } + + // Encrypt the data + iv, b, err := e.EncryptData(k, plainBytes) + if err != nil { + return iv, b, fmt.Errorf("error encrypting data: %v", err) + } + + // Generate and append integrity hash + ih, err := common.GetIntegrityHash(plainBytes, key, usage, e) + if err != nil { + return iv, b, fmt.Errorf("error encrypting data: %v", err) + } + b = append(b, ih...) + return iv, b, nil +} + +// DecryptData decrypts the data provided using the methods specific to the etype provided as defined in RFC 3962. +func DecryptData(key, data []byte, e etype.EType) ([]byte, error) { + if len(key) != e.GetKeyByteSize() { + return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + ivz := make([]byte, e.GetCypherBlockBitLength()/8) + return aescts.Decrypt(key, ivz, data) +} + +// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 3962. +// The integrity of the message is also verified. +func DecryptMessage(key, ciphertext []byte, usage uint32, e etype.EType) ([]byte, error) { + //Derive the key + k, err := e.DeriveKey(key, common.GetUsageKe(usage)) + if err != nil { + return nil, fmt.Errorf("error deriving key: %v", err) + } + // Strip off the checksum from the end + b, err := e.DecryptData(k, ciphertext[:len(ciphertext)-e.GetHMACBitLength()/8]) + if err != nil { + return nil, err + } + //Verify checksum + if !e.VerifyIntegrity(key, ciphertext, b, usage) { + return nil, errors.New("integrity verification failed") + } + //Remove the confounder bytes + return b[e.GetConfounderByteSize():], nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962/keyDerivation.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962/keyDerivation.go new file mode 100644 index 00000000..a5f45c1c --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962/keyDerivation.go @@ -0,0 +1,58 @@ +package rfc3962 + +import ( + "encoding/binary" + "encoding/hex" + "errors" + + "github.com/jcmturner/gofork/x/crypto/pbkdf2" + "gopkg.in/jcmturner/gokrb5.v7/crypto/etype" +) + +const ( + s2kParamsZero = 4294967296 +) + +// StringToKey returns a key derived from the string provided according to the definition in RFC 3961. 
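The string-to-key entry points that follow take the passphrase, a salt and an s2kparams string. For these RFC 3962 AES enctypes, s2kparams is the PBKDF2 iteration count as four big-endian octets in hex: 00000000 means 2^32 iterations, and 00001000 (4096) is the RFC 3962 default. A sketch of the call, assuming the Aes256CtsHmacSha96 etype that lives elsewhere in the vendored crypto package:

package main

import (
	"fmt"

	"gopkg.in/jcmturner/gokrb5.v7/crypto"
	"gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962"
)

func main() {
	// Aes256CtsHmacSha96 is assumed here; it is defined elsewhere in the
	// crypto package, outside this hunk.
	e := crypto.Aes256CtsHmacSha96{}

	// "00001000" = 0x1000 = 4096 PBKDF2 iterations.
	key, err := rfc3962.StringToKey("password", "EXAMPLE.COMuser", "00001000", e)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d-byte key: %x\n", len(key), key)
}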
+func StringToKey(secret, salt, s2kparams string, e etype.EType) ([]byte, error) { + i, err := S2KparamsToItertions(s2kparams) + if err != nil { + return nil, err + } + return StringToKeyIter(secret, salt, i, e) +} + +// StringToPBKDF2 generates an encryption key from a pass phrase and salt string using the PBKDF2 function from PKCS #5 v2.0 +func StringToPBKDF2(secret, salt string, iterations int64, e etype.EType) []byte { + return pbkdf2.Key64([]byte(secret), []byte(salt), iterations, int64(e.GetKeyByteSize()), e.GetHashFunc()) +} + +// StringToKeyIter returns a key derived from the string provided according to the definition in RFC 3961. +func StringToKeyIter(secret, salt string, iterations int64, e etype.EType) ([]byte, error) { + tkey := e.RandomToKey(StringToPBKDF2(secret, salt, iterations, e)) + return e.DeriveKey(tkey, []byte("kerberos")) +} + +// S2KparamsToItertions converts the string representation of iterations to an integer +func S2KparamsToItertions(s2kparams string) (int64, error) { + //process s2kparams string + //The parameter string is four octets indicating an unsigned + //number in big-endian order. This is the number of iterations to be + //performed. If the value is 00 00 00 00, the number of iterations to + //be performed is 4,294,967,296 (2**32). + var i uint32 + if len(s2kparams) != 8 { + return int64(s2kParamsZero), errors.New("invalid s2kparams length") + } + b, err := hex.DecodeString(s2kparams) + if err != nil { + return int64(s2kParamsZero), errors.New("invalid s2kparams, cannot decode string to bytes") + } + i = binary.BigEndian.Uint32(b) + //buf := bytes.NewBuffer(b) + //err = binary.Read(buf, binary.BigEndian, &i) + if err != nil { + return int64(s2kParamsZero), errors.New("invalid s2kparams, cannot convert to big endian int32") + } + return int64(i), nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/checksum.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/checksum.go new file mode 100644 index 00000000..45276e95 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/checksum.go @@ -0,0 +1,40 @@ +package rfc4757 + +import ( + "bytes" + "crypto/hmac" + "crypto/md5" + "io" +) + +// Checksum returns a hash of the data in accordance with RFC 4757 +func Checksum(key []byte, usage uint32, data []byte) ([]byte, error) { + // Create hashing key + s := append([]byte(`signaturekey`), byte(0x00)) //includes zero octet at end + mac := hmac.New(md5.New, key) + mac.Write(s) + Ksign := mac.Sum(nil) + + // Format data + tb := UsageToMSMsgType(usage) + p := append(tb, data...) 
+ h := md5.New() + rb := bytes.NewReader(p) + _, err := io.Copy(h, rb) + if err != nil { + return []byte{}, err + } + tmp := h.Sum(nil) + + // Generate HMAC + mac = hmac.New(md5.New, Ksign) + mac.Write(tmp) + return mac.Sum(nil), nil +} + +// HMAC returns a keyed MD5 checksum of the data +func HMAC(key []byte, data []byte) []byte { + mac := hmac.New(md5.New, key) + mac.Write(data) + return mac.Sum(nil) +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/encryption.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/encryption.go new file mode 100644 index 00000000..0ec8b992 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/encryption.go @@ -0,0 +1,80 @@ +// Package rfc4757 provides encryption and checksum methods as specified in RFC 4757 +package rfc4757 + +import ( + "crypto/hmac" + "crypto/rand" + "crypto/rc4" + "errors" + "fmt" + + "gopkg.in/jcmturner/gokrb5.v7/crypto/etype" +) + +// EncryptData encrypts the data provided using methods specific to the etype provided as defined in RFC 4757. +func EncryptData(key, data []byte, e etype.EType) ([]byte, error) { + if len(key) != e.GetKeyByteSize() { + return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + rc4Cipher, err := rc4.NewCipher(key) + if err != nil { + return []byte{}, fmt.Errorf("error creating RC4 cipher: %v", err) + } + ed := make([]byte, len(data)) + copy(ed, data) + rc4Cipher.XORKeyStream(ed, ed) + rc4Cipher.Reset() + return ed, nil +} + +// DecryptData decrypts the data provided using the methods specific to the etype provided as defined in RFC 4757. +func DecryptData(key, data []byte, e etype.EType) ([]byte, error) { + return EncryptData(key, data, e) +} + +// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 4757. +// The encrypted data is concatenated with its RC4 header containing integrity checksum and confounder to create an encrypted message. +func EncryptMessage(key, data []byte, usage uint32, export bool, e etype.EType) ([]byte, error) { + confounder := make([]byte, e.GetConfounderByteSize()) // size = 8 + _, err := rand.Read(confounder) + if err != nil { + return []byte{}, fmt.Errorf("error generating confounder: %v", err) + } + k1 := key + k2 := HMAC(k1, UsageToMSMsgType(usage)) + toenc := append(confounder, data...) + chksum := HMAC(k2, toenc) + k3 := HMAC(k2, chksum) + + ed, err := EncryptData(k3, toenc, e) + if err != nil { + return []byte{}, fmt.Errorf("error encrypting data: %v", err) + } + + msg := append(chksum, ed...) + return msg, nil +} + +// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 4757. +// The integrity of the message is also verified. +func DecryptMessage(key, data []byte, usage uint32, export bool, e etype.EType) ([]byte, error) { + checksum := data[:e.GetHMACBitLength()/8] + ct := data[e.GetHMACBitLength()/8:] + _, k2, k3 := deriveKeys(key, checksum, usage, export) + + pt, err := DecryptData(k3, ct, e) + if err != nil { + return []byte{}, fmt.Errorf("error decrypting data: %v", err) + } + + if !VerifyIntegrity(k2, pt, data, e) { + return []byte{}, errors.New("integrity checksum incorrect") + } + return pt[e.GetConfounderByteSize():], nil +} + +// VerifyIntegrity checks the integrity checksum of the data matches that calculated from the decrypted data. 
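RFC 4757 is the NT (RC4-HMAC) enctype: the protocol key is simply the NT hash, i.e. MD4 over the little-endian UTF-16 password (see StringToKey in the next file), and an encrypted message is the 16-byte HMAC-MD5 checksum followed by RC4(K3, confounder || data). Driving it through the RC4HMAC etype defined earlier in this diff:

package main

import (
	"fmt"

	"gopkg.in/jcmturner/gokrb5.v7/crypto"
)

func main() {
	e := crypto.RC4HMAC{}

	// Salt and s2kparams are ignored for RC4-HMAC; the key is the NT hash
	// of the password.
	key, err := e.StringToKey("password", "", "")
	if err != nil {
		panic(err)
	}

	_, ct, err := e.EncryptMessage(key, []byte("hello"), 3) // usage 3 is remapped to MS message type 8
	if err != nil {
		panic(err)
	}

	pt, err := e.DecryptMessage(key, ct, 3)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", pt)
}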
+func VerifyIntegrity(key, pt, data []byte, e etype.EType) bool { + chksum := HMAC(key, pt) + return hmac.Equal(chksum, data[:e.GetHMACBitLength()/8]) +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/keyDerivation.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/keyDerivation.go new file mode 100644 index 00000000..5e7ec480 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/keyDerivation.go @@ -0,0 +1,55 @@ +package rfc4757 + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + + "golang.org/x/crypto/md4" +) + +// StringToKey returns a key derived from the string provided according to the definition in RFC 4757. +func StringToKey(secret string) ([]byte, error) { + b := make([]byte, len(secret)*2, len(secret)*2) + for i, r := range secret { + u := fmt.Sprintf("%04x", r) + c, err := hex.DecodeString(u) + if err != nil { + return []byte{}, errors.New("character could not be encoded") + } + // Swap round the two bytes to make little endian as we put into byte slice + b[2*i] = c[1] + b[2*i+1] = c[0] + } + r := bytes.NewReader(b) + h := md4.New() + _, err := io.Copy(h, r) + if err != nil { + return []byte{}, err + } + return h.Sum(nil), nil +} + +func deriveKeys(key, checksum []byte, usage uint32, export bool) (k1, k2, k3 []byte) { + //if export { + // L40 := make([]byte, 14, 14) + // copy(L40, []byte(`fortybits`)) + // k1 = HMAC(key, L40) + //} else { + // tb := MessageTypeBytes(usage) + // k1 = HMAC(key, tb) + //} + //k2 = k1[:16] + //if export { + // mask := []byte{0xAB,0xAB,0xAB,0xAB,0xAB,0xAB,0xAB,0xAB,0xAB} + // copy(k1[7:16], mask) + //} + //k3 = HMAC(k1, checksum) + //return + k1 = key + k2 = HMAC(k1, UsageToMSMsgType(usage)) + k3 = HMAC(k2, checksum) + return +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/msgtype.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/msgtype.go new file mode 100644 index 00000000..068588d3 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/msgtype.go @@ -0,0 +1,20 @@ +package rfc4757 + +import "encoding/binary" + +// UsageToMSMsgType converts Kerberos key usage numbers to Microsoft message type encoded as a little-endian four byte slice. +func UsageToMSMsgType(usage uint32) []byte { + // Translate usage numbers to the Microsoft T numbers + switch usage { + case 3: + usage = 8 + case 9: + usage = 8 + case 23: + usage = 13 + } + // Now convert to bytes + tb := make([]byte, 4) // We force an int32 input so we can't go over 4 bytes + binary.PutUvarint(tb, uint64(usage)) + return tb +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009/encryption.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009/encryption.go new file mode 100644 index 00000000..86aae098 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009/encryption.go @@ -0,0 +1,128 @@ +// Package rfc8009 provides encryption and checksum methods as specified in RFC 8009 +package rfc8009 + +import ( + "crypto/aes" + "crypto/hmac" + "crypto/rand" + "errors" + "fmt" + + "gopkg.in/jcmturner/aescts.v1" + "gopkg.in/jcmturner/gokrb5.v7/crypto/common" + "gopkg.in/jcmturner/gokrb5.v7/crypto/etype" + "gopkg.in/jcmturner/gokrb5.v7/iana/etypeID" +) + +// EncryptData encrypts the data provided using methods specific to the etype provided as defined in RFC 8009. 
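UsageToMSMsgType above folds the Kerberos key-usage numbers RFC 4757 cares about onto Microsoft's three "T" message types and writes the result as a four-byte little-endian field (for values this small the unsigned-varint write produces the same bytes a little-endian uint32 would). For example:

package main

import (
	"fmt"

	"gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757"
)

func main() {
	fmt.Printf("%x\n", rfc4757.UsageToMSMsgType(3))  // 08000000: usage 3 -> T=8
	fmt.Printf("%x\n", rfc4757.UsageToMSMsgType(9))  // 08000000: usage 9 -> T=8
	fmt.Printf("%x\n", rfc4757.UsageToMSMsgType(23)) // 0d000000: usage 23 -> T=13
	fmt.Printf("%x\n", rfc4757.UsageToMSMsgType(25)) // 19000000: anything else passes through
}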
+func EncryptData(key, data []byte, e etype.EType) ([]byte, []byte, error) { + kl := e.GetKeyByteSize() + if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 { + kl = 32 + } + if len(key) != kl { + return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) + } + ivz := make([]byte, aes.BlockSize) + return aescts.Encrypt(key, ivz, data) +} + +// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 8009. +// The encrypted data is concatenated with its integrity hash to create an encrypted message. +func EncryptMessage(key, message []byte, usage uint32, e etype.EType) ([]byte, []byte, error) { + kl := e.GetKeyByteSize() + if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 { + kl = 32 + } + if len(key) != kl { + return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", kl, len(key)) + } + if len(key) != e.GetKeyByteSize() { + } + //confounder + c := make([]byte, e.GetConfounderByteSize()) + _, err := rand.Read(c) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("could not generate random confounder: %v", err) + } + plainBytes := append(c, message...) + + // Derive key for encryption from usage + var k []byte + if usage != 0 { + k, err = e.DeriveKey(key, common.GetUsageKe(usage)) + if err != nil { + return []byte{}, []byte{}, fmt.Errorf("error deriving key for encryption: %v", err) + } + } + + // Encrypt the data + iv, b, err := e.EncryptData(k, plainBytes) + if err != nil { + return iv, b, fmt.Errorf("error encrypting data: %v", err) + } + + ivz := make([]byte, e.GetConfounderByteSize()) + ih, err := GetIntegityHash(ivz, b, key, usage, e) + if err != nil { + return iv, b, fmt.Errorf("error encrypting data: %v", err) + } + b = append(b, ih...) + return iv, b, nil +} + +// DecryptData decrypts the data provided using the methods specific to the etype provided as defined in RFC 8009. +func DecryptData(key, data []byte, e etype.EType) ([]byte, error) { + kl := e.GetKeyByteSize() + if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 { + kl = 32 + } + if len(key) != kl { + return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", kl, len(key)) + } + ivz := make([]byte, aes.BlockSize) + return aescts.Decrypt(key, ivz, data) +} + +// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 8009. +// The integrity of the message is also verified. +func DecryptMessage(key, ciphertext []byte, usage uint32, e etype.EType) ([]byte, error) { + //Derive the key + k, err := e.DeriveKey(key, common.GetUsageKe(usage)) + if err != nil { + return nil, fmt.Errorf("error deriving key: %v", err) + } + // Strip off the checksum from the end + b, err := e.DecryptData(k, ciphertext[:len(ciphertext)-e.GetHMACBitLength()/8]) + if err != nil { + return nil, err + } + //Verify checksum + if !e.VerifyIntegrity(key, ciphertext, b, usage) { + return nil, errors.New("integrity verification failed") + } + //Remove the confounder bytes + return b[e.GetConfounderByteSize():], nil +} + +// GetIntegityHash returns a keyed integrity hash of the bytes provided as defined in RFC 8009 +func GetIntegityHash(iv, c, key []byte, usage uint32, e etype.EType) ([]byte, error) { + // Generate and append integrity hash + // The HMAC is calculated over the cipher state concatenated with the + // AES output, instead of being calculated over the confounder and + // plaintext. 
This allows the message receiver to verify the + // integrity of the message before decrypting the message. + // H = HMAC(Ki, IV | C) + ib := append(iv, c...) + return common.GetIntegrityHash(ib, key, usage, e) +} + +// VerifyIntegrity verifies the integrity of cipertext bytes ct. +func VerifyIntegrity(key, ct []byte, usage uint32, etype etype.EType) bool { + h := make([]byte, etype.GetHMACBitLength()/8) + copy(h, ct[len(ct)-etype.GetHMACBitLength()/8:]) + ivz := make([]byte, etype.GetConfounderByteSize()) + ib := append(ivz, ct[:len(ct)-(etype.GetHMACBitLength()/8)]...) + expectedMAC, _ := common.GetIntegrityHash(ib, key, usage, etype) + return hmac.Equal(h, expectedMAC) +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009/keyDerivation.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009/keyDerivation.go new file mode 100644 index 00000000..90ced3b5 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009/keyDerivation.go @@ -0,0 +1,144 @@ +package rfc8009 + +import ( + "crypto/hmac" + "encoding/binary" + "encoding/hex" + "errors" + + "golang.org/x/crypto/pbkdf2" + "gopkg.in/jcmturner/gokrb5.v7/crypto/etype" + "gopkg.in/jcmturner/gokrb5.v7/iana/etypeID" +) + +const ( + s2kParamsZero = 32768 +) + +// DeriveRandom for key derivation as defined in RFC 8009 +func DeriveRandom(protocolKey, usage []byte, e etype.EType) ([]byte, error) { + h := e.GetHashFunc()() + return KDF_HMAC_SHA2(protocolKey, []byte("prf"), usage, h.Size(), e), nil +} + +// DeriveKey derives a key from the protocol key based on the usage and the etype's specific methods. +// +// https://tools.ietf.org/html/rfc8009#section-5 +// +// If the enctype is aes128-cts-hmac-sha256-128: +// Kc = KDF-HMAC-SHA2(base-key, usage | 0x99, 128) +// Ke = KDF-HMAC-SHA2(base-key, usage | 0xAA, 128) +// Ki = KDF-HMAC-SHA2(base-key, usage | 0x55, 128) +// +// If the enctype is aes256-cts-hmac-sha384-192: +// Kc = KDF-HMAC-SHA2(base-key, usage | 0x99, 192) +// Ke = KDF-HMAC-SHA2(base-key, usage | 0xAA, 256) +// Ki = KDF-HMAC-SHA2(base-key, usage | 0x55, 192) +func DeriveKey(protocolKey, label []byte, e etype.EType) []byte { + var context []byte + var kl int + // Key length is longer for aes256-cts-hmac-sha384-192 is it is a Ke or from StringToKey (where label is "kerberos") + if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 { + switch label[len(label)-1] { + case 0x73: + // 0x73 is "s" so label could be kerberos meaning StringToKey so now check if the label is "kerberos" + kerblabel := []byte("kerberos") + if len(label) != len(kerblabel) { + break + } + for i, b := range label { + if b != kerblabel[i] { + kl = e.GetKeySeedBitLength() + break + } + } + if kl == 0 { + // This is StringToKey + kl = 256 + } + case 0xAA: + // This is a Ke + kl = 256 + } + } + if kl == 0 { + kl = e.GetKeySeedBitLength() + } + return e.RandomToKey(KDF_HMAC_SHA2(protocolKey, label, context, kl, e)) +} + +// RandomToKey returns a key from the bytes provided according to the definition in RFC 8009. +func RandomToKey(b []byte) []byte { + return b +} + +// StringToKey returns a key derived from the string provided according to the definition in RFC 8009. +func StringToKey(secret, salt, s2kparams string, e etype.EType) ([]byte, error) { + i, err := S2KparamsToItertions(s2kparams) + if err != nil { + return nil, err + } + return StringToKeyIter(secret, salt, i, e) +} + +// StringToKeyIter returns a key derived from the string provided according to the definition in RFC 8009. 
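RFC 8009 differs from the older AES profile in two visible ways here: keys come out of the SHA-2 based KDF_HMAC_SHA2 defined just below, and the salt is prefixed with the enctype name and a NUL byte (GetSaltP below). The sketch assumes the Aes128CtsHmacSha256128 etype defined elsewhere in the vendored crypto package; 00008000 (32768) matches the s2kParamsZero constant above and the RFC 8009 default iteration count:

package main

import (
	"fmt"

	"gopkg.in/jcmturner/gokrb5.v7/crypto"
	"gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009"
)

func main() {
	// Assumed concrete type; it lives elsewhere in the crypto package.
	e := crypto.Aes128CtsHmacSha256128{}

	// RFC 8009 salts are "enctype-name | 0x00 | salt".
	saltp := rfc8009.GetSaltP("EXAMPLE.COMuser", "aes128-cts-hmac-sha256-128")

	key, err := rfc8009.StringToKey("password", saltp, "00008000", e)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d-byte key: %x\n", len(key), key)
}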
+func StringToKeyIter(secret, salt string, iterations int, e etype.EType) ([]byte, error) { + tkey := e.RandomToKey(StringToPBKDF2(secret, salt, iterations, e)) + return e.DeriveKey(tkey, []byte("kerberos")) +} + +// StringToPBKDF2 generates an encryption key from a pass phrase and salt string using the PBKDF2 function from PKCS #5 v2.0 +func StringToPBKDF2(secret, salt string, iterations int, e etype.EType) []byte { + kl := e.GetKeyByteSize() + if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 { + kl = 32 + } + return pbkdf2.Key([]byte(secret), []byte(salt), iterations, kl, e.GetHashFunc()) +} + +// KDF_HMAC_SHA2 key derivation: https://tools.ietf.org/html/rfc8009#section-3 +func KDF_HMAC_SHA2(protocolKey, label, context []byte, kl int, e etype.EType) []byte { + //k: Length in bits of the key to be outputted, expressed in big-endian binary representation in 4 bytes. + k := make([]byte, 4, 4) + binary.BigEndian.PutUint32(k, uint32(kl)) + + c := make([]byte, 4, 4) + binary.BigEndian.PutUint32(c, uint32(1)) + c = append(c, label...) + c = append(c, byte(0)) + if len(context) > 0 { + c = append(c, context...) + } + c = append(c, k...) + + mac := hmac.New(e.GetHashFunc(), protocolKey) + mac.Write(c) + return mac.Sum(nil)[:(kl / 8)] +} + +// GetSaltP returns the salt value based on the etype name: https://tools.ietf.org/html/rfc8009#section-4 +func GetSaltP(salt, ename string) string { + b := []byte(ename) + b = append(b, byte(0)) + b = append(b, []byte(salt)...) + return string(b) +} + +// S2KparamsToItertions converts the string representation of iterations to an integer for RFC 8009. +func S2KparamsToItertions(s2kparams string) (int, error) { + var i uint32 + if len(s2kparams) != 8 { + return s2kParamsZero, errors.New("Invalid s2kparams length") + } + b, err := hex.DecodeString(s2kparams) + if err != nil { + return s2kParamsZero, errors.New("Invalid s2kparams, cannot decode string to bytes") + } + i = binary.BigEndian.Uint32(b) + //buf := bytes.NewBuffer(b) + //err = binary.Read(buf, binary.BigEndian, &i) + if err != nil { + return s2kParamsZero, errors.New("Invalid s2kparams, cannot convert to big endian int32") + } + return int(i), nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/MICToken.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/MICToken.go new file mode 100644 index 00000000..856412ba --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/MICToken.go @@ -0,0 +1,202 @@ +package gssapi + +import ( + "bytes" + "crypto/hmac" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + + "gopkg.in/jcmturner/gokrb5.v7/crypto" + "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +/* +From RFC 4121, section 4.2.6.1: + + Use of the GSS_GetMIC() call yields a token (referred as the MIC + token in this document), separate from the user data being protected, + which can be used to verify the integrity of that data as received. + The token has the following format: + + Octet no Name Description + -------------------------------------------------------------- + 0..1 TOK_ID Identification field. Tokens emitted by + GSS_GetMIC() contain the hex value 04 04 + expressed in big-endian order in this + field. + 2 Flags Attributes field, as described in section + 4.2.2. + 3..7 Filler Contains five octets of hex value FF. + 8..15 SND_SEQ Sequence number field in clear text, + expressed in big-endian order. + 16..last SGN_CKSUM Checksum of the "to-be-signed" data and + octet 0..15, as described in section 4.2.4. 
+ + The Filler field is included in the checksum calculation for + simplicity. + +*/ + +const ( + // MICTokenFlagSentByAcceptor - this flag indicates the sender is the context acceptor. When not set, it indicates the sender is the context initiator + MICTokenFlagSentByAcceptor = 1 << iota + // MICTokenFlagSealed - this flag indicates confidentiality is provided for. It SHALL NOT be set in MIC tokens + MICTokenFlagSealed + // MICTokenFlagAcceptorSubkey - a subkey asserted by the context acceptor is used to protect the message + MICTokenFlagAcceptorSubkey +) + +const ( + micHdrLen = 16 // Length of the MIC Token's header +) + +// MICToken represents a GSS API MIC token, as defined in RFC 4121. +// It contains the header fields, the payload (this is not transmitted) and +// the checksum, and provides the logic for converting to/from bytes plus +// computing and verifying checksums +type MICToken struct { + // const GSS Token ID: 0x0404 + Flags byte // contains three flags: acceptor, sealed, acceptor subkey + // const Filler: 0xFF 0xFF 0xFF 0xFF 0xFF + SndSeqNum uint64 // sender's sequence number. big-endian + Payload []byte // your data! :) + Checksum []byte // checksum of { payload | header } +} + +// Return the 2 bytes identifying a GSS API MIC token +func getGSSMICTokenID() *[2]byte { + return &[2]byte{0x04, 0x04} +} + +// Return the filler bytes used in header +func fillerBytes() *[5]byte { + return &[5]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF} +} + +// Marshal the MICToken into a byte slice. +// The payload should have been set and the checksum computed, otherwise an error is returned. +func (mt *MICToken) Marshal() ([]byte, error) { + if mt.Checksum == nil { + return nil, errors.New("checksum has not been set") + } + + bytes := make([]byte, micHdrLen+len(mt.Checksum)) + copy(bytes[0:micHdrLen], mt.getMICChecksumHeader()[:]) + copy(bytes[micHdrLen:], mt.Checksum) + + return bytes, nil +} + +// SetChecksum uses the passed encryption key and key usage to compute the checksum over the payload and +// the header, and sets the Checksum field of this MICToken. +// If the payload has not been set or the checksum has already been set, an error is returned. +func (mt *MICToken) SetChecksum(key types.EncryptionKey, keyUsage uint32) error { + if mt.Checksum != nil { + return errors.New("checksum has already been computed") + } + checksum, err := mt.checksum(key, keyUsage) + if err != nil { + return err + } + mt.Checksum = checksum + return nil +} + +// Compute and return the checksum of this token, computed using the passed key and key usage. +// Confirms to RFC 4121 in that the checksum will be computed over { body | header }. +// In the context of Kerberos MIC tokens, mostly keyusage GSSAPI_ACCEPTOR_SIGN (=23) +// and GSSAPI_INITIATOR_SIGN (=25) will be used. +// Note: This will NOT update the struct's Checksum field. 
+func (mt *MICToken) checksum(key types.EncryptionKey, keyUsage uint32) ([]byte, error) { + if mt.Payload == nil { + return nil, errors.New("cannot compute checksum with uninitialized payload") + } + d := make([]byte, micHdrLen+len(mt.Payload)) + copy(d[0:], mt.Payload) + copy(d[len(mt.Payload):], mt.getMICChecksumHeader()) + + encType, err := crypto.GetEtype(key.KeyType) + if err != nil { + return nil, err + } + return encType.GetChecksumHash(key.KeyValue, d, keyUsage) +} + +// Build a header suitable for a checksum computation +func (mt *MICToken) getMICChecksumHeader() []byte { + header := make([]byte, micHdrLen) + copy(header[0:2], getGSSMICTokenID()[:]) + header[2] = mt.Flags + copy(header[3:8], fillerBytes()[:]) + binary.BigEndian.PutUint64(header[8:16], mt.SndSeqNum) + return header +} + +// Verify computes the token's checksum with the provided key and usage, +// and compares it to the checksum present in the token. +// In case of any failure, (false, err) is returned, with err an explanatory error. +func (mt *MICToken) Verify(key types.EncryptionKey, keyUsage uint32) (bool, error) { + computed, err := mt.checksum(key, keyUsage) + if err != nil { + return false, err + } + if !hmac.Equal(computed, mt.Checksum) { + return false, fmt.Errorf( + "checksum mismatch. Computed: %s, Contained in token: %s", + hex.EncodeToString(computed), hex.EncodeToString(mt.Checksum)) + } + return true, nil +} + +// Unmarshal bytes into the corresponding MICToken. +// If expectFromAcceptor is true we expect the token to have been emitted by the gss acceptor, +// and will check the according flag, returning an error if the token does not match the expectation. +func (mt *MICToken) Unmarshal(b []byte, expectFromAcceptor bool) error { + if len(b) < micHdrLen { + return errors.New("bytes shorter than header length") + } + if !bytes.Equal(getGSSMICTokenID()[:], b[0:2]) { + return fmt.Errorf("wrong Token ID, Expected %s, was %s", + hex.EncodeToString(getGSSMICTokenID()[:]), + hex.EncodeToString(b[0:2])) + } + flags := b[2] + isFromAcceptor := flags&MICTokenFlagSentByAcceptor != 0 + if isFromAcceptor && !expectFromAcceptor { + return errors.New("unexpected acceptor flag is set: not expecting a token from the acceptor") + } + if !isFromAcceptor && expectFromAcceptor { + return errors.New("unexpected acceptor flag is not set: expecting a token from the acceptor, not in the initiator") + } + if !bytes.Equal(b[3:8], fillerBytes()[:]) { + return fmt.Errorf("unexpected filler bytes: expecting %s, was %s", + hex.EncodeToString(fillerBytes()[:]), + hex.EncodeToString(b[3:8])) + } + + mt.Flags = flags + mt.SndSeqNum = binary.BigEndian.Uint64(b[8:16]) + mt.Checksum = b[micHdrLen:] + return nil +} + +// NewInitiatorMICToken builds a new initiator token (acceptor flag will be set to 0) and computes the authenticated checksum. +// Other flags are set to 0. +// Note that in certain circumstances you may need to provide a sequence number that has been defined earlier. +// This is currently not supported. 
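End to end, the initiator signs and marshals a MIC with the constructor that follows, and the acceptor unmarshals the 16-byte header plus checksum, re-attaches the payload (which is never carried in the token itself) and verifies. A sketch with a throwaway AES256 session key; the etypeID constant comes from the library's iana packages outside this hunk:

package main

import (
	"crypto/rand"
	"fmt"

	"gopkg.in/jcmturner/gokrb5.v7/gssapi"
	"gopkg.in/jcmturner/gokrb5.v7/iana/etypeID"
	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
	"gopkg.in/jcmturner/gokrb5.v7/types"
)

func main() {
	kb := make([]byte, 32)
	if _, err := rand.Read(kb); err != nil {
		panic(err)
	}
	key := types.EncryptionKey{KeyType: etypeID.AES256_CTS_HMAC_SHA1_96, KeyValue: kb}
	data := []byte("protect me")

	// Initiator side: build, checksum and marshal the token.
	mic, err := gssapi.NewInitiatorMICToken(data, key)
	if err != nil {
		panic(err)
	}
	wire, err := mic.Marshal()
	if err != nil {
		panic(err)
	}

	// Acceptor side: unmarshal (expecting an initiator token), restore the
	// out-of-band payload, then verify against the same session key.
	var rx gssapi.MICToken
	if err := rx.Unmarshal(wire, false); err != nil {
		panic(err)
	}
	rx.Payload = data
	ok, err := rx.Verify(key, keyusage.GSSAPI_INITIATOR_SIGN)
	fmt.Println(ok, err) // true <nil>
}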
+func NewInitiatorMICToken(payload []byte, key types.EncryptionKey) (*MICToken, error) { + token := MICToken{ + Flags: 0x00, + SndSeqNum: 0, + Payload: payload, + } + + if err := token.SetChecksum(key, keyusage.GSSAPI_INITIATOR_SIGN); err != nil { + return nil, err + } + + return &token, nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/README.md b/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/README.md new file mode 100644 index 00000000..8fdcf70c --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/README.md @@ -0,0 +1,20 @@ +# Notes on GSS-API Negotiation Mechanism +https://tools.ietf.org/html/rfc4178 + +Client sends an initial negotiation message to the server which specifies the list of mechanisms +the client can support in order of decreasing preference. +This message is generated with the ``NewNegTokenInitKrb5`` method. +The message generated by this function specifies only a kerberos v5 mechanism is supported. + +The RFC states that this message can optionally contain the initial mechanism token +for the preferred mechanism (KRB5 in this case) of the client. The ``NewNegTokenInitKrb5`` +includes this in the message. + +The server side responds to this message with a one of four messages: + +| Message Type/State | Description | +|--------------------|-------------| +| accept-completed | indicates that the initiator-selected mechanism was acceptable to the target, and that the security mechanism token embedded in the first negotiation message was sufficient to complete the authentication | +| accept-incomplete | At least one more message is needed from the client to establish security context. | +| reject | Negotiation is being terminated. | +| request-mic | (this state can only be present in the first reply message from the target) indicates that the MIC token exchange is REQUIRED if per-message integrity services are available | \ No newline at end of file diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/contextFlags.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/contextFlags.go new file mode 100644 index 00000000..6634c6d7 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/contextFlags.go @@ -0,0 +1,25 @@ +package gssapi + +import "github.com/jcmturner/gofork/encoding/asn1" + +// GSS-API context flags assigned numbers. +const ( + ContextFlagDeleg = 1 + ContextFlagMutual = 2 + ContextFlagReplay = 4 + ContextFlagSequence = 8 + ContextFlagConf = 16 + ContextFlagInteg = 32 + ContextFlagAnon = 64 +) + +// ContextFlags flags for GSSAPI +type ContextFlags asn1.BitString + +// NewContextFlags creates a new ContextFlags instance. +func NewContextFlags() ContextFlags { + var c ContextFlags + c.BitLength = 32 + c.Bytes = make([]byte, 4) + return c +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/gssapi.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/gssapi.go new file mode 100644 index 00000000..47754d73 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/gssapi.go @@ -0,0 +1,199 @@ +// Package gssapi implements Generic Security Services Application Program Interface required for SPNEGO kerberos authentication. 
+package gssapi + +import ( + "context" + "fmt" + + "github.com/jcmturner/gofork/encoding/asn1" +) + +// GSS-API OID names +const ( + // GSS-API OID names + OIDKRB5 OIDName = "KRB5" // MechType OID for Kerberos 5 + OIDMSLegacyKRB5 OIDName = "MSLegacyKRB5" // MechType OID for Kerberos 5 + OIDSPNEGO OIDName = "SPNEGO" +) + +// GSS-API status values +const ( + StatusBadBindings = 1 << iota + StatusBadMech + StatusBadName + StatusBadNameType + StatusBadStatus + StatusBadSig + StatusBadMIC + StatusContextExpired + StatusCredentialsExpired + StatusDefectiveCredential + StatusDefectiveToken + StatusFailure + StatusNoContext + StatusNoCred + StatusBadQOP + StatusUnauthorized + StatusUnavailable + StatusDuplicateElement + StatusNameNotMN + StatusComplete + StatusContinueNeeded + StatusDuplicateToken + StatusOldToken + StatusUnseqToken + StatusGapToken +) + +// ContextToken is an interface for a GSS-API context token. +type ContextToken interface { + Marshal() ([]byte, error) + Unmarshal(b []byte) error + Verify() (bool, Status) + Context() context.Context +} + +/* +CREDENTIAL MANAGEMENT + +GSS_Acquire_cred acquire credentials for use +GSS_Release_cred release credentials after use +GSS_Inquire_cred display information about credentials +GSS_Add_cred construct credentials incrementally +GSS_Inquire_cred_by_mech display per-mechanism credential information + +CONTEXT-LEVEL CALLS + +GSS_Init_sec_context initiate outbound security context +GSS_Accept_sec_context accept inbound security context +GSS_Delete_sec_context flush context when no longer needed +GSS_Process_context_token process received control token on context +GSS_Context_time indicate validity time remaining on context +GSS_Inquire_context display information about context +GSS_Wrap_size_limit determine GSS_Wrap token size limit +GSS_Export_sec_context transfer context to other process +GSS_Import_sec_context import transferred context + +PER-MESSAGE CALLS + +GSS_GetMIC apply integrity check, receive as token separate from message +GSS_VerifyMIC validate integrity check token along with message +GSS_Wrap sign, optionally encrypt, encapsulate +GSS_Unwrap decapsulate, decrypt if needed, validate integrity check + +SUPPORT CALLS + +GSS_Display_status translate status codes to printable form +GSS_Indicate_mechs indicate mech_types supported on local system +GSS_Compare_name compare two names for equality +GSS_Display_name translate name to printable form +GSS_Import_name convert printable name to normalized form +GSS_Release_name free storage of normalized-form name +GSS_Release_buffer free storage of general GSS-allocated object +GSS_Release_OID_set free storage of OID set object +GSS_Create_empty_OID_set create empty OID set +GSS_Add_OID_set_member add member to OID set +GSS_Test_OID_set_member test if OID is member of OID set +GSS_Inquire_names_for_mech indicate name types supported by mechanism +GSS_Inquire_mechs_for_name indicates mechanisms supporting name type +GSS_Canonicalize_name translate name to per-mechanism form +GSS_Export_name externalize per-mechanism name +GSS_Duplicate_name duplicate name object +*/ + +// Mechanism is the GSS-API interface for authentication mechanisms. +type Mechanism interface { + OID() asn1.ObjectIdentifier + AcquireCred() error // acquire credentials for use (eg. 
AS exchange for KRB5) + InitSecContext() (ContextToken, error) // initiate outbound security context (eg TGS exchange builds AP_REQ to go into ContextToken to send to service) + AcceptSecContext(ct ContextToken) (bool, context.Context, Status) // service verifies the token server side to establish a context + MIC() MICToken // apply integrity check, receive as token separate from message + VerifyMIC(mt MICToken) (bool, error) // validate integrity check token along with message + Wrap(msg []byte) WrapToken // sign, optionally encrypt, encapsulate + Unwrap(wt WrapToken) []byte // decapsulate, decrypt if needed, validate integrity check +} + +// OIDName is the type for defined GSS-API OIDs. +type OIDName string + +// OID returns the OID for the provided OID name. +func OID(o OIDName) asn1.ObjectIdentifier { + switch o { + case OIDSPNEGO: + return asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 2} + case OIDKRB5: + return asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2} + case OIDMSLegacyKRB5: + return asn1.ObjectIdentifier{1, 2, 840, 48018, 1, 2, 2} + } + return asn1.ObjectIdentifier{} +} + +// Status is the GSS-API status and implements the error interface. +type Status struct { + Code int + Message string +} + +// Error returns the Status description. +func (s Status) Error() string { + var str string + switch s.Code { + case StatusBadBindings: + str = "channel binding mismatch" + case StatusBadMech: + str = "unsupported mechanism requested" + case StatusBadName: + str = "invalid name provided" + case StatusBadNameType: + str = "name of unsupported type provided" + case StatusBadStatus: + str = "invalid input status selector" + case StatusBadSig: + str = "token had invalid integrity check" + case StatusBadMIC: + str = "preferred alias for GSS_S_BAD_SIG" + case StatusContextExpired: + str = "specified security context expired" + case StatusCredentialsExpired: + str = "expired credentials detected" + case StatusDefectiveCredential: + str = "defective credential detected" + case StatusDefectiveToken: + str = "defective token detected" + case StatusFailure: + str = "failure, unspecified at GSS-API level" + case StatusNoContext: + str = "no valid security context specified" + case StatusNoCred: + str = "no valid credentials provided" + case StatusBadQOP: + str = "unsupported QOP valu" + case StatusUnauthorized: + str = "operation unauthorized" + case StatusUnavailable: + str = "operation unavailable" + case StatusDuplicateElement: + str = "duplicate credential element requested" + case StatusNameNotMN: + str = "name contains multi-mechanism elements" + case StatusComplete: + str = "normal completion" + case StatusContinueNeeded: + str = "continuation call to routine required" + case StatusDuplicateToken: + str = "duplicate per-message token detected" + case StatusOldToken: + str = "timed-out per-message token detected" + case StatusUnseqToken: + str = "reordered (early) per-message token detected" + case StatusGapToken: + str = "skipped predecessor token(s) detected" + default: + str = "unknown GSS-API error status" + } + if s.Message != "" { + return fmt.Sprintf("%s: %s", str, s.Message) + } + return str +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/wrapToken.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/wrapToken.go new file mode 100644 index 00000000..9dbf96ba --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/wrapToken.go @@ -0,0 +1,235 @@ +package gssapi + +import ( + "bytes" + "crypto/hmac" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + + 
"gopkg.in/jcmturner/gokrb5.v7/crypto" + "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +/* +From RFC 4121, section 4.2.6.2: + + Use of the GSS_Wrap() call yields a token (referred as the Wrap token + in this document), which consists of a descriptive header, followed + by a body portion that contains either the input user data in + plaintext concatenated with the checksum, or the input user data + encrypted. The GSS_Wrap() token SHALL have the following format: + + Octet no Name Description + -------------------------------------------------------------- + 0..1 TOK_ID Identification field. Tokens emitted by + GSS_Wrap() contain the hex value 05 04 + expressed in big-endian order in this + field. + 2 Flags Attributes field, as described in section + 4.2.2. + 3 Filler Contains the hex value FF. + 4..5 EC Contains the "extra count" field, in big- + endian order as described in section 4.2.3. + 6..7 RRC Contains the "right rotation count" in big- + endian order, as described in section + 4.2.5. + 8..15 SndSeqNum Sequence number field in clear text, + expressed in big-endian order. + 16..last Data Encrypted data for Wrap tokens with + confidentiality, or plaintext data followed + by the checksum for Wrap tokens without + confidentiality, as described in section + 4.2.4. + +Quick notes: + - "EC" or "Extra Count" refers to the length of the checksum. + - "Flags" (complete details in section 4.2.2) is a set of bits: + - if bit 0 is set, it means the token was sent by the acceptor (generally the kerberized service). + - bit 1 indicates that the token's payload is encrypted + - bit 2 indicates if the message is protected using a subkey defined by the acceptor. + - When computing checksums, EC and RRC MUST be set to 0. + - Wrap Tokens are not ASN.1 encoded. +*/ +const ( + HdrLen = 16 // Length of the Wrap Token's header + FillerByte byte = 0xFF +) + +// WrapToken represents a GSS API Wrap token, as defined in RFC 4121. +// It contains the header fields, the payload and the checksum, and provides +// the logic for converting to/from bytes plus computing and verifying checksums +type WrapToken struct { + // const GSS Token ID: 0x0504 + Flags byte // contains three flags: acceptor, sealed, acceptor subkey + // const Filler: 0xFF + EC uint16 // checksum length. big-endian + RRC uint16 // right rotation count. big-endian + SndSeqNum uint64 // sender's sequence number. big-endian + Payload []byte // your data! :) + CheckSum []byte // authenticated checksum of { payload | header } +} + +// Return the 2 bytes identifying a GSS API Wrap token +func getGssWrapTokenId() *[2]byte { + return &[2]byte{0x05, 0x04} +} + +// Marshal the WrapToken into a byte slice. +// The payload should have been set and the checksum computed, otherwise an error is returned. 
+func (wt *WrapToken) Marshal() ([]byte, error) { + if wt.CheckSum == nil { + return nil, errors.New("checksum has not been set") + } + if wt.Payload == nil { + return nil, errors.New("payload has not been set") + } + + pldOffset := HdrLen // Offset of the payload in the token + chkSOffset := HdrLen + len(wt.Payload) // Offset of the checksum in the token + + bytes := make([]byte, chkSOffset+int(wt.EC)) + copy(bytes[0:], getGssWrapTokenId()[:]) + bytes[2] = wt.Flags + bytes[3] = FillerByte + binary.BigEndian.PutUint16(bytes[4:6], wt.EC) + binary.BigEndian.PutUint16(bytes[6:8], wt.RRC) + binary.BigEndian.PutUint64(bytes[8:16], wt.SndSeqNum) + copy(bytes[pldOffset:], wt.Payload) + copy(bytes[chkSOffset:], wt.CheckSum) + return bytes, nil +} + +// SetCheckSum uses the passed encryption key and key usage to compute the checksum over the payload and +// the header, and sets the CheckSum field of this WrapToken. +// If the payload has not been set or the checksum has already been set, an error is returned. +func (wt *WrapToken) SetCheckSum(key types.EncryptionKey, keyUsage uint32) error { + if wt.Payload == nil { + return errors.New("payload has not been set") + } + if wt.CheckSum != nil { + return errors.New("checksum has already been computed") + } + chkSum, cErr := wt.computeCheckSum(key, keyUsage) + if cErr != nil { + return cErr + } + wt.CheckSum = chkSum + return nil +} + +// ComputeCheckSum computes and returns the checksum of this token, computed using the passed key and key usage. +// Conforms to RFC 4121 in that the checksum will be computed over { body | header }, +// with the EC and RRC flags zeroed out. +// In the context of Kerberos Wrap tokens, mostly keyusage GSSAPI_ACCEPTOR_SEAL (=22) +// and GSSAPI_INITIATOR_SEAL (=24) will be used. +// Note: This will NOT update the struct's Checksum field. +func (wt *WrapToken) computeCheckSum(key types.EncryptionKey, keyUsage uint32) ([]byte, error) { + if wt.Payload == nil { + return nil, errors.New("cannot compute checksum with uninitialized payload") + } + // Build a slice containing { payload | header } + checksumMe := make([]byte, HdrLen+len(wt.Payload)) + copy(checksumMe[0:], wt.Payload) + copy(checksumMe[len(wt.Payload):], getChecksumHeader(wt.Flags, wt.SndSeqNum)) + + encType, err := crypto.GetEtype(key.KeyType) + if err != nil { + return nil, err + } + return encType.GetChecksumHash(key.KeyValue, checksumMe, keyUsage) +} + +// Build a header suitable for a checksum computation +func getChecksumHeader(flags byte, senderSeqNum uint64) []byte { + header := make([]byte, 16) + copy(header[0:], []byte{0x05, 0x04, flags, 0xFF, 0x00, 0x00, 0x00, 0x00}) + binary.BigEndian.PutUint64(header[8:], senderSeqNum) + return header +} + +// Verify computes the token's checksum with the provided key and usage, +// and compares it to the checksum present in the token. +// In case of any failure, (false, Err) is returned, with Err an explanatory error. +func (wt *WrapToken) Verify(key types.EncryptionKey, keyUsage uint32) (bool, error) { + computed, cErr := wt.computeCheckSum(key, keyUsage) + if cErr != nil { + return false, cErr + } + if !hmac.Equal(computed, wt.CheckSum) { + return false, fmt.Errorf( + "checksum mismatch. Computed: %s, Contained in token: %s", + hex.EncodeToString(computed), hex.EncodeToString(wt.CheckSum)) + } + return true, nil +} + +// Unmarshal bytes into the corresponding WrapToken. 
+// If expectFromAcceptor is true, we expect the token to have been emitted by the gss acceptor, +// and will check the according flag, returning an error if the token does not match the expectation. +func (wt *WrapToken) Unmarshal(b []byte, expectFromAcceptor bool) error { + // Check if we can read a whole header + if len(b) < 16 { + return errors.New("bytes shorter than header length") + } + // Is the Token ID correct? + if !bytes.Equal(getGssWrapTokenId()[:], b[0:2]) { + return fmt.Errorf("wrong Token ID. Expected %s, was %s", + hex.EncodeToString(getGssWrapTokenId()[:]), + hex.EncodeToString(b[0:2])) + } + // Check the acceptor flag + flags := b[2] + isFromAcceptor := flags&0x01 == 1 + if isFromAcceptor && !expectFromAcceptor { + return errors.New("unexpected acceptor flag is set: not expecting a token from the acceptor") + } + if !isFromAcceptor && expectFromAcceptor { + return errors.New("expected acceptor flag is not set: expecting a token from the acceptor, not the initiator") + } + // Check the filler byte + if b[3] != FillerByte { + return fmt.Errorf("unexpected filler byte: expecting 0xFF, was %s ", hex.EncodeToString(b[3:4])) + } + checksumL := binary.BigEndian.Uint16(b[4:6]) + // Sanity check on the checksum length + if int(checksumL) > len(b)-HdrLen { + return fmt.Errorf("inconsistent checksum length: %d bytes to parse, checksum length is %d", len(b), checksumL) + } + + wt.Flags = flags + wt.EC = checksumL + wt.RRC = binary.BigEndian.Uint16(b[6:8]) + wt.SndSeqNum = binary.BigEndian.Uint64(b[8:16]) + wt.Payload = b[16 : len(b)-int(checksumL)] + wt.CheckSum = b[len(b)-int(checksumL):] + return nil +} + +// NewInitiatorWrapToken builds a new initiator token (acceptor flag will be set to 0) and computes the authenticated checksum. +// Other flags are set to 0, and the RRC and sequence number are initialized to 0. +// Note that in certain circumstances you may need to provide a sequence number that has been defined earlier. +// This is currently not supported. +func NewInitiatorWrapToken(payload []byte, key types.EncryptionKey) (*WrapToken, error) { + encType, err := crypto.GetEtype(key.KeyType) + if err != nil { + return nil, err + } + + token := WrapToken{ + Flags: 0x00, // all zeroed out (this is a token sent by the initiator) + // Checksum size: length of output of the HMAC function, in bytes. + EC: uint16(encType.GetHMACBitLength() / 8), + RRC: 0, + SndSeqNum: 0, + Payload: payload, + } + + if err := token.SetCheckSum(key, keyusage.GSSAPI_INITIATOR_SEAL); err != nil { + return nil, err + } + + return &token, nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/addrtype/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/addrtype/constants.go new file mode 100644 index 00000000..457b89d7 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/addrtype/constants.go @@ -0,0 +1,15 @@ +// Package addrtype provides Address type assigned numbers. +package addrtype + +// Address type IDs. +const ( + IPv4 int32 = 2 + Directional int32 = 3 + ChaosNet int32 = 5 + XNS int32 = 6 + ISO int32 = 7 + DECNETPhaseIV int32 = 12 + AppleTalkDDP int32 = 16 + NetBios int32 = 20 + IPv6 int32 = 24 +) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/adtype/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/adtype/constants.go new file mode 100644 index 00000000..e805b746 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/adtype/constants.go @@ -0,0 +1,23 @@ +// Package adtype provides Authenticator type assigned numbers. +package adtype + +// Authenticator type IDs. 
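With the WrapToken API in place, a minimal usage sketch (separate from the vendored sources) shows the intended flow, assuming sessionKey and acceptorTokenBytes come from an already established Kerberos context: wrap outgoing data as the initiator, then unmarshal and verify a token received from the acceptor using the GSSAPI_ACCEPTOR_SEAL key usage. MIC tokens, defined earlier in this diff, follow the same pattern with the SIGN usages. The vendored adtype constants continue below.

// Illustrative sketch of using the vendored gssapi Wrap token API.
// sessionKey and acceptorTokenBytes are assumed inputs from an established
// Kerberos context; error handling is kept minimal for brevity.
package example

import (
	"fmt"

	"gopkg.in/jcmturner/gokrb5.v7/gssapi"
	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
	"gopkg.in/jcmturner/gokrb5.v7/types"
)

// wrapAndVerify wraps payload for sending and verifies a token received from the acceptor.
func wrapAndVerify(sessionKey types.EncryptionKey, payload, acceptorTokenBytes []byte) ([]byte, error) {
	// Outbound: build an initiator Wrap token and marshal it for the wire.
	out, err := gssapi.NewInitiatorWrapToken(payload, sessionKey)
	if err != nil {
		return nil, err
	}
	outBytes, err := out.Marshal()
	if err != nil {
		return nil, err
	}

	// Inbound: parse a token that should carry the acceptor flag and check its checksum.
	var in gssapi.WrapToken
	if err := in.Unmarshal(acceptorTokenBytes, true); err != nil {
		return nil, err
	}
	if ok, err := in.Verify(sessionKey, keyusage.GSSAPI_ACCEPTOR_SEAL); !ok {
		return nil, fmt.Errorf("acceptor token failed verification: %v", err)
	}
	return outBytes, nil
}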
+const ( + ADIfRelevant int32 = 1 + ADIntendedForServer int32 = 2 + ADIntendedForApplicationClass int32 = 3 + ADKDCIssued int32 = 4 + ADAndOr int32 = 5 + ADMandatoryTicketExtensions int32 = 6 + ADInTicketExtensions int32 = 7 + ADMandatoryForKDC int32 = 8 + OSFDCE int32 = 64 + SESAME int32 = 65 + ADOSFDCEPKICertID int32 = 66 + ADAuthenticationStrength int32 = 70 + ADFXFastArmor int32 = 71 + ADFXFastUsed int32 = 72 + ADWin2KPAC int32 = 128 + ADEtypeNegotiation int32 = 129 + //Reserved values 9-63 +) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag/constants.go new file mode 100644 index 00000000..d74cd60e --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag/constants.go @@ -0,0 +1,24 @@ +// Package asnAppTag provides ASN1 application tag numbers. +package asnAppTag + +// ASN1 application tag numbers. +const ( + Ticket = 1 + Authenticator = 2 + EncTicketPart = 3 + ASREQ = 10 + TGSREQ = 12 + ASREP = 11 + TGSREP = 13 + APREQ = 14 + APREP = 15 + KRBSafe = 20 + KRBPriv = 21 + KRBCred = 22 + EncASRepPart = 25 + EncTGSRepPart = 26 + EncAPRepPart = 27 + EncKrbPrivPart = 28 + EncKrbCredPart = 29 + KRBError = 30 +) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype/constants.go new file mode 100644 index 00000000..93db952d --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype/constants.go @@ -0,0 +1,32 @@ +// Package chksumtype provides Kerberos 5 checksum type assigned numbers. +package chksumtype + +// Checksum type IDs. +const ( + //RESERVED : 0 + CRC32 int32 = 1 + RSA_MD4 int32 = 2 + RSA_MD4_DES int32 = 3 + DES_MAC int32 = 4 + DES_MAC_K int32 = 5 + RSA_MD4_DES_K int32 = 6 + RSA_MD5 int32 = 7 + RSA_MD5_DES int32 = 8 + RSA_MD5_DES3 int32 = 9 + SHA1_ID10 int32 = 10 + //UNASSIGNED : 11 + HMAC_SHA1_DES3_KD int32 = 12 + HMAC_SHA1_DES3 int32 = 13 + SHA1_ID14 int32 = 14 + HMAC_SHA1_96_AES128 int32 = 15 + HMAC_SHA1_96_AES256 int32 = 16 + CMAC_CAMELLIA128 int32 = 17 + CMAC_CAMELLIA256 int32 = 18 + HMAC_SHA256_128_AES128 int32 = 19 + HMAC_SHA384_192_AES256 int32 = 20 + //UNASSIGNED : 21-32770 + GSSAPI int32 = 32771 + //UNASSIGNED : 32772-2147483647 + KERB_CHECKSUM_HMAC_MD5_UNSIGNED uint32 = 4294967158 // 0xFFFFFF76 documentation says this is -138 but in an unsigned int this is 4294967158 + KERB_CHECKSUM_HMAC_MD5 int32 = -138 +) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/constants.go new file mode 100644 index 00000000..0b8e916d --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/constants.go @@ -0,0 +1,5 @@ +// Package iana provides Kerberos 5 assigned numbers. +package iana + +// PVNO is the Protocol Version Number. +const PVNO = 5 diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/errorcode/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/errorcode/constants.go new file mode 100644 index 00000000..fd756bc5 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/errorcode/constants.go @@ -0,0 +1,155 @@ +// Package errorcode provides Kerberos 5 assigned error codes. +package errorcode + +import "fmt" + +// Kerberos error codes. 
+const ( + KDC_ERR_NONE int32 = 0 //No error + KDC_ERR_NAME_EXP int32 = 1 //Client's entry in database has expired + KDC_ERR_SERVICE_EXP int32 = 2 //Server's entry in database has expired + KDC_ERR_BAD_PVNO int32 = 3 //Requested protocol version number not supported + KDC_ERR_C_OLD_MAST_KVNO int32 = 4 //Client's key encrypted in old master key + KDC_ERR_S_OLD_MAST_KVNO int32 = 5 //Server's key encrypted in old master key + KDC_ERR_C_PRINCIPAL_UNKNOWN int32 = 6 //Client not found in Kerberos database + KDC_ERR_S_PRINCIPAL_UNKNOWN int32 = 7 //Server not found in Kerberos database + KDC_ERR_PRINCIPAL_NOT_UNIQUE int32 = 8 //Multiple principal entries in database + KDC_ERR_NULL_KEY int32 = 9 //The client or server has a null key + KDC_ERR_CANNOT_POSTDATE int32 = 10 //Ticket not eligible for postdating + KDC_ERR_NEVER_VALID int32 = 11 //Requested starttime is later than end time + KDC_ERR_POLICY int32 = 12 //KDC policy rejects request + KDC_ERR_BADOPTION int32 = 13 //KDC cannot accommodate requested option + KDC_ERR_ETYPE_NOSUPP int32 = 14 //KDC has no support for encryption type + KDC_ERR_SUMTYPE_NOSUPP int32 = 15 //KDC has no support for checksum type + KDC_ERR_PADATA_TYPE_NOSUPP int32 = 16 //KDC has no support for padata type + KDC_ERR_TRTYPE_NOSUPP int32 = 17 //KDC has no support for transited type + KDC_ERR_CLIENT_REVOKED int32 = 18 //Clients credentials have been revoked + KDC_ERR_SERVICE_REVOKED int32 = 19 //Credentials for server have been revoked + KDC_ERR_TGT_REVOKED int32 = 20 //TGT has been revoked + KDC_ERR_CLIENT_NOTYET int32 = 21 //Client not yet valid; try again later + KDC_ERR_SERVICE_NOTYET int32 = 22 //Server not yet valid; try again later + KDC_ERR_KEY_EXPIRED int32 = 23 //Password has expired; change password to reset + KDC_ERR_PREAUTH_FAILED int32 = 24 //Pre-authentication information was invalid + KDC_ERR_PREAUTH_REQUIRED int32 = 25 //Additional pre-authentication required + KDC_ERR_SERVER_NOMATCH int32 = 26 //Requested server and ticket don't match + KDC_ERR_MUST_USE_USER2USER int32 = 27 //Server principal valid for user2user only + KDC_ERR_PATH_NOT_ACCEPTED int32 = 28 //KDC Policy rejects transited path + KDC_ERR_SVC_UNAVAILABLE int32 = 29 //A service is not available + KRB_AP_ERR_BAD_INTEGRITY int32 = 31 //Integrity check on decrypted field failed + KRB_AP_ERR_TKT_EXPIRED int32 = 32 //Ticket expired + KRB_AP_ERR_TKT_NYV int32 = 33 //Ticket not yet valid + KRB_AP_ERR_REPEAT int32 = 34 //Request is a replay + KRB_AP_ERR_NOT_US int32 = 35 //The ticket isn't for us + KRB_AP_ERR_BADMATCH int32 = 36 //Ticket and authenticator don't match + KRB_AP_ERR_SKEW int32 = 37 //Clock skew too great + KRB_AP_ERR_BADADDR int32 = 38 //Incorrect net address + KRB_AP_ERR_BADVERSION int32 = 39 //Protocol version mismatch + KRB_AP_ERR_MSG_TYPE int32 = 40 //Invalid msg type + KRB_AP_ERR_MODIFIED int32 = 41 //Message stream modified + KRB_AP_ERR_BADORDER int32 = 42 //Message out of order + KRB_AP_ERR_BADKEYVER int32 = 44 //Specified version of key is not available + KRB_AP_ERR_NOKEY int32 = 45 //Service key not available + KRB_AP_ERR_MUT_FAIL int32 = 46 //Mutual authentication failed + KRB_AP_ERR_BADDIRECTION int32 = 47 //Incorrect message direction + KRB_AP_ERR_METHOD int32 = 48 //Alternative authentication method required + KRB_AP_ERR_BADSEQ int32 = 49 //Incorrect sequence number in message + KRB_AP_ERR_INAPP_CKSUM int32 = 50 //Inappropriate type of checksum in message + KRB_AP_PATH_NOT_ACCEPTED int32 = 51 //Policy rejects transited path + KRB_ERR_RESPONSE_TOO_BIG int32 = 52 //Response too 
big for UDP; retry with TCP + KRB_ERR_GENERIC int32 = 60 //Generic error (description in e-text) + KRB_ERR_FIELD_TOOLONG int32 = 61 //Field is too long for this implementation + KDC_ERROR_CLIENT_NOT_TRUSTED int32 = 62 //Reserved for PKINIT + KDC_ERROR_KDC_NOT_TRUSTED int32 = 63 //Reserved for PKINIT + KDC_ERROR_INVALID_SIG int32 = 64 //Reserved for PKINIT + KDC_ERR_KEY_TOO_WEAK int32 = 65 //Reserved for PKINIT + KDC_ERR_CERTIFICATE_MISMATCH int32 = 66 //Reserved for PKINIT + KRB_AP_ERR_NO_TGT int32 = 67 //No TGT available to validate USER-TO-USER + KDC_ERR_WRONG_REALM int32 = 68 //Reserved for future use + KRB_AP_ERR_USER_TO_USER_REQUIRED int32 = 69 //Ticket must be for USER-TO-USER + KDC_ERR_CANT_VERIFY_CERTIFICATE int32 = 70 //Reserved for PKINIT + KDC_ERR_INVALID_CERTIFICATE int32 = 71 //Reserved for PKINIT + KDC_ERR_REVOKED_CERTIFICATE int32 = 72 //Reserved for PKINIT + KDC_ERR_REVOCATION_STATUS_UNKNOWN int32 = 73 //Reserved for PKINIT + KDC_ERR_REVOCATION_STATUS_UNAVAILABLE int32 = 74 //Reserved for PKINIT + KDC_ERR_CLIENT_NAME_MISMATCH int32 = 75 //Reserved for PKINIT + KDC_ERR_KDC_NAME_MISMATCH int32 = 76 //Reserved for PKINIT +) + +// Lookup an error code description. +func Lookup(i int32) string { + if s, ok := errorcodeLookup[i]; ok { + return fmt.Sprintf("(%d) %s", i, s) + } + return fmt.Sprintf("Unknown ErrorCode %d", i) +} + +var errorcodeLookup = map[int32]string{ + KDC_ERR_NONE: "KDC_ERR_NONE No error", + KDC_ERR_NAME_EXP: "KDC_ERR_NAME_EXP Client's entry in database has expired", + KDC_ERR_SERVICE_EXP: "KDC_ERR_SERVICE_EXP Server's entry in database has expired", + KDC_ERR_BAD_PVNO: "KDC_ERR_BAD_PVNO Requested protocol version number not supported", + KDC_ERR_C_OLD_MAST_KVNO: "KDC_ERR_C_OLD_MAST_KVNO Client's key encrypted in old master key", + KDC_ERR_S_OLD_MAST_KVNO: "KDC_ERR_S_OLD_MAST_KVNO Server's key encrypted in old master key", + KDC_ERR_C_PRINCIPAL_UNKNOWN: "KDC_ERR_C_PRINCIPAL_UNKNOWN Client not found in Kerberos database", + KDC_ERR_S_PRINCIPAL_UNKNOWN: "KDC_ERR_S_PRINCIPAL_UNKNOWN Server not found in Kerberos database", + KDC_ERR_PRINCIPAL_NOT_UNIQUE: "KDC_ERR_PRINCIPAL_NOT_UNIQUE Multiple principal entries in database", + KDC_ERR_NULL_KEY: "KDC_ERR_NULL_KEY The client or server has a null key", + KDC_ERR_CANNOT_POSTDATE: "KDC_ERR_CANNOT_POSTDATE Ticket not eligible for postdating", + KDC_ERR_NEVER_VALID: "KDC_ERR_NEVER_VALID Requested starttime is later than end time", + KDC_ERR_POLICY: "KDC_ERR_POLICY KDC policy rejects request", + KDC_ERR_BADOPTION: "KDC_ERR_BADOPTION KDC cannot accommodate requested option", + KDC_ERR_ETYPE_NOSUPP: "KDC_ERR_ETYPE_NOSUPP KDC has no support for encryption type", + KDC_ERR_SUMTYPE_NOSUPP: "KDC_ERR_SUMTYPE_NOSUPP KDC has no support for checksum type", + KDC_ERR_PADATA_TYPE_NOSUPP: "KDC_ERR_PADATA_TYPE_NOSUPP KDC has no support for padata type", + KDC_ERR_TRTYPE_NOSUPP: "KDC_ERR_TRTYPE_NOSUPP KDC has no support for transited type", + KDC_ERR_CLIENT_REVOKED: "KDC_ERR_CLIENT_REVOKED Clients credentials have been revoked", + KDC_ERR_SERVICE_REVOKED: "KDC_ERR_SERVICE_REVOKED Credentials for server have been revoked", + KDC_ERR_TGT_REVOKED: "KDC_ERR_TGT_REVOKED TGT has been revoked", + KDC_ERR_CLIENT_NOTYET: "KDC_ERR_CLIENT_NOTYET Client not yet valid; try again later", + KDC_ERR_SERVICE_NOTYET: "KDC_ERR_SERVICE_NOTYET Server not yet valid; try again later", + KDC_ERR_KEY_EXPIRED: "KDC_ERR_KEY_EXPIRED Password has expired; change password to reset", + KDC_ERR_PREAUTH_FAILED: "KDC_ERR_PREAUTH_FAILED Pre-authentication information was 
invalid", + KDC_ERR_PREAUTH_REQUIRED: "KDC_ERR_PREAUTH_REQUIRED Additional pre-authentication required", + KDC_ERR_SERVER_NOMATCH: "KDC_ERR_SERVER_NOMATCH Requested server and ticket don't match", + KDC_ERR_MUST_USE_USER2USER: "KDC_ERR_MUST_USE_USER2USER Server principal valid for user2user only", + KDC_ERR_PATH_NOT_ACCEPTED: "KDC_ERR_PATH_NOT_ACCEPTED KDC Policy rejects transited path", + KDC_ERR_SVC_UNAVAILABLE: "KDC_ERR_SVC_UNAVAILABLE A service is not available", + KRB_AP_ERR_BAD_INTEGRITY: "KRB_AP_ERR_BAD_INTEGRITY Integrity check on decrypted field failed", + KRB_AP_ERR_TKT_EXPIRED: "KRB_AP_ERR_TKT_EXPIRED Ticket expired", + KRB_AP_ERR_TKT_NYV: "KRB_AP_ERR_TKT_NYV Ticket not yet valid", + KRB_AP_ERR_REPEAT: "KRB_AP_ERR_REPEAT Request is a replay", + KRB_AP_ERR_NOT_US: "KRB_AP_ERR_NOT_US The ticket isn't for us", + KRB_AP_ERR_BADMATCH: "KRB_AP_ERR_BADMATCH Ticket and authenticator don't match", + KRB_AP_ERR_SKEW: "KRB_AP_ERR_SKEW Clock skew too great", + KRB_AP_ERR_BADADDR: "KRB_AP_ERR_BADADDR Incorrect net address", + KRB_AP_ERR_BADVERSION: "KRB_AP_ERR_BADVERSION Protocol version mismatch", + KRB_AP_ERR_MSG_TYPE: "KRB_AP_ERR_MSG_TYPE Invalid msg type", + KRB_AP_ERR_MODIFIED: "KRB_AP_ERR_MODIFIED Message stream modified", + KRB_AP_ERR_BADORDER: "KRB_AP_ERR_BADORDER Message out of order", + KRB_AP_ERR_BADKEYVER: "KRB_AP_ERR_BADKEYVER Specified version of key is not available", + KRB_AP_ERR_NOKEY: "KRB_AP_ERR_NOKEY Service key not available", + KRB_AP_ERR_MUT_FAIL: "KRB_AP_ERR_MUT_FAIL Mutual authentication failed", + KRB_AP_ERR_BADDIRECTION: "KRB_AP_ERR_BADDIRECTION Incorrect message direction", + KRB_AP_ERR_METHOD: "KRB_AP_ERR_METHOD Alternative authentication method required", + KRB_AP_ERR_BADSEQ: "KRB_AP_ERR_BADSEQ Incorrect sequence number in message", + KRB_AP_ERR_INAPP_CKSUM: "KRB_AP_ERR_INAPP_CKSUM Inappropriate type of checksum in message", + KRB_AP_PATH_NOT_ACCEPTED: "KRB_AP_PATH_NOT_ACCEPTED Policy rejects transited path", + KRB_ERR_RESPONSE_TOO_BIG: "KRB_ERR_RESPONSE_TOO_BIG Response too big for UDP; retry with TCP", + KRB_ERR_GENERIC: "KRB_ERR_GENERIC Generic error (description in e-text)", + KRB_ERR_FIELD_TOOLONG: "KRB_ERR_FIELD_TOOLONG Field is too long for this implementation", + KDC_ERROR_CLIENT_NOT_TRUSTED: "KDC_ERROR_CLIENT_NOT_TRUSTED Reserved for PKINIT", + KDC_ERROR_KDC_NOT_TRUSTED: "KDC_ERROR_KDC_NOT_TRUSTED Reserved for PKINIT", + KDC_ERROR_INVALID_SIG: "KDC_ERROR_INVALID_SIG Reserved for PKINIT", + KDC_ERR_KEY_TOO_WEAK: "KDC_ERR_KEY_TOO_WEAK Reserved for PKINIT", + KDC_ERR_CERTIFICATE_MISMATCH: "KDC_ERR_CERTIFICATE_MISMATCH Reserved for PKINIT", + KRB_AP_ERR_NO_TGT: "KRB_AP_ERR_NO_TGT No TGT available to validate USER-TO-USER", + KDC_ERR_WRONG_REALM: "KDC_ERR_WRONG_REALM Reserved for future use", + KRB_AP_ERR_USER_TO_USER_REQUIRED: "KRB_AP_ERR_USER_TO_USER_REQUIRED Ticket must be for USER-TO-USER", + KDC_ERR_CANT_VERIFY_CERTIFICATE: "KDC_ERR_CANT_VERIFY_CERTIFICATE Reserved for PKINIT", + KDC_ERR_INVALID_CERTIFICATE: "KDC_ERR_INVALID_CERTIFICATE Reserved for PKINIT", + KDC_ERR_REVOKED_CERTIFICATE: "KDC_ERR_REVOKED_CERTIFICATE Reserved for PKINIT", + KDC_ERR_REVOCATION_STATUS_UNKNOWN: "KDC_ERR_REVOCATION_STATUS_UNKNOWN Reserved for PKINIT", + KDC_ERR_REVOCATION_STATUS_UNAVAILABLE: "KDC_ERR_REVOCATION_STATUS_UNAVAILABLE Reserved for PKINIT", + KDC_ERR_CLIENT_NAME_MISMATCH: "KDC_ERR_CLIENT_NAME_MISMATCH Reserved for PKINIT", + KDC_ERR_KDC_NAME_MISMATCH: "KDC_ERR_KDC_NAME_MISMATCH Reserved for PKINIT", +} diff --git 
a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/etypeID/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/etypeID/constants.go new file mode 100644 index 00000000..46a0d748 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/etypeID/constants.go @@ -0,0 +1,101 @@ +// Package etypeID provides Kerberos 5 encryption type assigned numbers. +package etypeID + +// Kerberos encryption type assigned numbers. +const ( + //RESERVED : 0 + DES_CBC_CRC int32 = 1 + DES_CBC_MD4 int32 = 2 + DES_CBC_MD5 int32 = 3 + DES_CBC_RAW int32 = 4 + DES3_CBC_MD5 int32 = 5 + DES3_CBC_RAW int32 = 6 + DES3_CBC_SHA1 int32 = 7 + DES_HMAC_SHA1 int32 = 8 + DSAWITHSHA1_CMSOID int32 = 9 + MD5WITHRSAENCRYPTION_CMSOID int32 = 10 + SHA1WITHRSAENCRYPTION_CMSOID int32 = 11 + RC2CBC_ENVOID int32 = 12 + RSAENCRYPTION_ENVOID int32 = 13 + RSAES_OAEP_ENV_OID int32 = 14 + DES_EDE3_CBC_ENV_OID int32 = 15 + DES3_CBC_SHA1_KD int32 = 16 + AES128_CTS_HMAC_SHA1_96 int32 = 17 + AES256_CTS_HMAC_SHA1_96 int32 = 18 + AES128_CTS_HMAC_SHA256_128 int32 = 19 + AES256_CTS_HMAC_SHA384_192 int32 = 20 + //UNASSIGNED : 21-22 + RC4_HMAC int32 = 23 + RC4_HMAC_EXP int32 = 24 + CAMELLIA128_CTS_CMAC int32 = 25 + CAMELLIA256_CTS_CMAC int32 = 26 + //UNASSIGNED : 27-64 + SUBKEY_KEYMATERIAL int32 = 65 + //UNASSIGNED : 66-2147483647 +) + +// ETypesByName is a map of EncType names to their assigned EncType number. +var ETypesByName = map[string]int32{ + "des-cbc-crc": DES_CBC_CRC, + "des-cbc-md4": DES_CBC_MD4, + "des-cbc-md5": DES_CBC_MD5, + "des-cbc-raw": DES_CBC_RAW, + "des3-cbc-md5": DES3_CBC_MD5, + "des3-cbc-raw": DES3_CBC_RAW, + "des3-cbc-sha1": DES3_CBC_SHA1, + "des3-hmac-sha1": DES_HMAC_SHA1, + "des3-cbc-sha1-kd": DES3_CBC_SHA1_KD, + "des-hmac-sha1": DES_HMAC_SHA1, + "dsaWithSHA1-CmsOID": DSAWITHSHA1_CMSOID, + "md5WithRSAEncryption-CmsOID": MD5WITHRSAENCRYPTION_CMSOID, + "sha1WithRSAEncryption-CmsOID": SHA1WITHRSAENCRYPTION_CMSOID, + "rc2CBC-EnvOID": RC2CBC_ENVOID, + "rsaEncryption-EnvOID": RSAENCRYPTION_ENVOID, + "rsaES-OAEP-ENV-OID": RSAES_OAEP_ENV_OID, + "des-ede3-cbc-Env-OID": DES_EDE3_CBC_ENV_OID, + "aes128-cts-hmac-sha1-96": AES128_CTS_HMAC_SHA1_96, + "aes128-cts": AES128_CTS_HMAC_SHA1_96, + "aes128-sha1": AES128_CTS_HMAC_SHA1_96, + "aes256-cts-hmac-sha1-96": AES256_CTS_HMAC_SHA1_96, + "aes256-cts": AES256_CTS_HMAC_SHA1_96, + "aes256-sha1": AES256_CTS_HMAC_SHA1_96, + "aes128-cts-hmac-sha256-128": AES128_CTS_HMAC_SHA256_128, + "aes128-sha2": AES128_CTS_HMAC_SHA256_128, + "aes256-cts-hmac-sha384-192": AES256_CTS_HMAC_SHA384_192, + "aes256-sha2": AES256_CTS_HMAC_SHA384_192, + "arcfour-hmac": RC4_HMAC, + "rc4-hmac": RC4_HMAC, + "arcfour-hmac-md5": RC4_HMAC, + "arcfour-hmac-exp": RC4_HMAC_EXP, + "rc4-hmac-exp": RC4_HMAC_EXP, + "arcfour-hmac-md5-exp": RC4_HMAC_EXP, + "camellia128-cts-cmac": CAMELLIA128_CTS_CMAC, + "camellia128-cts": CAMELLIA128_CTS_CMAC, + "camellia256-cts-cmac": CAMELLIA256_CTS_CMAC, + "camellia256-cts": CAMELLIA256_CTS_CMAC, + "subkey-keymaterial": SUBKEY_KEYMATERIAL, +} + +// EtypeSupported resolves the etype name string to the etype ID. +// If zero is returned the etype is not supported by gokrb5. 
+func EtypeSupported(etype string) int32 { + // Slice of supported enctype IDs + s := []int32{ + AES128_CTS_HMAC_SHA1_96, + AES256_CTS_HMAC_SHA1_96, + AES128_CTS_HMAC_SHA256_128, + AES256_CTS_HMAC_SHA384_192, + DES3_CBC_SHA1_KD, + RC4_HMAC, + } + id := ETypesByName[etype] + if id == 0 { + return id + } + for _, sid := range s { + if id == sid { + return id + } + } + return 0 +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/flags/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/flags/constants.go new file mode 100644 index 00000000..787801f8 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/flags/constants.go @@ -0,0 +1,36 @@ +// Package flags provides Kerberos 5 flag assigned numbers. +package flags + +// Flag values for KRB5 messages and tickets. +const ( + Reserved = 0 + Forwardable = 1 + Forwarded = 2 + Proxiable = 3 + Proxy = 4 + AllowPostDate = 5 + MayPostDate = 5 + PostDated = 6 + Invalid = 7 + Renewable = 8 + Initial = 9 + PreAuthent = 10 + HWAuthent = 11 + OptHardwareAuth = 11 + RequestAnonymous = 12 + TransitedPolicyChecked = 12 + OKAsDelegate = 13 + EncPARep = 15 + Canonicalize = 15 + DisableTransitedCheck = 26 + RenewableOK = 27 + EncTktInSkey = 28 + Renew = 30 + Validate = 31 + + // AP Option Flags + // 0 Reserved for future use. + APOptionUseSessionKey = 1 + APOptionMutualRequired = 2 + // 3-31 Reserved for future use. +) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/keyusage/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/keyusage/constants.go new file mode 100644 index 00000000..5b232d1d --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/keyusage/constants.go @@ -0,0 +1,42 @@ +// Package keyusage provides Kerberos 5 key usage assigned numbers. +package keyusage + +// Key usage numbers. +const ( + AS_REQ_PA_ENC_TIMESTAMP = 1 + KDC_REP_TICKET = 2 + AS_REP_ENCPART = 3 + TGS_REQ_KDC_REQ_BODY_AUTHDATA_SESSION_KEY = 4 + TGS_REQ_KDC_REQ_BODY_AUTHDATA_SUB_KEY = 5 + TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR_CHKSUM = 6 + TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR = 7 + TGS_REP_ENCPART_SESSION_KEY = 8 + TGS_REP_ENCPART_AUTHENTICATOR_SUB_KEY = 9 + AP_REQ_AUTHENTICATOR_CHKSUM = 10 + AP_REQ_AUTHENTICATOR = 11 + AP_REP_ENCPART = 12 + KRB_PRIV_ENCPART = 13 + KRB_CRED_ENCPART = 14 + KRB_SAFE_CHKSUM = 15 + KERB_NON_KERB_SALT = 16 + KERB_NON_KERB_CKSUM_SALT = 17 + //18. Reserved for future use in Kerberos and related protocols. + AD_KDC_ISSUED_CHKSUM = 19 + //20-21. Reserved for future use in Kerberos and related protocols. + GSSAPI_ACCEPTOR_SEAL = 22 + GSSAPI_ACCEPTOR_SIGN = 23 + GSSAPI_INITIATOR_SEAL = 24 + GSSAPI_INITIATOR_SIGN = 25 + KEY_USAGE_FAST_REQ_CHKSUM = 50 + KEY_USAGE_FAST_ENC = 51 + KEY_USAGE_FAST_REP = 52 + KEY_USAGE_FAST_FINISHED = 53 + KEY_USAGE_ENC_CHALLENGE_CLIENT = 54 + KEY_USAGE_ENC_CHALLENGE_KDC = 55 + KEY_USAGE_AS_REQ = 56 + //26-511. Reserved for future use in Kerberos and related protocols. + //512-1023. Reserved for uses internal to a Kerberos implementation. + //1024. Encryption for application use in protocols that do not specify key usage values + //1025. Checksums for application use in protocols that do not specify key usage values + //1026-2047. Reserved for application use. 
+) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/msgtype/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/msgtype/constants.go new file mode 100644 index 00000000..ad21810b --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/msgtype/constants.go @@ -0,0 +1,18 @@ +// Package msgtype provides Kerberos 5 message type assigned numbers. +package msgtype + +// KRB message type IDs. +const ( + KRB_AS_REQ = 10 //Request for initial authentication + KRB_AS_REP = 11 //Response to KRB_AS_REQ request + KRB_TGS_REQ = 12 //Request for authentication based on TGT + KRB_TGS_REP = 13 //Response to KRB_TGS_REQ request + KRB_AP_REQ = 14 //Application request to server + KRB_AP_REP = 15 //Response to KRB_AP_REQ_MUTUAL + KRB_RESERVED16 = 16 //Reserved for user-to-user krb_tgt_request + KRB_RESERVED17 = 17 //Reserved for user-to-user krb_tgt_reply + KRB_SAFE = 20 // Safe (checksummed) application message + KRB_PRIV = 21 // Private (encrypted) application message + KRB_CRED = 22 //Private (encrypted) message to forward credentials + KRB_ERROR = 30 //Error response +) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/nametype/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/nametype/constants.go new file mode 100644 index 00000000..c111a05f --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/nametype/constants.go @@ -0,0 +1,15 @@ +// Package nametype provides Kerberos 5 principal name type numbers. +package nametype + +// Kerberos name type IDs. +const ( + KRB_NT_UNKNOWN int32 = 0 //Name type not known + KRB_NT_PRINCIPAL int32 = 1 //Just the name of the principal as in DCE, or for users + KRB_NT_SRV_INST int32 = 2 //Service and other unique instance (krbtgt) + KRB_NT_SRV_HST int32 = 3 //Service with host name as instance (telnet, rcommands) + KRB_NT_SRV_XHST int32 = 4 //Service with host as remaining components + KRB_NT_UID int32 = 5 //Unique ID + KRB_NT_X500_PRINCIPAL int32 = 6 //Encoded X.509 Distinguished name [RFC2253] + KRB_NT_SMTP_NAME int32 = 7 //Name in form of SMTP email name (e.g., user@example.com) + KRB_NT_ENTERPRISE int32 = 10 //Enterprise name; may be mapped to principal name +) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/patype/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/patype/constants.go new file mode 100644 index 00000000..aa04f637 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/patype/constants.go @@ -0,0 +1,77 @@ +// Package patype provides Kerberos 5 pre-authentication type assigned numbers. +package patype + +// Kerberos pre-authentication type assigned numbers. 
+const ( + PA_TGS_REQ int32 = 1 + PA_ENC_TIMESTAMP int32 = 2 + PA_PW_SALT int32 = 3 + //RESERVED : 4 + PA_ENC_UNIX_TIME int32 = 5 + PA_SANDIA_SECUREID int32 = 6 + PA_SESAME int32 = 7 + PA_OSF_DCE int32 = 8 + PA_CYBERSAFE_SECUREID int32 = 9 + PA_AFS3_SALT int32 = 10 + PA_ETYPE_INFO int32 = 11 + PA_SAM_CHALLENGE int32 = 12 + PA_SAM_RESPONSE int32 = 13 + PA_PK_AS_REQ_OLD int32 = 14 + PA_PK_AS_REP_OLD int32 = 15 + PA_PK_AS_REQ int32 = 16 + PA_PK_AS_REP int32 = 17 + PA_PK_OCSP_RESPONSE int32 = 18 + PA_ETYPE_INFO2 int32 = 19 + PA_USE_SPECIFIED_KVNO int32 = 20 + PA_SVR_REFERRAL_INFO int32 = 20 + PA_SAM_REDIRECT int32 = 21 + PA_GET_FROM_TYPED_DATA int32 = 22 + TD_PADATA int32 = 22 + PA_SAM_ETYPE_INFO int32 = 23 + PA_ALT_PRINC int32 = 24 + PA_SERVER_REFERRAL int32 = 25 + //UNASSIGNED : 26-29 + PA_SAM_CHALLENGE2 int32 = 30 + PA_SAM_RESPONSE2 int32 = 31 + //UNASSIGNED : 32-40 + PA_EXTRA_TGT int32 = 41 + //UNASSIGNED : 42-100 + TD_PKINIT_CMS_CERTIFICATES int32 = 101 + TD_KRB_PRINCIPAL int32 = 102 + TD_KRB_REALM int32 = 103 + TD_TRUSTED_CERTIFIERS int32 = 104 + TD_CERTIFICATE_INDEX int32 = 105 + TD_APP_DEFINED_ERROR int32 = 106 + TD_REQ_NONCE int32 = 107 + TD_REQ_SEQ int32 = 108 + TD_DH_PARAMETERS int32 = 109 + //UNASSIGNED : 110 + TD_CMS_DIGEST_ALGORITHMS int32 = 111 + TD_CERT_DIGEST_ALGORITHMS int32 = 112 + //UNASSIGNED : 113-127 + PA_PAC_REQUEST int32 = 128 + PA_FOR_USER int32 = 129 + PA_FOR_X509_USER int32 = 130 + PA_FOR_CHECK_DUPS int32 = 131 + PA_AS_CHECKSUM int32 = 132 + PA_FX_COOKIE int32 = 133 + PA_AUTHENTICATION_SET int32 = 134 + PA_AUTH_SET_SELECTED int32 = 135 + PA_FX_FAST int32 = 136 + PA_FX_ERROR int32 = 137 + PA_ENCRYPTED_CHALLENGE int32 = 138 + //UNASSIGNED : 139-140 + PA_OTP_CHALLENGE int32 = 141 + PA_OTP_REQUEST int32 = 142 + PA_OTP_CONFIRM int32 = 143 + PA_OTP_PIN_CHANGE int32 = 144 + PA_EPAK_AS_REQ int32 = 145 + PA_EPAK_AS_REP int32 = 146 + PA_PKINIT_KX int32 = 147 + PA_PKU2U_NAME int32 = 148 + PA_REQ_ENC_PA_REP int32 = 149 + PA_AS_FRESHNESS int32 = 150 + //UNASSIGNED : 151-164 + PA_SUPPORTED_ETYPES int32 = 165 + PA_EXTENDED_ERROR int32 = 166 +) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/changepasswddata.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/changepasswddata.go new file mode 100644 index 00000000..a3e2efdd --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/changepasswddata.go @@ -0,0 +1,23 @@ +package kadmin + +import ( + "github.com/jcmturner/gofork/encoding/asn1" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +// ChangePasswdData is the payload to a password change message. +type ChangePasswdData struct { + NewPasswd []byte `asn1:"explicit,tag:0"` + TargName types.PrincipalName `asn1:"explicit,optional,tag:1"` + TargRealm string `asn1:"generalstring,optional,explicit,tag:2"` +} + +// Marshal ChangePasswdData into a byte slice. +func (c *ChangePasswdData) Marshal() ([]byte, error) { + b, err := asn1.Marshal(*c) + if err != nil { + return []byte{}, err + } + //b = asn1tools.AddASNAppTag(b, asnAppTag.) + return b, nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/message.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/message.go new file mode 100644 index 00000000..157fcad4 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/message.go @@ -0,0 +1,114 @@ +package kadmin + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "math" + + "gopkg.in/jcmturner/gokrb5.v7/messages" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +const ( + verisonHex = "ff80" +) + +// Request message for changing password. 
+type Request struct {
+	APREQ   messages.APReq
+	KRBPriv messages.KRBPriv
+}
+
+// Reply message for a password change.
+type Reply struct {
+	MessageLength int
+	Version       int
+	APREPLength   int
+	APREP         messages.APRep
+	KRBPriv       messages.KRBPriv
+	KRBError      messages.KRBError
+	IsKRBError    bool
+	ResultCode    uint16
+	Result        string
+}
+
+// Marshal a Request into a byte slice.
+func (m *Request) Marshal() (b []byte, err error) {
+	b = []byte{255, 128} // protocol version number: contains the hex constant 0xff80 (big-endian integer).
+	ab, e := m.APREQ.Marshal()
+	if e != nil {
+		err = fmt.Errorf("error marshaling AP_REQ: %v", e)
+		return
+	}
+	if len(ab) > math.MaxUint16 {
+		err = errors.New("length of AP_REQ greater than max Uint16 size")
+		return
+	}
+	al := make([]byte, 2)
+	binary.BigEndian.PutUint16(al, uint16(len(ab)))
+	b = append(b, al...)
+	b = append(b, ab...)
+	pb, e := m.KRBPriv.Marshal()
+	if e != nil {
+		err = fmt.Errorf("error marshaling KRB_Priv: %v", e)
+		return
+	}
+	b = append(b, pb...)
+	if len(b)+2 > math.MaxUint16 {
+		err = errors.New("length of message greater than max Uint16 size")
+		return
+	}
+	ml := make([]byte, 2)
+	binary.BigEndian.PutUint16(ml, uint16(len(b)+2))
+	b = append(ml, b...)
+	return
+}
+
+// Unmarshal a byte slice into a Reply.
+func (m *Reply) Unmarshal(b []byte) error {
+	m.MessageLength = int(binary.BigEndian.Uint16(b[0:2]))
+	m.Version = int(binary.BigEndian.Uint16(b[2:4]))
+	if m.Version != 1 {
+		return fmt.Errorf("kadmin reply has incorrect protocol version number: %d", m.Version)
+	}
+	m.APREPLength = int(binary.BigEndian.Uint16(b[4:6]))
+	if m.APREPLength != 0 {
+		err := m.APREP.Unmarshal(b[6 : 6+m.APREPLength])
+		if err != nil {
+			return err
+		}
+		err = m.KRBPriv.Unmarshal(b[6+m.APREPLength : m.MessageLength])
+		if err != nil {
+			return err
+		}
+	} else {
+		m.IsKRBError = true
+		m.KRBError.Unmarshal(b[6:m.MessageLength])
+		m.ResultCode, m.Result = parseResponse(m.KRBError.EData)
+	}
+	return nil
+}
+
+func parseResponse(b []byte) (c uint16, s string) {
+	c = binary.BigEndian.Uint16(b[0:2])
+	buf := bytes.NewBuffer(b[2:])
+	m := make([]byte, len(b)-2)
+	binary.Read(buf, binary.BigEndian, &m)
+	s = string(m)
+	return
+}
+
+// Decrypt the encrypted part of the change password Reply.
+func (m *Reply) Decrypt(key types.EncryptionKey) error {
+	if m.IsKRBError {
+		return m.KRBError
+	}
+	err := m.KRBPriv.DecryptEncPart(key)
+	if err != nil {
+		return err
+	}
+	m.ResultCode, m.Result = parseResponse(m.KRBPriv.DecryptedEncPart.UserData)
+	return nil
+}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/passwd.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/passwd.go
new file mode 100644
index 00000000..2a7491aa
--- /dev/null
+++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/passwd.go
@@ -0,0 +1,68 @@
+// Package kadmin provides Kerberos administration capabilities.
+package kadmin
+
+import (
+	"gopkg.in/jcmturner/gokrb5.v7/crypto"
+	"gopkg.in/jcmturner/gokrb5.v7/krberror"
+	"gopkg.in/jcmturner/gokrb5.v7/messages"
+	"gopkg.in/jcmturner/gokrb5.v7/types"
+)
+
+// ChangePasswdMsg generates a change password request and also returns the key needed to decrypt the reply.
+func ChangePasswdMsg(cname types.PrincipalName, realm, password string, tkt messages.Ticket, sessionKey types.EncryptionKey) (r Request, k types.EncryptionKey, err error) { + // Create change password data struct and marshal to bytes + chgpasswd := ChangePasswdData{ + NewPasswd: []byte(password), + TargName: cname, + TargRealm: realm, + } + chpwdb, err := chgpasswd.Marshal() + if err != nil { + err = krberror.Errorf(err, krberror.KRBMsgError, "error marshaling change passwd data") + return + } + + // Generate authenticator + auth, err := types.NewAuthenticator(realm, cname) + if err != nil { + err = krberror.Errorf(err, krberror.KRBMsgError, "error generating new authenticator") + return + } + etype, err := crypto.GetEtype(sessionKey.KeyType) + if err != nil { + err = krberror.Errorf(err, krberror.KRBMsgError, "error generating subkey etype") + return + } + err = auth.GenerateSeqNumberAndSubKey(etype.GetETypeID(), etype.GetKeyByteSize()) + if err != nil { + err = krberror.Errorf(err, krberror.KRBMsgError, "error generating subkey") + return + } + k = auth.SubKey + + // Generate AP_REQ + APreq, err := messages.NewAPReq(tkt, sessionKey, auth) + if err != nil { + return + } + + // Form the KRBPriv encpart data + kp := messages.EncKrbPrivPart{ + UserData: chpwdb, + Timestamp: auth.CTime, + Usec: auth.Cusec, + SequenceNumber: auth.SeqNumber, + } + kpriv := messages.NewKRBPriv(kp) + err = kpriv.EncryptEncPart(k) + if err != nil { + err = krberror.Errorf(err, krberror.EncryptingError, "error encrypting change passwd data") + return + } + + r = Request{ + APREQ: APreq, + KRBPriv: kpriv, + } + return +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/keytab/keytab.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/keytab/keytab.go new file mode 100644 index 00000000..22c02044 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/keytab/keytab.go @@ -0,0 +1,466 @@ +// Package keytab implements Kerberos keytabs: https://web.mit.edu/kerberos/krb5-devel/doc/formats/keytab_file_format.html. +package keytab + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "time" + "unsafe" + + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +const ( + keytabFirstByte byte = 05 +) + +// Keytab struct. +type Keytab struct { + version uint8 + Entries []entry +} + +// Keytab entry struct. +type entry struct { + Principal principal + Timestamp time.Time + KVNO8 uint8 + Key types.EncryptionKey + KVNO uint32 +} + +// Keytab entry principal struct. +type principal struct { + NumComponents int16 + Realm string + Components []string + NameType int32 +} + +// New creates new, empty Keytab type. +func New() *Keytab { + var e []entry + return &Keytab{ + version: 0, + Entries: e, + } +} + +// GetEncryptionKey returns the EncryptionKey from the Keytab for the newest entry with the required kvno, etype and matching principal. 
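A minimal sketch of how the kadmin pieces above fit together (separate from the vendored sources; the ticket and session key are assumed to come from a TGS exchange for the kadmin/changepw service, and the network round trip that produces respBytes is out of scope here). The vendored keytab lookup code continues below.

// Illustrative sketch of the kadmin change-password flow using the vendored API.
// tkt/sessionKey are assumed to be for the kadmin/changepw service; sending the
// marshaled request and receiving respBytes over the network is handled elsewhere.
package example

import (
	"fmt"

	"gopkg.in/jcmturner/gokrb5.v7/kadmin"
	"gopkg.in/jcmturner/gokrb5.v7/messages"
	"gopkg.in/jcmturner/gokrb5.v7/types"
)

func changePassword(cname types.PrincipalName, realm, newPass string, tkt messages.Ticket, sessionKey types.EncryptionKey, respBytes []byte) error {
	// Build the request and keep the subkey needed to decrypt the KRB_PRIV reply.
	req, subkey, err := kadmin.ChangePasswdMsg(cname, realm, newPass, tkt, sessionKey)
	if err != nil {
		return err
	}
	if _, err := req.Marshal(); err != nil { // bytes to send to the kpasswd service
		return err
	}

	// Parse and decrypt the reply received from the server.
	var rep kadmin.Reply
	if err := rep.Unmarshal(respBytes); err != nil {
		return err
	}
	if err := rep.Decrypt(subkey); err != nil {
		return err
	}
	if rep.ResultCode != 0 { // result code 0 indicates success in the kpasswd protocol
		return fmt.Errorf("password change failed: (%d) %s", rep.ResultCode, rep.Result)
	}
	return nil
}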
+func (kt *Keytab) GetEncryptionKey(princName types.PrincipalName, realm string, kvno int, etype int32) (types.EncryptionKey, error) { + //TODO (theme: KVNO from keytab) this function should return the kvno too + var key types.EncryptionKey + var t time.Time + for _, k := range kt.Entries { + if k.Principal.Realm == realm && len(k.Principal.Components) == len(princName.NameString) && + k.Key.KeyType == etype && + (k.KVNO == uint32(kvno) || kvno == 0) && + k.Timestamp.After(t) { + p := true + for i, n := range k.Principal.Components { + if princName.NameString[i] != n { + p = false + break + } + } + if p { + key = k.Key + t = k.Timestamp + } + } + } + if len(key.KeyValue) < 1 { + return key, fmt.Errorf("matching key not found in keytab. Looking for %v realm: %v kvno: %v etype: %v", princName.NameString, realm, kvno, etype) + } + return key, nil +} + +// Create a new Keytab entry. +func newKeytabEntry() entry { + var b []byte + return entry{ + Principal: newPrincipal(), + Timestamp: time.Time{}, + KVNO8: 0, + Key: types.EncryptionKey{ + KeyType: 0, + KeyValue: b, + }, + KVNO: 0, + } +} + +// Create a new principal. +func newPrincipal() principal { + var c []string + return principal{ + NumComponents: 0, + Realm: "", + Components: c, + NameType: 0, + } +} + +// Load a Keytab file into a Keytab type. +func Load(ktPath string) (*Keytab, error) { + kt := new(Keytab) + b, err := ioutil.ReadFile(ktPath) + if err != nil { + return kt, err + } + err = kt.Unmarshal(b) + return kt, err +} + +// Marshal keytab into byte slice +func (kt *Keytab) Marshal() ([]byte, error) { + b := []byte{keytabFirstByte, kt.version} + for _, e := range kt.Entries { + eb, err := e.marshal(int(kt.version)) + if err != nil { + return b, err + } + b = append(b, eb...) + } + return b, nil +} + +// Write the keytab bytes to io.Writer. +// Returns the number of bytes written +func (kt *Keytab) Write(w io.Writer) (int, error) { + b, err := kt.Marshal() + if err != nil { + return 0, fmt.Errorf("error marshaling keytab: %v", err) + } + return w.Write(b) +} + +// Unmarshal byte slice of Keytab data into Keytab type. +func (kt *Keytab) Unmarshal(b []byte) error { + if len(b) < 2 { + return fmt.Errorf("byte array is less than 2 bytes: %d", len(b)) + } + + //The first byte of the file always has the value 5 + if b[0] != keytabFirstByte { + return errors.New("invalid keytab data. First byte does not equal 5") + } + //Get keytab version + //The 2nd byte contains the version number (1 or 2) + kt.version = b[1] + if kt.version != 1 && kt.version != 2 { + return errors.New("invalid keytab data. Keytab version is neither 1 nor 2") + } + //Version 1 of the file format uses native byte order for integer representations. Version 2 always uses big-endian byte order + var endian binary.ByteOrder + endian = binary.BigEndian + if kt.version == 1 && isNativeEndianLittle() { + endian = binary.LittleEndian + } + /* + After the two-byte version indicator, the file contains a sequence of signed 32-bit record lengths followed by key records or holes. + A positive record length indicates a valid key entry whose size is equal to or less than the record length. + A negative length indicates a zero-filled hole whose size is the inverse of the length. + A length of 0 indicates the end of the file. 
+ */ + // n tracks position in the byte array + n := 2 + l, err := readInt32(b, &n, &endian) + if err != nil { + return err + } + for l != 0 { + if l < 0 { + //Zero padded so skip over + l = l * -1 + n = n + int(l) + } else { + //fmt.Printf("Bytes for entry: %v\n", b[n:n+int(l)]) + if n < 0 { + return fmt.Errorf("%d can't be less than zero", n) + } + if n+int(l) > len(b) { + return fmt.Errorf("%s's length is less than %d", b, n+int(l)) + } + eb := b[n : n+int(l)] + n = n + int(l) + ke := newKeytabEntry() + // p keeps track as to where we are in the byte stream + var p int + var err error + parsePrincipal(eb, &p, kt, &ke, &endian) + ke.Timestamp, err = readTimestamp(eb, &p, &endian) + if err != nil { + return err + } + rei8, err := readInt8(eb, &p, &endian) + if err != nil { + return err + } + ke.KVNO8 = uint8(rei8) + rei16, err := readInt16(eb, &p, &endian) + if err != nil { + return err + } + ke.Key.KeyType = int32(rei16) + rei16, err = readInt16(eb, &p, &endian) + if err != nil { + return err + } + kl := int(rei16) + ke.Key.KeyValue, err = readBytes(eb, &p, kl, &endian) + if err != nil { + return err + } + //The 32-bit key version overrides the 8-bit key version. + // To determine if it is present, the implementation must check that at least 4 bytes remain in the record after the other fields are read, + // and that the value of the 32-bit integer contained in those bytes is non-zero. + if len(eb)-p >= 4 { + // The 32-bit key may be present + ri32, err := readInt32(eb, &p, &endian) + if err != nil { + return err + } + ke.KVNO = uint32(ri32) + } + if ke.KVNO == 0 { + // Handles if the value from the last 4 bytes was zero and also if there are not the 4 bytes present. Makes sense to put the same value here as KVNO8 + ke.KVNO = uint32(ke.KVNO8) + } + // Add the entry to the keytab + kt.Entries = append(kt.Entries, ke) + } + // Check if there are still 4 bytes left to read + // Also check that n is greater than zero + if n < 0 || n > len(b) || len(b[n:]) < 4 { + break + } + // Read the size of the next entry + l, err = readInt32(b, &n, &endian) + if err != nil { + return err + } + } + return nil +} + +func (e entry) marshal(v int) ([]byte, error) { + var b []byte + pb, err := e.Principal.marshal(v) + if err != nil { + return b, err + } + b = append(b, pb...) + + var endian binary.ByteOrder + endian = binary.BigEndian + if v == 1 && isNativeEndianLittle() { + endian = binary.LittleEndian + } + + t := make([]byte, 9) + endian.PutUint32(t[0:4], uint32(e.Timestamp.Unix())) + t[4] = e.KVNO8 + endian.PutUint16(t[5:7], uint16(e.Key.KeyType)) + endian.PutUint16(t[7:9], uint16(len(e.Key.KeyValue))) + b = append(b, t...) + + buf := new(bytes.Buffer) + err = binary.Write(buf, endian, e.Key.KeyValue) + if err != nil { + return b, err + } + b = append(b, buf.Bytes()...) + + t = make([]byte, 4) + endian.PutUint32(t, e.KVNO) + b = append(b, t...) + + // Add the length header + t = make([]byte, 4) + endian.PutUint32(t, uint32(len(b))) + b = append(t, b...) + return b, nil +} + +// Parse the Keytab bytes of a principal into a Keytab entry's principal. +func parsePrincipal(b []byte, p *int, kt *Keytab, ke *entry, e *binary.ByteOrder) error { + var err error + ke.Principal.NumComponents, err = readInt16(b, p, e) + if err != nil { + return err + } + if kt.version == 1 { + //In version 1 the number of components includes the realm. 
Minus 1 to make consistent with version 2 + ke.Principal.NumComponents-- + } + lenRealm, err := readInt16(b, p, e) + if err != nil { + return err + } + realmB, err := readBytes(b, p, int(lenRealm), e) + if err != nil { + return err + } + ke.Principal.Realm = string(realmB) + for i := 0; i < int(ke.Principal.NumComponents); i++ { + l, err := readInt16(b, p, e) + if err != nil { + return err + } + compB, err := readBytes(b, p, int(l), e) + if err != nil { + return err + } + ke.Principal.Components = append(ke.Principal.Components, string(compB)) + } + if kt.version != 1 { + //Name Type is omitted in version 1 + ke.Principal.NameType, err = readInt32(b, p, e) + if err != nil { + return err + } + } + return nil +} + +func (p principal) marshal(v int) ([]byte, error) { + //var b []byte + b := make([]byte, 2) + var endian binary.ByteOrder + endian = binary.BigEndian + if v == 1 && isNativeEndianLittle() { + endian = binary.LittleEndian + } + endian.PutUint16(b[0:], uint16(p.NumComponents)) + realm, err := marshalString(p.Realm, v) + if err != nil { + return b, err + } + b = append(b, realm...) + for _, c := range p.Components { + cb, err := marshalString(c, v) + if err != nil { + return b, err + } + b = append(b, cb...) + } + if v != 1 { + t := make([]byte, 4) + endian.PutUint32(t, uint32(p.NameType)) + b = append(b, t...) + } + return b, nil +} + +func marshalString(s string, v int) ([]byte, error) { + sb := []byte(s) + b := make([]byte, 2) + var endian binary.ByteOrder + endian = binary.BigEndian + if v == 1 && isNativeEndianLittle() { + endian = binary.LittleEndian + } + endian.PutUint16(b[0:], uint16(len(sb))) + buf := new(bytes.Buffer) + err := binary.Write(buf, endian, sb) + if err != nil { + return b, err + } + b = append(b, buf.Bytes()...) + return b, err +} + +// Read bytes representing a timestamp. +func readTimestamp(b []byte, p *int, e *binary.ByteOrder) (time.Time, error) { + i32, err := readInt32(b, p, e) + if err != nil { + return time.Time{}, err + } + return time.Unix(int64(i32), 0), nil +} + +// Read bytes representing an eight bit integer. +func readInt8(b []byte, p *int, e *binary.ByteOrder) (i int8, err error) { + if *p < 0 { + return 0, fmt.Errorf("%d cannot be less than zero", *p) + } + + if (*p + 1) > len(b) { + return 0, fmt.Errorf("%s's length is less than %d", b, *p+1) + } + buf := bytes.NewBuffer(b[*p : *p+1]) + binary.Read(buf, *e, &i) + *p++ + return +} + +// Read bytes representing a sixteen bit integer. +func readInt16(b []byte, p *int, e *binary.ByteOrder) (i int16, err error) { + if *p < 0 { + return 0, fmt.Errorf("%d cannot be less than zero", *p) + } + + if (*p + 2) > len(b) { + return 0, fmt.Errorf("%s's length is less than %d", b, *p+2) + } + + buf := bytes.NewBuffer(b[*p : *p+2]) + binary.Read(buf, *e, &i) + *p += 2 + return +} + +// Read bytes representing a thirty two bit integer. 
+func readInt32(b []byte, p *int, e *binary.ByteOrder) (i int32, err error) { + if *p < 0 { + return 0, fmt.Errorf("%d cannot be less than zero", *p) + } + + if (*p + 4) > len(b) { + return 0, fmt.Errorf("%s's length is less than %d", b, *p+4) + } + + buf := bytes.NewBuffer(b[*p : *p+4]) + binary.Read(buf, *e, &i) + *p += 4 + return +} + +func readBytes(b []byte, p *int, s int, e *binary.ByteOrder) ([]byte, error) { + if s < 0 { + return nil, fmt.Errorf("%d cannot be less than zero", s) + } + i := *p + s + if i > len(b) { + return nil, fmt.Errorf("%s's length is greater than %d", b, i) + } + buf := bytes.NewBuffer(b[*p:i]) + r := make([]byte, s) + if err := binary.Read(buf, *e, &r); err != nil { + return nil, err + } + *p += s + return r, nil +} + +func isNativeEndianLittle() bool { + var x = 0x012345678 + var p = unsafe.Pointer(&x) + var bp = (*[4]byte)(p) + + var endian bool + if 0x01 == bp[0] { + endian = false + } else if (0x78 & 0xff) == (bp[0] & 0xff) { + endian = true + } else { + // Default to big endian + endian = false + } + return endian +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/krberror/error.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/krberror/error.go new file mode 100644 index 00000000..d591bdeb --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/krberror/error.go @@ -0,0 +1,67 @@ +// Package krberror provides error type and functions for gokrb5. +package krberror + +import ( + "fmt" + "strings" +) + +// Error type descriptions. +const ( + separator = " < " + EncodingError = "Encoding_Error" + NetworkingError = "Networking_Error" + DecryptingError = "Decrypting_Error" + EncryptingError = "Encrypting_Error" + ChksumError = "Checksum_Error" + KRBMsgError = "KRBMessage_Handling_Error" + ConfigError = "Configuration_Error" + KDCError = "KDC_Error" +) + +// Krberror is an error type for gokrb5 +type Krberror struct { + RootCause string + EText []string +} + +// Error function to implement the error interface. +func (e Krberror) Error() string { + return fmt.Sprintf("[Root cause: %s] ", e.RootCause) + strings.Join(e.EText, separator) +} + +// Add another error statement to the error. +func (e *Krberror) Add(et string, s string) { + e.EText = append([]string{fmt.Sprintf("%s: %s", et, s)}, e.EText...) +} + +// NewKrberror creates a new instance of Krberror. +func NewKrberror(et, s string) Krberror { + return Krberror{ + RootCause: et, + EText: []string{s}, + } +} + +// Errorf appends to or creates a new Krberror. +func Errorf(err error, et, format string, a ...interface{}) Krberror { + if e, ok := err.(Krberror); ok { + e.Add(et, fmt.Sprintf(format, a...)) + return e + } + return NewErrorf(et, format+": %s", append(a, err)...) +} + +// NewErrorf creates a new Krberror from a formatted string. 
+func NewErrorf(et, format string, a ...interface{}) Krberror { + var s string + if len(a) > 0 { + s = fmt.Sprintf("%s: %s", et, fmt.Sprintf(format, a...)) + } else { + s = fmt.Sprintf("%s: %s", et, format) + } + return Krberror{ + RootCause: et, + EText: []string{s}, + } +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/APRep.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/APRep.go new file mode 100644 index 00000000..9c244f07 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/APRep.go @@ -0,0 +1,64 @@ +package messages + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag" + "gopkg.in/jcmturner/gokrb5.v7/iana/msgtype" + "gopkg.in/jcmturner/gokrb5.v7/krberror" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +/* +AP-REP ::= [APPLICATION 15] SEQUENCE { +pvno [0] INTEGER (5), +msg-type [1] INTEGER (15), +enc-part [2] EncryptedData -- EncAPRepPart +} + +EncAPRepPart ::= [APPLICATION 27] SEQUENCE { + ctime [0] KerberosTime, + cusec [1] Microseconds, + subkey [2] EncryptionKey OPTIONAL, + seq-number [3] UInt32 OPTIONAL +} +*/ + +// APRep implements RFC 4120 KRB_AP_REP: https://tools.ietf.org/html/rfc4120#section-5.5.2. +type APRep struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + EncPart types.EncryptedData `asn1:"explicit,tag:2"` +} + +// EncAPRepPart is the encrypted part of KRB_AP_REP. +type EncAPRepPart struct { + CTime time.Time `asn1:"generalized,explicit,tag:0"` + Cusec int `asn1:"explicit,tag:1"` + Subkey types.EncryptionKey `asn1:"optional,explicit,tag:2"` + SequenceNumber int64 `asn1:"optional,explicit,tag:3"` +} + +// Unmarshal bytes b into the APRep struct. +func (a *APRep) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, a, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.APREP)) + if err != nil { + return processUnmarshalReplyError(b, err) + } + expectedMsgType := msgtype.KRB_AP_REP + if a.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_AP_REP. Expected: %v; Actual: %v", expectedMsgType, a.MsgType) + } + return nil +} + +// Unmarshal bytes b into the APRep encrypted part struct. 
+func (a *EncAPRepPart) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, a, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncAPRepPart)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "AP_REP unmarshal error") + } + return nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/APReq.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/APReq.go new file mode 100644 index 00000000..e1ed4ae1 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/APReq.go @@ -0,0 +1,220 @@ +package messages + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "gopkg.in/jcmturner/gokrb5.v7/asn1tools" + "gopkg.in/jcmturner/gokrb5.v7/crypto" + "gopkg.in/jcmturner/gokrb5.v7/iana" + "gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag" + "gopkg.in/jcmturner/gokrb5.v7/iana/errorcode" + "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage" + "gopkg.in/jcmturner/gokrb5.v7/iana/msgtype" + "gopkg.in/jcmturner/gokrb5.v7/keytab" + "gopkg.in/jcmturner/gokrb5.v7/krberror" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +/*AP-REQ ::= [APPLICATION 14] SEQUENCE { +pvno [0] INTEGER (5), +msg-type [1] INTEGER (14), +ap-options [2] APOptions, +ticket [3] Ticket, +authenticator [4] EncryptedData -- Authenticator +} + +APOptions ::= KerberosFlags +-- reserved(0), +-- use-session-key(1), +-- mutual-required(2)*/ + +type marshalAPReq struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + APOptions asn1.BitString `asn1:"explicit,tag:2"` + // Ticket needs to be a raw value as it is wrapped in an APPLICATION tag + Ticket asn1.RawValue `asn1:"explicit,tag:3"` + EncryptedAuthenticator types.EncryptedData `asn1:"explicit,tag:4"` +} + +// APReq implements RFC 4120 KRB_AP_REQ: https://tools.ietf.org/html/rfc4120#section-5.5.1. +type APReq struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + APOptions asn1.BitString `asn1:"explicit,tag:2"` + Ticket Ticket `asn1:"explicit,tag:3"` + EncryptedAuthenticator types.EncryptedData `asn1:"explicit,tag:4"` + Authenticator types.Authenticator `asn1:"optional"` +} + +// NewAPReq generates a new KRB_AP_REQ struct. +func NewAPReq(tkt Ticket, sessionKey types.EncryptionKey, auth types.Authenticator) (APReq, error) { + var a APReq + ed, err := encryptAuthenticator(auth, sessionKey, tkt) + if err != nil { + return a, krberror.Errorf(err, krberror.KRBMsgError, "error creating Authenticator for AP_REQ") + } + a = APReq{ + PVNO: iana.PVNO, + MsgType: msgtype.KRB_AP_REQ, + APOptions: types.NewKrbFlags(), + Ticket: tkt, + EncryptedAuthenticator: ed, + } + return a, nil +} + +// Encrypt Authenticator +func encryptAuthenticator(a types.Authenticator, sessionKey types.EncryptionKey, tkt Ticket) (types.EncryptedData, error) { + var ed types.EncryptedData + m, err := a.Marshal() + if err != nil { + return ed, krberror.Errorf(err, krberror.EncodingError, "marshaling error of EncryptedData form of Authenticator") + } + usage := authenticatorKeyUsage(tkt.SName) + ed, err = crypto.GetEncryptedData(m, sessionKey, uint32(usage), tkt.EncPart.KVNO) + if err != nil { + return ed, krberror.Errorf(err, krberror.EncryptingError, "error encrypting Authenticator") + } + return ed, nil +} + +// DecryptAuthenticator decrypts the Authenticator within the AP_REQ. +// sessionKey may simply be the key within the decrypted EncPart of the ticket within the AP_REQ. 
+func (a *APReq) DecryptAuthenticator(sessionKey types.EncryptionKey) error { + usage := authenticatorKeyUsage(a.Ticket.SName) + ab, e := crypto.DecryptEncPart(a.EncryptedAuthenticator, sessionKey, uint32(usage)) + if e != nil { + return fmt.Errorf("error decrypting authenticator: %v", e) + } + err := a.Authenticator.Unmarshal(ab) + if err != nil { + return fmt.Errorf("error unmarshaling authenticator: %v", err) + } + return nil +} + +func authenticatorKeyUsage(pn types.PrincipalName) int { + if pn.NameString[0] == "krbtgt" { + return keyusage.TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR + } + return keyusage.AP_REQ_AUTHENTICATOR +} + +// Unmarshal bytes b into the APReq struct. +func (a *APReq) Unmarshal(b []byte) error { + var m marshalAPReq + _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.APREQ)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "unmarshal error of AP_REQ") + } + if m.MsgType != msgtype.KRB_AP_REQ { + return NewKRBError(types.PrincipalName{}, "", errorcode.KRB_AP_ERR_MSG_TYPE, errorcode.Lookup(errorcode.KRB_AP_ERR_MSG_TYPE)) + } + a.PVNO = m.PVNO + a.MsgType = m.MsgType + a.APOptions = m.APOptions + a.EncryptedAuthenticator = m.EncryptedAuthenticator + a.Ticket, err = unmarshalTicket(m.Ticket.Bytes) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "unmarshaling error of Ticket within AP_REQ") + } + return nil +} + +// Marshal APReq struct. +func (a *APReq) Marshal() ([]byte, error) { + m := marshalAPReq{ + PVNO: a.PVNO, + MsgType: a.MsgType, + APOptions: a.APOptions, + EncryptedAuthenticator: a.EncryptedAuthenticator, + } + var b []byte + b, err := a.Ticket.Marshal() + if err != nil { + return b, err + } + m.Ticket = asn1.RawValue{ + Class: asn1.ClassContextSpecific, + IsCompound: true, + Tag: 3, + Bytes: b, + } + mk, err := asn1.Marshal(m) + if err != nil { + return mk, krberror.Errorf(err, krberror.EncodingError, "marshaling error of AP_REQ") + } + mk = asn1tools.AddASNAppTag(mk, asnAppTag.APREQ) + return mk, nil +} + +// Verify an AP_REQ using service's keytab, spn and max acceptable clock skew duration. +// The service ticket encrypted part and authenticator will be decrypted as part of this operation. +func (a *APReq) Verify(kt *keytab.Keytab, d time.Duration, cAddr types.HostAddress) (bool, error) { + // Decrypt ticket's encrypted part with service key + //TODO decrypt with service's session key from its TGT is use-to-user. Need to figure out how to get TGT. + //if types.IsFlagSet(&a.APOptions, flags.APOptionUseSessionKey) { + // //If the USE-SESSION-KEY flag is set in the ap-options field, it indicates to + // //the server that user-to-user authentication is in use, and that the ticket + // //is encrypted in the session key from the server's TGT rather than in the server's secret key. + // err := a.Ticket.Decrypt(tgt.DecryptedEncPart.Key) + // if err != nil { + // return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting encpart of ticket provided using session key") + // } + //} else { + // // Because it is possible for the server to be registered in multiple + // // realms, with different keys in each, the srealm field in the + // // unencrypted portion of the ticket in the KRB_AP_REQ is used to + // // specify which secret key the server should use to decrypt that + // // ticket.The KRB_AP_ERR_NOKEY error code is returned if the server + // // doesn't have the proper key to decipher the ticket. 
+ // // The ticket is decrypted using the version of the server's key + // // specified by the ticket. + // err := a.Ticket.DecryptEncPart(*kt, &a.Ticket.SName) + // if err != nil { + // return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting encpart of service ticket provided") + // } + //} + err := a.Ticket.DecryptEncPart(kt, &a.Ticket.SName) + if err != nil { + return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting encpart of service ticket provided") + } + + // Check time validity of ticket + ok, err := a.Ticket.Valid(d) + if err != nil || !ok { + return ok, err + } + + // Check client's address is listed in the client addresses in the ticket + if len(a.Ticket.DecryptedEncPart.CAddr) > 0 { + //The addresses in the ticket (if any) are then searched for an address matching the operating-system reported + //address of the client. If no match is found or the server insists on ticket addresses but none are present in + //the ticket, the KRB_AP_ERR_BADADDR error is returned. + if !types.HostAddressesContains(a.Ticket.DecryptedEncPart.CAddr, cAddr) { + return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_BADADDR, "client address not within the list contained in the service ticket") + } + } + + // Decrypt authenticator with session key from ticket's encrypted part + err = a.DecryptAuthenticator(a.Ticket.DecryptedEncPart.Key) + if err != nil { + return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_BAD_INTEGRITY, "could not decrypt authenticator") + } + + // Check CName in authenticator is the same as that in the ticket + if !a.Authenticator.CName.Equal(a.Ticket.DecryptedEncPart.CName) { + return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_BADMATCH, "CName in Authenticator does not match that in service ticket") + } + + // Check the clock skew between the client and the service server + ct := a.Authenticator.CTime.Add(time.Duration(a.Authenticator.Cusec) * time.Microsecond) + t := time.Now().UTC() + if t.Sub(ct) > d || ct.Sub(t) > d { + return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_SKEW, fmt.Sprintf("clock skew with client too large. 
greater than %v seconds", d)) + } + return true, nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KDCRep.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KDCRep.go new file mode 100644 index 00000000..76c89c34 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KDCRep.go @@ -0,0 +1,312 @@ +package messages + +// Reference: https://www.ietf.org/rfc/rfc4120.txt +// Section: 5.4.2 + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "gopkg.in/jcmturner/gokrb5.v7/config" + "gopkg.in/jcmturner/gokrb5.v7/credentials" + "gopkg.in/jcmturner/gokrb5.v7/crypto" + "gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag" + "gopkg.in/jcmturner/gokrb5.v7/iana/flags" + "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage" + "gopkg.in/jcmturner/gokrb5.v7/iana/msgtype" + "gopkg.in/jcmturner/gokrb5.v7/iana/patype" + "gopkg.in/jcmturner/gokrb5.v7/krberror" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +type marshalKDCRep struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + PAData types.PADataSequence `asn1:"explicit,optional,tag:2"` + CRealm string `asn1:"generalstring,explicit,tag:3"` + CName types.PrincipalName `asn1:"explicit,tag:4"` + // Ticket needs to be a raw value as it is wrapped in an APPLICATION tag + Ticket asn1.RawValue `asn1:"explicit,tag:5"` + EncPart types.EncryptedData `asn1:"explicit,tag:6"` +} + +// KDCRepFields represents the KRB_KDC_REP fields. +type KDCRepFields struct { + PVNO int + MsgType int + PAData []types.PAData + CRealm string + CName types.PrincipalName + Ticket Ticket + EncPart types.EncryptedData + DecryptedEncPart EncKDCRepPart +} + +// ASRep implements RFC 4120 KRB_AS_REP: https://tools.ietf.org/html/rfc4120#section-5.4.2. +type ASRep struct { + KDCRepFields +} + +// TGSRep implements RFC 4120 KRB_TGS_REP: https://tools.ietf.org/html/rfc4120#section-5.4.2. +type TGSRep struct { + KDCRepFields +} + +// EncKDCRepPart is the encrypted part of KRB_KDC_REP. +type EncKDCRepPart struct { + Key types.EncryptionKey `asn1:"explicit,tag:0"` + LastReqs []LastReq `asn1:"explicit,tag:1"` + Nonce int `asn1:"explicit,tag:2"` + KeyExpiration time.Time `asn1:"generalized,explicit,optional,tag:3"` + Flags asn1.BitString `asn1:"explicit,tag:4"` + AuthTime time.Time `asn1:"generalized,explicit,tag:5"` + StartTime time.Time `asn1:"generalized,explicit,optional,tag:6"` + EndTime time.Time `asn1:"generalized,explicit,tag:7"` + RenewTill time.Time `asn1:"generalized,explicit,optional,tag:8"` + SRealm string `asn1:"generalstring,explicit,tag:9"` + SName types.PrincipalName `asn1:"explicit,tag:10"` + CAddr []types.HostAddress `asn1:"explicit,optional,tag:11"` + EncPAData types.PADataSequence `asn1:"explicit,optional,tag:12"` +} + +// LastReq part of KRB_KDC_REP. +type LastReq struct { + LRType int32 `asn1:"explicit,tag:0"` + LRValue time.Time `asn1:"generalized,explicit,tag:1"` +} + +// Unmarshal bytes b into the ASRep struct. +func (k *ASRep) Unmarshal(b []byte) error { + var m marshalKDCRep + _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.ASREP)) + if err != nil { + return processUnmarshalReplyError(b, err) + } + if m.MsgType != msgtype.KRB_AS_REP { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate an AS_REP. 
Expected: %v; Actual: %v", msgtype.KRB_AS_REP, m.MsgType) + } + //Process the raw ticket within + tkt, err := unmarshalTicket(m.Ticket.Bytes) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling Ticket within AS_REP") + } + k.KDCRepFields = KDCRepFields{ + PVNO: m.PVNO, + MsgType: m.MsgType, + PAData: m.PAData, + CRealm: m.CRealm, + CName: m.CName, + Ticket: tkt, + EncPart: m.EncPart, + } + return nil +} + +// Unmarshal bytes b into the TGSRep struct. +func (k *TGSRep) Unmarshal(b []byte) error { + var m marshalKDCRep + _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.TGSREP)) + if err != nil { + return processUnmarshalReplyError(b, err) + } + if m.MsgType != msgtype.KRB_TGS_REP { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate an TGS_REP. Expected: %v; Actual: %v", msgtype.KRB_TGS_REP, m.MsgType) + } + //Process the raw ticket within + tkt, err := unmarshalTicket(m.Ticket.Bytes) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling Ticket within TGS_REP") + } + k.KDCRepFields = KDCRepFields{ + PVNO: m.PVNO, + MsgType: m.MsgType, + PAData: m.PAData, + CRealm: m.CRealm, + CName: m.CName, + Ticket: tkt, + EncPart: m.EncPart, + } + return nil +} + +// Unmarshal bytes b into encrypted part of KRB_KDC_REP. +func (e *EncKDCRepPart) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, e, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncASRepPart)) + if err != nil { + // Try using tag 26 + /* Ref: RFC 4120 + Compatibility note: Some implementations unconditionally send an + encrypted EncTGSRepPart (application tag number 26) in this field + regardless of whether the reply is a AS-REP or a TGS-REP. In the + interest of compatibility, implementors MAY relax the check on the + tag number of the decrypted ENC-PART.*/ + _, err = asn1.UnmarshalWithParams(b, e, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncTGSRepPart)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling encrypted part within KDC_REP") + } + } + return nil +} + +// DecryptEncPart decrypts the encrypted part of an AS_REP. +func (k *ASRep) DecryptEncPart(c *credentials.Credentials) (types.EncryptionKey, error) { + var key types.EncryptionKey + var err error + if c.HasKeytab() { + key, err = c.Keytab().GetEncryptionKey(k.CName, k.CRealm, k.EncPart.KVNO, k.EncPart.EType) + if err != nil { + return key, krberror.Errorf(err, krberror.DecryptingError, "error decrypting AS_REP encrypted part") + } + } + if c.HasPassword() { + key, _, err = crypto.GetKeyFromPassword(c.Password(), k.CName, k.CRealm, k.EncPart.EType, k.PAData) + if err != nil { + return key, krberror.Errorf(err, krberror.DecryptingError, "error decrypting AS_REP encrypted part") + } + } + if !c.HasKeytab() && !c.HasPassword() { + return key, krberror.NewErrorf(krberror.DecryptingError, "no secret available in credentials to perform decryption of AS_REP encrypted part") + } + b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.AS_REP_ENCPART) + if err != nil { + return key, krberror.Errorf(err, krberror.DecryptingError, "error decrypting AS_REP encrypted part") + } + var denc EncKDCRepPart + err = denc.Unmarshal(b) + if err != nil { + return key, krberror.Errorf(err, krberror.EncodingError, "error unmarshaling decrypted encpart of AS_REP") + } + k.DecryptedEncPart = denc + return key, nil +} + +// Verify checks the validity of AS_REP message. 
+func (k *ASRep) Verify(cfg *config.Config, creds *credentials.Credentials, asReq ASReq) (bool, error) { + //Ref RFC 4120 Section 3.1.5 + if k.CName.NameType != asReq.ReqBody.CName.NameType || k.CName.NameString == nil { + return false, krberror.NewErrorf(krberror.KRBMsgError, "CName in response does not match what was requested. Requested: %+v; Reply: %+v", asReq.ReqBody.CName, k.CName) + } + for i := range k.CName.NameString { + if k.CName.NameString[i] != asReq.ReqBody.CName.NameString[i] { + return false, krberror.NewErrorf(krberror.KRBMsgError, "CName in response does not match what was requested. Requested: %+v; Reply: %+v", asReq.ReqBody.CName, k.CName) + } + } + if k.CRealm != asReq.ReqBody.Realm { + return false, krberror.NewErrorf(krberror.KRBMsgError, "CRealm in response does not match what was requested. Requested: %s; Reply: %s", asReq.ReqBody.Realm, k.CRealm) + } + key, err := k.DecryptEncPart(creds) + if err != nil { + return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting EncPart of AS_REP") + } + if k.DecryptedEncPart.Nonce != asReq.ReqBody.Nonce { + return false, krberror.NewErrorf(krberror.KRBMsgError, "possible replay attack, nonce in response does not match that in request") + } + if k.DecryptedEncPart.SName.NameType != asReq.ReqBody.SName.NameType || k.DecryptedEncPart.SName.NameString == nil { + return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %v; Reply: %v", asReq.ReqBody.SName, k.DecryptedEncPart.SName) + } + for i := range k.CName.NameString { + if k.DecryptedEncPart.SName.NameString[i] != asReq.ReqBody.SName.NameString[i] { + return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %+v; Reply: %+v", asReq.ReqBody.SName, k.DecryptedEncPart.SName) + } + } + if k.DecryptedEncPart.SRealm != asReq.ReqBody.Realm { + return false, krberror.NewErrorf(krberror.KRBMsgError, "SRealm in response does not match what was requested. Requested: %s; Reply: %s", asReq.ReqBody.Realm, k.DecryptedEncPart.SRealm) + } + if len(asReq.ReqBody.Addresses) > 0 { + if !types.HostAddressesEqual(k.DecryptedEncPart.CAddr, asReq.ReqBody.Addresses) { + return false, krberror.NewErrorf(krberror.KRBMsgError, "addresses listed in the AS_REP does not match those listed in the AS_REQ") + } + } + t := time.Now().UTC() + if t.Sub(k.DecryptedEncPart.AuthTime) > cfg.LibDefaults.Clockskew || k.DecryptedEncPart.AuthTime.Sub(t) > cfg.LibDefaults.Clockskew { + return false, krberror.NewErrorf(krberror.KRBMsgError, "clock skew with KDC too large. 
Greater than %v seconds", cfg.LibDefaults.Clockskew.Seconds()) + } + // RFC 6806 https://tools.ietf.org/html/rfc6806.html#section-11 + if asReq.PAData.Contains(patype.PA_REQ_ENC_PA_REP) && types.IsFlagSet(&k.DecryptedEncPart.Flags, flags.EncPARep) { + if len(k.DecryptedEncPart.EncPAData) < 2 || !k.DecryptedEncPart.EncPAData.Contains(patype.PA_FX_FAST) { + return false, krberror.NewErrorf(krberror.KRBMsgError, "KDC did not respond appropriately to FAST negotiation") + } + for _, pa := range k.DecryptedEncPart.EncPAData { + if pa.PADataType == patype.PA_REQ_ENC_PA_REP { + var pafast types.PAReqEncPARep + err := pafast.Unmarshal(pa.PADataValue) + if err != nil { + return false, krberror.Errorf(err, krberror.EncodingError, "KDC FAST negotiation response error, could not unmarshal PA_REQ_ENC_PA_REP") + } + etype, err := crypto.GetChksumEtype(pafast.ChksumType) + if err != nil { + return false, krberror.Errorf(err, krberror.ChksumError, "KDC FAST negotiation response error") + } + ab, _ := asReq.Marshal() + if !etype.VerifyChecksum(key.KeyValue, ab, pafast.Chksum, keyusage.KEY_USAGE_AS_REQ) { + return false, krberror.Errorf(err, krberror.ChksumError, "KDC FAST negotiation response checksum invalid") + } + } + } + } + return true, nil +} + +// DecryptEncPart decrypts the encrypted part of an TGS_REP. +func (k *TGSRep) DecryptEncPart(key types.EncryptionKey) error { + b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.TGS_REP_ENCPART_SESSION_KEY) + if err != nil { + return krberror.Errorf(err, krberror.DecryptingError, "error decrypting TGS_REP EncPart") + } + var denc EncKDCRepPart + err = denc.Unmarshal(b) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling encrypted part") + } + k.DecryptedEncPart = denc + return nil +} + +// Verify checks the validity of the TGS_REP message. +func (k *TGSRep) Verify(cfg *config.Config, tgsReq TGSReq) (bool, error) { + if k.CName.NameType != tgsReq.ReqBody.CName.NameType || k.CName.NameString == nil { + return false, krberror.NewErrorf(krberror.KRBMsgError, "CName type in response does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.CName, k.CName) + } + for i := range k.CName.NameString { + if k.CName.NameString[i] != tgsReq.ReqBody.CName.NameString[i] { + return false, krberror.NewErrorf(krberror.KRBMsgError, "CName in response does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.CName, k.CName) + } + } + if k.Ticket.Realm != tgsReq.ReqBody.Realm { + return false, krberror.NewErrorf(krberror.KRBMsgError, "realm in response ticket does not match what was requested. Requested: %s; Reply: %s", tgsReq.ReqBody.Realm, k.Ticket.Realm) + } + if k.DecryptedEncPart.Nonce != tgsReq.ReqBody.Nonce { + return false, krberror.NewErrorf(krberror.KRBMsgError, "possible replay attack, nonce in response does not match that in request") + } + //if k.Ticket.SName.NameType != tgsReq.ReqBody.SName.NameType || k.Ticket.SName.NameString == nil { + // return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response ticket does not match what was requested. Requested: %v; Reply: %v", tgsReq.ReqBody.SName, k.Ticket.SName) + //} + //for i := range k.Ticket.SName.NameString { + // if k.Ticket.SName.NameString[i] != tgsReq.ReqBody.SName.NameString[i] { + // return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response ticket does not match what was requested. 
Requested: %+v; Reply: %+v", tgsReq.ReqBody.SName, k.Ticket.SName) + // } + //} + //if k.DecryptedEncPart.SName.NameType != tgsReq.ReqBody.SName.NameType || k.DecryptedEncPart.SName.NameString == nil { + // return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %v; Reply: %v", tgsReq.ReqBody.SName, k.DecryptedEncPart.SName) + //} + //for i := range k.DecryptedEncPart.SName.NameString { + // if k.DecryptedEncPart.SName.NameString[i] != tgsReq.ReqBody.SName.NameString[i] { + // return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.SName, k.DecryptedEncPart.SName) + // } + //} + if k.DecryptedEncPart.SRealm != tgsReq.ReqBody.Realm { + return false, krberror.NewErrorf(krberror.KRBMsgError, "SRealm in response does not match what was requested. Requested: %s; Reply: %s", tgsReq.ReqBody.Realm, k.DecryptedEncPart.SRealm) + } + if len(k.DecryptedEncPart.CAddr) > 0 { + if !types.HostAddressesEqual(k.DecryptedEncPart.CAddr, tgsReq.ReqBody.Addresses) { + return false, krberror.NewErrorf(krberror.KRBMsgError, "addresses listed in the TGS_REP does not match those listed in the TGS_REQ") + } + } + if time.Since(k.DecryptedEncPart.StartTime) > cfg.LibDefaults.Clockskew || k.DecryptedEncPart.StartTime.Sub(time.Now().UTC()) > cfg.LibDefaults.Clockskew { + if time.Since(k.DecryptedEncPart.AuthTime) > cfg.LibDefaults.Clockskew || k.DecryptedEncPart.AuthTime.Sub(time.Now().UTC()) > cfg.LibDefaults.Clockskew { + return false, krberror.NewErrorf(krberror.KRBMsgError, "clock skew with KDC too large. Greater than %v seconds.", cfg.LibDefaults.Clockskew.Seconds()) + } + } + return true, nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KDCReq.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KDCReq.go new file mode 100644 index 00000000..f75ddc4e --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KDCReq.go @@ -0,0 +1,432 @@ +package messages + +// Reference: https://www.ietf.org/rfc/rfc4120.txt +// Section: 5.4.1 + +import ( + "crypto/rand" + "fmt" + "math" + "math/big" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "gopkg.in/jcmturner/gokrb5.v7/asn1tools" + "gopkg.in/jcmturner/gokrb5.v7/config" + "gopkg.in/jcmturner/gokrb5.v7/crypto" + "gopkg.in/jcmturner/gokrb5.v7/iana" + "gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag" + "gopkg.in/jcmturner/gokrb5.v7/iana/flags" + "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage" + "gopkg.in/jcmturner/gokrb5.v7/iana/msgtype" + "gopkg.in/jcmturner/gokrb5.v7/iana/nametype" + "gopkg.in/jcmturner/gokrb5.v7/iana/patype" + "gopkg.in/jcmturner/gokrb5.v7/krberror" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +type marshalKDCReq struct { + PVNO int `asn1:"explicit,tag:1"` + MsgType int `asn1:"explicit,tag:2"` + PAData types.PADataSequence `asn1:"explicit,optional,tag:3"` + ReqBody asn1.RawValue `asn1:"explicit,tag:4"` +} + +// KDCReqFields represents the KRB_KDC_REQ fields. +type KDCReqFields struct { + PVNO int + MsgType int + PAData types.PADataSequence + ReqBody KDCReqBody + Renewal bool +} + +// ASReq implements RFC 4120 KRB_AS_REQ: https://tools.ietf.org/html/rfc4120#section-5.4.1. +type ASReq struct { + KDCReqFields +} + +// TGSReq implements RFC 4120 KRB_TGS_REQ: https://tools.ietf.org/html/rfc4120#section-5.4.1. 
+type TGSReq struct { + KDCReqFields +} + +type marshalKDCReqBody struct { + KDCOptions asn1.BitString `asn1:"explicit,tag:0"` + CName types.PrincipalName `asn1:"explicit,optional,tag:1"` + Realm string `asn1:"generalstring,explicit,tag:2"` + SName types.PrincipalName `asn1:"explicit,optional,tag:3"` + From time.Time `asn1:"generalized,explicit,optional,tag:4"` + Till time.Time `asn1:"generalized,explicit,tag:5"` + RTime time.Time `asn1:"generalized,explicit,optional,tag:6"` + Nonce int `asn1:"explicit,tag:7"` + EType []int32 `asn1:"explicit,tag:8"` + Addresses []types.HostAddress `asn1:"explicit,optional,tag:9"` + EncAuthData types.EncryptedData `asn1:"explicit,optional,tag:10"` + // Ticket needs to be a raw value as it is wrapped in an APPLICATION tag + AdditionalTickets asn1.RawValue `asn1:"explicit,optional,tag:11"` +} + +// KDCReqBody implements the KRB_KDC_REQ request body. +type KDCReqBody struct { + KDCOptions asn1.BitString `asn1:"explicit,tag:0"` + CName types.PrincipalName `asn1:"explicit,optional,tag:1"` + Realm string `asn1:"generalstring,explicit,tag:2"` + SName types.PrincipalName `asn1:"explicit,optional,tag:3"` + From time.Time `asn1:"generalized,explicit,optional,tag:4"` + Till time.Time `asn1:"generalized,explicit,tag:5"` + RTime time.Time `asn1:"generalized,explicit,optional,tag:6"` + Nonce int `asn1:"explicit,tag:7"` + EType []int32 `asn1:"explicit,tag:8"` + Addresses []types.HostAddress `asn1:"explicit,optional,tag:9"` + EncAuthData types.EncryptedData `asn1:"explicit,optional,tag:10"` + AdditionalTickets []Ticket `asn1:"explicit,optional,tag:11"` +} + +// NewASReqForTGT generates a new KRB_AS_REQ struct for a TGT request. +func NewASReqForTGT(realm string, c *config.Config, cname types.PrincipalName) (ASReq, error) { + sname := types.PrincipalName{ + NameType: nametype.KRB_NT_SRV_INST, + NameString: []string{"krbtgt", realm}, + } + return NewASReq(realm, c, cname, sname) +} + +// NewASReqForChgPasswd generates a new KRB_AS_REQ struct for a change password request. +func NewASReqForChgPasswd(realm string, c *config.Config, cname types.PrincipalName) (ASReq, error) { + sname := types.PrincipalName{ + NameType: nametype.KRB_NT_PRINCIPAL, + NameString: []string{"kadmin", "changepw"}, + } + return NewASReq(realm, c, cname, sname) +} + +// NewASReq generates a new KRB_AS_REQ struct for a given SNAME. 
+func NewASReq(realm string, c *config.Config, cname, sname types.PrincipalName) (ASReq, error) { + nonce, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt32)) + if err != nil { + return ASReq{}, err + } + t := time.Now().UTC() + // Copy the default options to make this thread safe + kopts := types.NewKrbFlags() + copy(kopts.Bytes, c.LibDefaults.KDCDefaultOptions.Bytes) + kopts.BitLength = c.LibDefaults.KDCDefaultOptions.BitLength + a := ASReq{ + KDCReqFields{ + PVNO: iana.PVNO, + MsgType: msgtype.KRB_AS_REQ, + PAData: types.PADataSequence{}, + ReqBody: KDCReqBody{ + KDCOptions: kopts, + Realm: realm, + CName: cname, + SName: sname, + Till: t.Add(c.LibDefaults.TicketLifetime), + Nonce: int(nonce.Int64()), + EType: c.LibDefaults.DefaultTktEnctypeIDs, + }, + }, + } + if c.LibDefaults.Forwardable { + types.SetFlag(&a.ReqBody.KDCOptions, flags.Forwardable) + } + if c.LibDefaults.Canonicalize { + types.SetFlag(&a.ReqBody.KDCOptions, flags.Canonicalize) + } + if c.LibDefaults.Proxiable { + types.SetFlag(&a.ReqBody.KDCOptions, flags.Proxiable) + } + if c.LibDefaults.RenewLifetime != 0 { + types.SetFlag(&a.ReqBody.KDCOptions, flags.Renewable) + a.ReqBody.RTime = t.Add(c.LibDefaults.RenewLifetime) + a.ReqBody.RTime = t.Add(time.Duration(48) * time.Hour) + } + if !c.LibDefaults.NoAddresses { + ha, err := types.LocalHostAddresses() + if err != nil { + return a, fmt.Errorf("could not get local addresses: %v", err) + } + ha = append(ha, types.HostAddressesFromNetIPs(c.LibDefaults.ExtraAddresses)...) + a.ReqBody.Addresses = ha + } + return a, nil +} + +// NewTGSReq generates a new KRB_TGS_REQ struct. +func NewTGSReq(cname types.PrincipalName, kdcRealm string, c *config.Config, tgt Ticket, sessionKey types.EncryptionKey, sname types.PrincipalName, renewal bool) (TGSReq, error) { + a, err := tgsReq(cname, sname, kdcRealm, renewal, c) + if err != nil { + return a, err + } + err = a.setPAData(tgt, sessionKey) + return a, err +} + +// NewUser2UserTGSReq returns a TGS-REQ suitable for user-to-user authentication (https://tools.ietf.org/html/rfc4120#section-3.7) +func NewUser2UserTGSReq(cname types.PrincipalName, kdcRealm string, c *config.Config, clientTGT Ticket, sessionKey types.EncryptionKey, sname types.PrincipalName, renewal bool, verifyingTGT Ticket) (TGSReq, error) { + a, err := tgsReq(cname, sname, kdcRealm, renewal, c) + if err != nil { + return a, err + } + a.ReqBody.AdditionalTickets = []Ticket{verifyingTGT} + types.SetFlag(&a.ReqBody.KDCOptions, flags.EncTktInSkey) + err = a.setPAData(clientTGT, sessionKey) + return a, err +} + +// tgsReq populates the fields for a TGS_REQ +func tgsReq(cname, sname types.PrincipalName, kdcRealm string, renewal bool, c *config.Config) (TGSReq, error) { + nonce, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt32)) + if err != nil { + return TGSReq{}, err + } + t := time.Now().UTC() + k := KDCReqFields{ + PVNO: iana.PVNO, + MsgType: msgtype.KRB_TGS_REQ, + ReqBody: KDCReqBody{ + KDCOptions: types.NewKrbFlags(), + Realm: kdcRealm, + CName: cname, // Add the CName to make validation of the reply easier + SName: sname, + Till: t.Add(c.LibDefaults.TicketLifetime), + Nonce: int(nonce.Int64()), + EType: c.LibDefaults.DefaultTGSEnctypeIDs, + }, + Renewal: renewal, + } + if c.LibDefaults.Forwardable { + types.SetFlag(&k.ReqBody.KDCOptions, flags.Forwardable) + } + if c.LibDefaults.Canonicalize { + types.SetFlag(&k.ReqBody.KDCOptions, flags.Canonicalize) + } + if c.LibDefaults.Proxiable { + types.SetFlag(&k.ReqBody.KDCOptions, flags.Proxiable) + } + if 
c.LibDefaults.RenewLifetime > time.Duration(0) { + types.SetFlag(&k.ReqBody.KDCOptions, flags.Renewable) + k.ReqBody.RTime = t.Add(c.LibDefaults.RenewLifetime) + } + if !c.LibDefaults.NoAddresses { + ha, err := types.LocalHostAddresses() + if err != nil { + return TGSReq{}, fmt.Errorf("could not get local addresses: %v", err) + } + ha = append(ha, types.HostAddressesFromNetIPs(c.LibDefaults.ExtraAddresses)...) + k.ReqBody.Addresses = ha + } + if renewal { + types.SetFlag(&k.ReqBody.KDCOptions, flags.Renew) + types.SetFlag(&k.ReqBody.KDCOptions, flags.Renewable) + } + return TGSReq{ + k, + }, nil +} + +func (k *TGSReq) setPAData(tgt Ticket, sessionKey types.EncryptionKey) error { + // Marshal the request and calculate checksum + b, err := k.ReqBody.Marshal() + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error marshaling TGS_REQ body") + } + etype, err := crypto.GetEtype(sessionKey.KeyType) + if err != nil { + return krberror.Errorf(err, krberror.EncryptingError, "error getting etype to encrypt authenticator") + } + cb, err := etype.GetChecksumHash(sessionKey.KeyValue, b, keyusage.TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR_CHKSUM) + if err != nil { + return krberror.Errorf(err, krberror.ChksumError, "error getting etype checksum hash") + } + + // Form PAData for TGS_REQ + // Create authenticator + auth, err := types.NewAuthenticator(tgt.Realm, k.ReqBody.CName) + if err != nil { + return krberror.Errorf(err, krberror.KRBMsgError, "error generating new authenticator") + } + auth.Cksum = types.Checksum{ + CksumType: etype.GetHashID(), + Checksum: cb, + } + // Create AP_REQ + apReq, err := NewAPReq(tgt, sessionKey, auth) + if err != nil { + return krberror.Errorf(err, krberror.KRBMsgError, "error generating new AP_REQ") + } + apb, err := apReq.Marshal() + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error marshaling AP_REQ for pre-authentication data") + } + k.PAData = types.PADataSequence{ + types.PAData{ + PADataType: patype.PA_TGS_REQ, + PADataValue: apb, + }, + } + return nil +} + +// Unmarshal bytes b into the ASReq struct. +func (k *ASReq) Unmarshal(b []byte) error { + var m marshalKDCReq + _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.ASREQ)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling AS_REQ") + } + expectedMsgType := msgtype.KRB_AS_REQ + if m.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a AS_REQ. Expected: %v; Actual: %v", expectedMsgType, m.MsgType) + } + var reqb KDCReqBody + err = reqb.Unmarshal(m.ReqBody.Bytes) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error processing AS_REQ body") + } + k.MsgType = m.MsgType + k.PAData = m.PAData + k.PVNO = m.PVNO + k.ReqBody = reqb + return nil +} + +// Unmarshal bytes b into the TGSReq struct. +func (k *TGSReq) Unmarshal(b []byte) error { + var m marshalKDCReq + _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.TGSREQ)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling TGS_REQ") + } + expectedMsgType := msgtype.KRB_TGS_REQ + if m.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a TGS_REQ. 
Expected: %v; Actual: %v", expectedMsgType, m.MsgType) + } + var reqb KDCReqBody + err = reqb.Unmarshal(m.ReqBody.Bytes) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error processing TGS_REQ body") + } + k.MsgType = m.MsgType + k.PAData = m.PAData + k.PVNO = m.PVNO + k.ReqBody = reqb + return nil +} + +// Unmarshal bytes b into the KRB_KDC_REQ body struct. +func (k *KDCReqBody) Unmarshal(b []byte) error { + var m marshalKDCReqBody + _, err := asn1.Unmarshal(b, &m) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling KDC_REQ body") + } + k.KDCOptions = m.KDCOptions + if len(k.KDCOptions.Bytes) < 4 { + tb := make([]byte, 4-len(k.KDCOptions.Bytes)) + k.KDCOptions.Bytes = append(tb, k.KDCOptions.Bytes...) + k.KDCOptions.BitLength = len(k.KDCOptions.Bytes) * 8 + } + k.CName = m.CName + k.Realm = m.Realm + k.SName = m.SName + k.From = m.From + k.Till = m.Till + k.RTime = m.RTime + k.Nonce = m.Nonce + k.EType = m.EType + k.Addresses = m.Addresses + k.EncAuthData = m.EncAuthData + if len(m.AdditionalTickets.Bytes) > 0 { + k.AdditionalTickets, err = unmarshalTicketsSequence(m.AdditionalTickets) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling additional tickets") + } + } + return nil +} + +// Marshal ASReq struct. +func (k *ASReq) Marshal() ([]byte, error) { + m := marshalKDCReq{ + PVNO: k.PVNO, + MsgType: k.MsgType, + PAData: k.PAData, + } + b, err := k.ReqBody.Marshal() + if err != nil { + var mk []byte + return mk, err + } + m.ReqBody = asn1.RawValue{ + Class: asn1.ClassContextSpecific, + IsCompound: true, + Tag: 4, + Bytes: b, + } + mk, err := asn1.Marshal(m) + if err != nil { + return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling AS_REQ") + } + mk = asn1tools.AddASNAppTag(mk, asnAppTag.ASREQ) + return mk, nil +} + +// Marshal TGSReq struct. +func (k *TGSReq) Marshal() ([]byte, error) { + m := marshalKDCReq{ + PVNO: k.PVNO, + MsgType: k.MsgType, + PAData: k.PAData, + } + b, err := k.ReqBody.Marshal() + if err != nil { + var mk []byte + return mk, err + } + m.ReqBody = asn1.RawValue{ + Class: asn1.ClassContextSpecific, + IsCompound: true, + Tag: 4, + Bytes: b, + } + mk, err := asn1.Marshal(m) + if err != nil { + return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling AS_REQ") + } + mk = asn1tools.AddASNAppTag(mk, asnAppTag.TGSREQ) + return mk, nil +} + +// Marshal KRB_KDC_REQ body struct. 
+func (k *KDCReqBody) Marshal() ([]byte, error) { + var b []byte + m := marshalKDCReqBody{ + KDCOptions: k.KDCOptions, + CName: k.CName, + Realm: k.Realm, + SName: k.SName, + From: k.From, + Till: k.Till, + RTime: k.RTime, + Nonce: k.Nonce, + EType: k.EType, + Addresses: k.Addresses, + EncAuthData: k.EncAuthData, + } + rawtkts, err := MarshalTicketSequence(k.AdditionalTickets) + if err != nil { + return b, krberror.Errorf(err, krberror.EncodingError, "error in marshaling KDC request body additional tickets") + } + //The asn1.rawValue needs the tag setting on it for where it is in the KDCReqBody + rawtkts.Tag = 11 + if len(rawtkts.Bytes) > 0 { + m.AdditionalTickets = rawtkts + } + b, err = asn1.Marshal(m) + if err != nil { + return b, krberror.Errorf(err, krberror.EncodingError, "error in marshaling KDC request body") + } + return b, nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBCred.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBCred.go new file mode 100644 index 00000000..380cf80f --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBCred.go @@ -0,0 +1,102 @@ +package messages + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "gopkg.in/jcmturner/gokrb5.v7/crypto" + "gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag" + "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage" + "gopkg.in/jcmturner/gokrb5.v7/iana/msgtype" + "gopkg.in/jcmturner/gokrb5.v7/krberror" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +type marshalKRBCred struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + Tickets asn1.RawValue `asn1:"explicit,tag:2"` + EncPart types.EncryptedData `asn1:"explicit,tag:3"` +} + +// KRBCred implements RFC 4120 KRB_CRED: https://tools.ietf.org/html/rfc4120#section-5.8.1. +type KRBCred struct { + PVNO int + MsgType int + Tickets []Ticket + EncPart types.EncryptedData + DecryptedEncPart EncKrbCredPart +} + +// EncKrbCredPart is the encrypted part of KRB_CRED. +type EncKrbCredPart struct { + TicketInfo []KrbCredInfo `asn1:"explicit,tag:0"` + Nouce int `asn1:"optional,explicit,tag:1"` + Timestamp time.Time `asn1:"generalized,optional,explicit,tag:2"` + Usec int `asn1:"optional,explicit,tag:3"` + SAddress types.HostAddress `asn1:"optional,explicit,tag:4"` + RAddress types.HostAddress `asn1:"optional,explicit,tag:5"` +} + +// KrbCredInfo is the KRB_CRED_INFO part of KRB_CRED. +type KrbCredInfo struct { + Key types.EncryptionKey `asn1:"explicit,tag:0"` + PRealm string `asn1:"generalstring,optional,explicit,tag:1"` + PName types.PrincipalName `asn1:"optional,explicit,tag:2"` + Flags asn1.BitString `asn1:"optional,explicit,tag:3"` + AuthTime time.Time `asn1:"generalized,optional,explicit,tag:4"` + StartTime time.Time `asn1:"generalized,optional,explicit,tag:5"` + EndTime time.Time `asn1:"generalized,optional,explicit,tag:6"` + RenewTill time.Time `asn1:"generalized,optional,explicit,tag:7"` + SRealm string `asn1:"optional,explicit,ia5,tag:8"` + SName types.PrincipalName `asn1:"optional,explicit,tag:9"` + CAddr types.HostAddresses `asn1:"optional,explicit,tag:10"` +} + +// Unmarshal bytes b into the KRBCred struct. 
+func (k *KRBCred) Unmarshal(b []byte) error { + var m marshalKRBCred + _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBCred)) + if err != nil { + return processUnmarshalReplyError(b, err) + } + expectedMsgType := msgtype.KRB_CRED + if m.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_CRED. Expected: %v; Actual: %v", expectedMsgType, m.MsgType) + } + k.PVNO = m.PVNO + k.MsgType = m.MsgType + k.EncPart = m.EncPart + if len(m.Tickets.Bytes) > 0 { + k.Tickets, err = unmarshalTicketsSequence(m.Tickets) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling tickets within KRB_CRED") + } + } + return nil +} + +// DecryptEncPart decrypts the encrypted part of a KRB_CRED. +func (k *KRBCred) DecryptEncPart(key types.EncryptionKey) error { + b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.KRB_CRED_ENCPART) + if err != nil { + return krberror.Errorf(err, krberror.DecryptingError, "error decrypting KRB_CRED EncPart") + } + var denc EncKrbCredPart + err = denc.Unmarshal(b) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling encrypted part of KRB_CRED") + } + k.DecryptedEncPart = denc + return nil +} + +// Unmarshal bytes b into the encrypted part of KRB_CRED. +func (k *EncKrbCredPart) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncKrbCredPart)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling EncKrbCredPart") + } + return nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBError.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBError.go new file mode 100644 index 00000000..5aa9def6 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBError.go @@ -0,0 +1,83 @@ +// Package messages implements Kerberos 5 message types and methods. +package messages + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "gopkg.in/jcmturner/gokrb5.v7/iana" + "gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag" + "gopkg.in/jcmturner/gokrb5.v7/iana/errorcode" + "gopkg.in/jcmturner/gokrb5.v7/iana/msgtype" + "gopkg.in/jcmturner/gokrb5.v7/krberror" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +// KRBError implements RFC 4120 KRB_ERROR: https://tools.ietf.org/html/rfc4120#section-5.9.1. +type KRBError struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + CTime time.Time `asn1:"generalized,optional,explicit,tag:2"` + Cusec int `asn1:"optional,explicit,tag:3"` + STime time.Time `asn1:"generalized,explicit,tag:4"` + Susec int `asn1:"explicit,tag:5"` + ErrorCode int32 `asn1:"explicit,tag:6"` + CRealm string `asn1:"generalstring,optional,explicit,tag:7"` + CName types.PrincipalName `asn1:"optional,explicit,tag:8"` + Realm string `asn1:"generalstring,explicit,tag:9"` + SName types.PrincipalName `asn1:"explicit,tag:10"` + EText string `asn1:"generalstring,optional,explicit,tag:11"` + EData []byte `asn1:"optional,explicit,tag:12"` +} + +// NewKRBError creates a new KRBError. 
+func NewKRBError(sname types.PrincipalName, realm string, code int32, etext string) KRBError { + t := time.Now().UTC() + return KRBError{ + PVNO: iana.PVNO, + MsgType: msgtype.KRB_ERROR, + STime: t, + Susec: int((t.UnixNano() / int64(time.Microsecond)) - (t.Unix() * 1e6)), + ErrorCode: code, + SName: sname, + Realm: realm, + EText: etext, + } +} + +// Unmarshal bytes b into the KRBError struct. +func (k *KRBError) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBError)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "KRB_ERROR unmarshal error") + } + expectedMsgType := msgtype.KRB_ERROR + if k.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_ERROR. Expected: %v; Actual: %v", expectedMsgType, k.MsgType) + } + return nil +} + +// Error method implementing error interface on KRBError struct. +func (k KRBError) Error() string { + etxt := fmt.Sprintf("KRB Error: %s", errorcode.Lookup(k.ErrorCode)) + if k.EText != "" { + etxt = fmt.Sprintf("%s - %s", etxt, k.EText) + } + return etxt +} + +func processUnmarshalReplyError(b []byte, err error) error { + switch err.(type) { + case asn1.StructuralError: + var krberr KRBError + tmperr := krberr.Unmarshal(b) + if tmperr != nil { + return krberror.Errorf(err, krberror.EncodingError, "failed to unmarshal KDC's reply") + } + return krberr + default: + return krberror.Errorf(err, krberror.EncodingError, "failed to unmarshal KDC's reply") + } +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBPriv.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBPriv.go new file mode 100644 index 00000000..ebc5d3d0 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBPriv.go @@ -0,0 +1,108 @@ +package messages + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "gopkg.in/jcmturner/gokrb5.v7/asn1tools" + "gopkg.in/jcmturner/gokrb5.v7/crypto" + "gopkg.in/jcmturner/gokrb5.v7/iana" + "gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag" + "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage" + "gopkg.in/jcmturner/gokrb5.v7/iana/msgtype" + "gopkg.in/jcmturner/gokrb5.v7/krberror" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +// KRBPriv implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.7.1. +type KRBPriv struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + EncPart types.EncryptedData `asn1:"explicit,tag:3"` + DecryptedEncPart EncKrbPrivPart `asn1:"optional,omitempty"` // Not part of ASN1 bytes so marked as optional so unmarshalling works +} + +// EncKrbPrivPart is the encrypted part of KRB_PRIV. +type EncKrbPrivPart struct { + UserData []byte `asn1:"explicit,tag:0"` + Timestamp time.Time `asn1:"generalized,optional,explicit,tag:1"` + Usec int `asn1:"optional,explicit,tag:2"` + SequenceNumber int64 `asn1:"optional,explicit,tag:3"` + SAddress types.HostAddress `asn1:"explicit,tag:4"` + RAddress types.HostAddress `asn1:"optional,explicit,tag:5"` +} + +// NewKRBPriv returns a new KRBPriv type. +func NewKRBPriv(part EncKrbPrivPart) KRBPriv { + return KRBPriv{ + PVNO: iana.PVNO, + MsgType: msgtype.KRB_PRIV, + DecryptedEncPart: part, + } +} + +// Unmarshal bytes b into the KRBPriv struct. 
+func (k *KRBPriv) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBPriv)) + if err != nil { + return processUnmarshalReplyError(b, err) + } + expectedMsgType := msgtype.KRB_PRIV + if k.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_PRIV. Expected: %v; Actual: %v", expectedMsgType, k.MsgType) + } + return nil +} + +// Unmarshal bytes b into the EncKrbPrivPart struct. +func (k *EncKrbPrivPart) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncKrbPrivPart)) + if err != nil { + return krberror.Errorf(err, krberror.EncodingError, "KRB_PRIV unmarshal error") + } + return nil +} + +// Marshal the KRBPriv. +func (k *KRBPriv) Marshal() ([]byte, error) { + tk := KRBPriv{ + PVNO: k.PVNO, + MsgType: k.MsgType, + EncPart: k.EncPart, + } + b, err := asn1.Marshal(tk) + if err != nil { + return []byte{}, err + } + b = asn1tools.AddASNAppTag(b, asnAppTag.KRBPriv) + return b, nil +} + +// EncryptEncPart encrypts the DecryptedEncPart within the KRBPriv. +// Use to prepare for marshaling. +func (k *KRBPriv) EncryptEncPart(key types.EncryptionKey) error { + b, err := asn1.Marshal(k.DecryptedEncPart) + if err != nil { + return err + } + b = asn1tools.AddASNAppTag(b, asnAppTag.EncKrbPrivPart) + k.EncPart, err = crypto.GetEncryptedData(b, key, keyusage.KRB_PRIV_ENCPART, 1) + if err != nil { + return err + } + return nil +} + +// DecryptEncPart decrypts the encrypted part of the KRBPriv message. +func (k *KRBPriv) DecryptEncPart(key types.EncryptionKey) error { + b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.KRB_PRIV_ENCPART) + if err != nil { + return fmt.Errorf("error decrypting KRBPriv EncPart: %v", err) + } + err = k.DecryptedEncPart.Unmarshal(b) + if err != nil { + return fmt.Errorf("error unmarshaling encrypted part: %v", err) + } + return nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBSafe.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBSafe.go new file mode 100644 index 00000000..9c5acc1d --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBSafe.go @@ -0,0 +1,61 @@ +package messages + +import ( + "fmt" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag" + "gopkg.in/jcmturner/gokrb5.v7/iana/msgtype" + "gopkg.in/jcmturner/gokrb5.v7/krberror" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +/* +KRB-SAFE ::= [APPLICATION 20] SEQUENCE { + pvno [0] INTEGER (5), + msg-type [1] INTEGER (20), + safe-body [2] KRB-SAFE-BODY, + cksum [3] Checksum +} + +KRB-SAFE-BODY ::= SEQUENCE { + user-data [0] OCTET STRING, + timestamp [1] KerberosTime OPTIONAL, + usec [2] Microseconds OPTIONAL, + seq-number [3] UInt32 OPTIONAL, + s-address [4] HostAddress, + r-address [5] HostAddress OPTIONAL +} +*/ + +// KRBSafe implements RFC 4120 KRB_SAFE: https://tools.ietf.org/html/rfc4120#section-5.6.1. +type KRBSafe struct { + PVNO int `asn1:"explicit,tag:0"` + MsgType int `asn1:"explicit,tag:1"` + SafeBody KRBSafeBody `asn1:"explicit,tag:2"` + Cksum types.Checksum `asn1:"explicit,tag:3"` +} + +// KRBSafeBody implements the KRB_SAFE_BODY of KRB_SAFE. 
+type KRBSafeBody struct { + UserData []byte `asn1:"explicit,tag:0"` + Timestamp time.Time `asn1:"generalized,optional,explicit,tag:1"` + Usec int `asn1:"optional,explicit,tag:2"` + SequenceNumber int64 `asn1:"optional,explicit,tag:3"` + SAddress types.HostAddress `asn1:"explicit,tag:4"` + RAddress types.HostAddress `asn1:"optional,explicit,tag:5"` +} + +// Unmarshal bytes b into the KRBSafe struct. +func (s *KRBSafe) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, s, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBSafe)) + if err != nil { + return processUnmarshalReplyError(b, err) + } + expectedMsgType := msgtype.KRB_SAFE + if s.MsgType != expectedMsgType { + return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_SAFE. Expected: %v; Actual: %v", expectedMsgType, s.MsgType) + } + return nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/Ticket.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/Ticket.go new file mode 100644 index 00000000..49664b81 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/Ticket.go @@ -0,0 +1,265 @@ +package messages + +import ( + "crypto/rand" + "fmt" + "log" + "time" + + "github.com/jcmturner/gofork/encoding/asn1" + "gopkg.in/jcmturner/gokrb5.v7/asn1tools" + "gopkg.in/jcmturner/gokrb5.v7/crypto" + "gopkg.in/jcmturner/gokrb5.v7/iana" + "gopkg.in/jcmturner/gokrb5.v7/iana/adtype" + "gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag" + "gopkg.in/jcmturner/gokrb5.v7/iana/errorcode" + "gopkg.in/jcmturner/gokrb5.v7/iana/flags" + "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage" + "gopkg.in/jcmturner/gokrb5.v7/keytab" + "gopkg.in/jcmturner/gokrb5.v7/krberror" + "gopkg.in/jcmturner/gokrb5.v7/pac" + "gopkg.in/jcmturner/gokrb5.v7/types" +) + +// Reference: https://www.ietf.org/rfc/rfc4120.txt +// Section: 5.3 + +// Ticket implements the Kerberos ticket. +type Ticket struct { + TktVNO int `asn1:"explicit,tag:0"` + Realm string `asn1:"generalstring,explicit,tag:1"` + SName types.PrincipalName `asn1:"explicit,tag:2"` + EncPart types.EncryptedData `asn1:"explicit,tag:3"` + DecryptedEncPart EncTicketPart `asn1:"optional"` // Not part of ASN1 bytes so marked as optional so unmarshalling works +} + +// EncTicketPart is the encrypted part of the Ticket. +type EncTicketPart struct { + Flags asn1.BitString `asn1:"explicit,tag:0"` + Key types.EncryptionKey `asn1:"explicit,tag:1"` + CRealm string `asn1:"generalstring,explicit,tag:2"` + CName types.PrincipalName `asn1:"explicit,tag:3"` + Transited TransitedEncoding `asn1:"explicit,tag:4"` + AuthTime time.Time `asn1:"generalized,explicit,tag:5"` + StartTime time.Time `asn1:"generalized,explicit,optional,tag:6"` + EndTime time.Time `asn1:"generalized,explicit,tag:7"` + RenewTill time.Time `asn1:"generalized,explicit,optional,tag:8"` + CAddr types.HostAddresses `asn1:"explicit,optional,tag:9"` + AuthorizationData types.AuthorizationData `asn1:"explicit,optional,tag:10"` +} + +// TransitedEncoding part of the ticket's encrypted part. +type TransitedEncoding struct { + TRType int32 `asn1:"explicit,tag:0"` + Contents []byte `asn1:"explicit,tag:1"` +} + +// NewTicket creates a new Ticket instance. 
+func NewTicket(cname types.PrincipalName, crealm string, sname types.PrincipalName, srealm string, flags asn1.BitString, sktab *keytab.Keytab, eTypeID int32, kvno int, authTime, startTime, endTime, renewTill time.Time) (Ticket, types.EncryptionKey, error) { + etype, err := crypto.GetEtype(eTypeID) + if err != nil { + return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error getting etype for new ticket") + } + ks := etype.GetKeyByteSize() + kv := make([]byte, ks, ks) + rand.Read(kv) + sessionKey := types.EncryptionKey{ + KeyType: eTypeID, + KeyValue: kv, + } + etp := EncTicketPart{ + Flags: flags, + Key: sessionKey, + CRealm: crealm, + CName: cname, + Transited: TransitedEncoding{}, + AuthTime: authTime, + StartTime: startTime, + EndTime: endTime, + RenewTill: renewTill, + } + b, err := asn1.Marshal(etp) + if err != nil { + return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncodingError, "error marshalling ticket encpart") + } + b = asn1tools.AddASNAppTag(b, asnAppTag.EncTicketPart) + skey, err := sktab.GetEncryptionKey(sname, srealm, kvno, eTypeID) + if err != nil { + return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error getting encryption key for new ticket") + } + ed, err := crypto.GetEncryptedData(b, skey, keyusage.KDC_REP_TICKET, kvno) + if err != nil { + return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error encrypting ticket encpart") + } + tkt := Ticket{ + TktVNO: iana.PVNO, + Realm: srealm, + SName: sname, + EncPart: ed, + } + return tkt, sessionKey, nil +} + +// Unmarshal bytes b into a Ticket struct. +func (t *Ticket) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, t, fmt.Sprintf("application,explicit,tag:%d", asnAppTag.Ticket)) + return err +} + +// Marshal the Ticket. +func (t *Ticket) Marshal() ([]byte, error) { + b, err := asn1.Marshal(*t) + if err != nil { + return nil, err + } + b = asn1tools.AddASNAppTag(b, asnAppTag.Ticket) + return b, nil +} + +// Unmarshal bytes b into the EncTicketPart struct. +func (t *EncTicketPart) Unmarshal(b []byte) error { + _, err := asn1.UnmarshalWithParams(b, t, fmt.Sprintf("application,explicit,tag:%d", asnAppTag.EncTicketPart)) + return err +} + +// unmarshalTicket returns a ticket from the bytes provided. +func unmarshalTicket(b []byte) (t Ticket, err error) { + err = t.Unmarshal(b) + return +} + +// UnmarshalTicketsSequence returns a slice of Tickets from a raw ASN1 value. +func unmarshalTicketsSequence(in asn1.RawValue) ([]Ticket, error) { + //This is a workaround to a asn1 decoding issue in golang - https://github.com/golang/go/issues/17321. It's not pretty I'm afraid + //We pull out raw values from the larger raw value (that is actually the data of the sequence of raw values) and track our position moving along the data. 
+ b := in.Bytes + // Ignore the head of the asn1 stream (1 byte for tag and those for the length) as this is what tells us its a sequence but we're handling it ourselves + p := 1 + asn1tools.GetNumberBytesInLengthHeader(in.Bytes) + var tkts []Ticket + var raw asn1.RawValue + for p < (len(b)) { + _, err := asn1.UnmarshalWithParams(b[p:], &raw, fmt.Sprintf("application,tag:%d", asnAppTag.Ticket)) + if err != nil { + return nil, fmt.Errorf("unmarshaling sequence of tickets failed getting length of ticket: %v", err) + } + t, err := unmarshalTicket(b[p:]) + if err != nil { + return nil, fmt.Errorf("unmarshaling sequence of tickets failed: %v", err) + } + p += len(raw.FullBytes) + tkts = append(tkts, t) + } + MarshalTicketSequence(tkts) + return tkts, nil +} + +// MarshalTicketSequence marshals a slice of Tickets returning an ASN1 raw value containing the ticket sequence. +func MarshalTicketSequence(tkts []Ticket) (asn1.RawValue, error) { + raw := asn1.RawValue{ + Class: 2, + IsCompound: true, + } + if len(tkts) < 1 { + // There are no tickets to marshal + return raw, nil + } + var btkts []byte + for i, t := range tkts { + b, err := t.Marshal() + if err != nil { + return raw, fmt.Errorf("error marshaling ticket number %d in sequence of tickets", i+1) + } + btkts = append(btkts, b...) + } + // The ASN1 wrapping consists of 2 bytes: + // 1st byte -> Identifier Octet - In this case an OCTET STRING (ASN TAG + // 2nd byte -> The length (this will be the size indicated in the input bytes + 2 for the additional bytes we add here. + // Application Tag: + //| Byte: | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | + //| Value: | 0 | 1 | 1 | From the RFC spec 4120 | + //| Explanation | Defined by the ASN1 encoding rules for an application tag | A value of 1 indicates a constructed type | The ASN Application tag value | + btkts = append(asn1tools.MarshalLengthBytes(len(btkts)), btkts...) + btkts = append([]byte{byte(32 + asn1.TagSequence)}, btkts...) + raw.Bytes = btkts + // If we need to create the full bytes then identifier octet is "context-specific" = 128 + "constructed" + 32 + the wrapping explicit tag (11) + //fmt.Fprintf(os.Stderr, "mRaw fb: %v\n", raw.FullBytes) + return raw, nil +} + +// DecryptEncPart decrypts the encrypted part of the ticket. +// The sname argument can be used to specify which service principal's key should be used to decrypt the ticket. +// If nil is passed as the sname then the service principal specified within the ticket it used. +func (t *Ticket) DecryptEncPart(keytab *keytab.Keytab, sname *types.PrincipalName) error { + if sname == nil { + sname = &t.SName + } + key, err := keytab.GetEncryptionKey(*sname, t.Realm, t.EncPart.KVNO, t.EncPart.EType) + if err != nil { + return NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_NOKEY, fmt.Sprintf("Could not get key from keytab: %v", err)) + } + return t.Decrypt(key) +} + +// Decrypt decrypts the encrypted part of the ticket using the key provided. +func (t *Ticket) Decrypt(key types.EncryptionKey) error { + b, err := crypto.DecryptEncPart(t.EncPart, key, keyusage.KDC_REP_TICKET) + if err != nil { + return fmt.Errorf("error decrypting Ticket EncPart: %v", err) + } + var denc EncTicketPart + err = denc.Unmarshal(b) + if err != nil { + return fmt.Errorf("error unmarshaling encrypted part: %v", err) + } + t.DecryptedEncPart = denc + return nil +} + +// GetPACType returns a Microsoft PAC that has been extracted from the ticket and processed. 
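For a service consuming a ticket, the usual flow is Unmarshal followed by DecryptEncPart with the service keytab, before any PAC extraction. The sketch below is illustration only (not part of the vendored file); the keytab path is a placeholder, keytab.Load is the loader from the keytab package, and PrincipalNameString comes from the types package.

```go
package example

import (
	"fmt"

	"gopkg.in/jcmturner/gokrb5.v7/keytab"
	"gopkg.in/jcmturner/gokrb5.v7/messages"
)

// decryptServiceTicket decrypts a ticket presented to a service using the
// service's keytab. Passing nil as sname lets the SName carried inside the
// ticket select the keytab entry, as described above.
func decryptServiceTicket(tktBytes []byte, ktPath string) (*messages.Ticket, error) {
	kt, err := keytab.Load(ktPath) // e.g. "/etc/krb5.keytab" (placeholder path)
	if err != nil {
		return nil, fmt.Errorf("could not load keytab: %v", err)
	}
	var tkt messages.Ticket
	if err := tkt.Unmarshal(tktBytes); err != nil {
		return nil, fmt.Errorf("could not unmarshal ticket: %v", err)
	}
	if err := tkt.DecryptEncPart(kt, nil); err != nil {
		return nil, fmt.Errorf("could not decrypt ticket: %v", err)
	}
	fmt.Printf("client: %s@%s\n", tkt.DecryptedEncPart.CName.PrincipalNameString(), tkt.DecryptedEncPart.CRealm)
	return &tkt, nil
}
```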
+func (t *Ticket) GetPACType(keytab *keytab.Keytab, sname *types.PrincipalName, l *log.Logger) (bool, pac.PACType, error) { + var isPAC bool + for _, ad := range t.DecryptedEncPart.AuthorizationData { + if ad.ADType == adtype.ADIfRelevant { + var ad2 types.AuthorizationData + err := ad2.Unmarshal(ad.ADData) + if err != nil { + l.Printf("PAC authorization data could not be unmarshaled: %v", err) + continue + } + if ad2[0].ADType == adtype.ADWin2KPAC { + isPAC = true + var p pac.PACType + err = p.Unmarshal(ad2[0].ADData) + if err != nil { + return isPAC, p, fmt.Errorf("error unmarshaling PAC: %v", err) + } + if sname == nil { + sname = &t.SName + } + key, err := keytab.GetEncryptionKey(*sname, t.Realm, t.EncPart.KVNO, t.EncPart.EType) + if err != nil { + return isPAC, p, NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_NOKEY, fmt.Sprintf("Could not get key from keytab: %v", err)) + } + err = p.ProcessPACInfoBuffers(key, l) + return isPAC, p, err + } + } + } + return isPAC, pac.PACType{}, nil +} + +// Valid checks it the ticket is currently valid. Max duration passed endtime passed in as argument. +func (t *Ticket) Valid(d time.Duration) (bool, error) { + // Check for future tickets or invalid tickets + time := time.Now().UTC() + if t.DecryptedEncPart.StartTime.Sub(time) > d || types.IsFlagSet(&t.DecryptedEncPart.Flags, flags.Invalid) { + return false, NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_TKT_NYV, "service ticket provided is not yet valid") + } + + // Check for expired ticket + if time.Sub(t.DecryptedEncPart.EndTime) > d { + return false, NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_TKT_EXPIRED, "service ticket provided has expired") + } + + return true, nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/client_claims.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/client_claims.go new file mode 100644 index 00000000..612979e4 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/client_claims.go @@ -0,0 +1,33 @@ +package pac + +import ( + "bytes" + "fmt" + + "gopkg.in/jcmturner/rpc.v1/mstypes" + "gopkg.in/jcmturner/rpc.v1/ndr" +) + +// Claims reference: https://msdn.microsoft.com/en-us/library/hh553895.aspx + +// ClientClaimsInfo implements https://msdn.microsoft.com/en-us/library/hh536365.aspx +type ClientClaimsInfo struct { + ClaimsSetMetadata mstypes.ClaimsSetMetadata + ClaimsSet mstypes.ClaimsSet +} + +// Unmarshal bytes into the ClientClaimsInfo struct +func (k *ClientClaimsInfo) Unmarshal(b []byte) (err error) { + dec := ndr.NewDecoder(bytes.NewReader(b)) + m := new(mstypes.ClaimsSetMetadata) + err = dec.Decode(m) + if err != nil { + err = fmt.Errorf("error unmarshaling ClientClaimsInfo ClaimsSetMetadata: %v", err) + } + k.ClaimsSetMetadata = *m + k.ClaimsSet, err = k.ClaimsSetMetadata.ClaimsSet() + if err != nil { + err = fmt.Errorf("error unmarshaling ClientClaimsInfo ClaimsSet: %v", err) + } + return +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/client_info.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/client_info.go new file mode 100644 index 00000000..ad5212dc --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/client_info.go @@ -0,0 +1,31 @@ +package pac + +import ( + "bytes" + + "gopkg.in/jcmturner/rpc.v1/mstypes" +) + +// ClientInfo implements https://msdn.microsoft.com/en-us/library/cc237951.aspx +type ClientInfo struct { + ClientID mstypes.FileTime // A FILETIME structure in little-endian format that contains the Kerberos initial ticket-granting ticket TGT authentication time + NameLength uint16 // An unsigned 16-bit integer 
in little-endian format that specifies the length, in bytes, of the Name field. + Name string // An array of 16-bit Unicode characters in little-endian format that contains the client's account name. +} + +// Unmarshal bytes into the ClientInfo struct +func (k *ClientInfo) Unmarshal(b []byte) (err error) { + //The PAC_CLIENT_INFO structure is a simple structure that is not NDR-encoded. + r := mstypes.NewReader(bytes.NewReader(b)) + + k.ClientID, err = r.FileTime() + if err != nil { + return + } + k.NameLength, err = r.Uint16() + if err != nil { + return + } + k.Name, err = r.UTF16String(int(k.NameLength)) + return +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/credentials_info.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/credentials_info.go new file mode 100644 index 00000000..a8c2c3ca --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/credentials_info.go @@ -0,0 +1,86 @@ +package pac + +import ( + "bytes" + "errors" + "fmt" + + "gopkg.in/jcmturner/gokrb5.v7/crypto" + "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage" + "gopkg.in/jcmturner/gokrb5.v7/types" + "gopkg.in/jcmturner/rpc.v1/mstypes" + "gopkg.in/jcmturner/rpc.v1/ndr" +) + +// https://msdn.microsoft.com/en-us/library/cc237931.aspx + +// CredentialsInfo implements https://msdn.microsoft.com/en-us/library/cc237953.aspx +type CredentialsInfo struct { + Version uint32 // A 32-bit unsigned integer in little-endian format that defines the version. MUST be 0x00000000. + EType uint32 + PACCredentialDataEncrypted []byte // Key usage number for encryption: KERB_NON_KERB_SALT (16) + PACCredentialData CredentialData +} + +// Unmarshal bytes into the CredentialsInfo struct +func (c *CredentialsInfo) Unmarshal(b []byte, k types.EncryptionKey) (err error) { + //The CredentialsInfo structure is a simple structure that is not NDR-encoded. + r := mstypes.NewReader(bytes.NewReader(b)) + + c.Version, err = r.Uint32() + if err != nil { + return + } + if c.Version != 0 { + err = errors.New("credentials info version is not zero") + return + } + c.EType, err = r.Uint32() + if err != nil { + return + } + c.PACCredentialDataEncrypted, err = r.ReadBytes(len(b) - 8) + + err = c.DecryptEncPart(k) + if err != nil { + err = fmt.Errorf("error decrypting PAC Credentials Data: %v", err) + return + } + return +} + +// DecryptEncPart decrypts the encrypted part of the CredentialsInfo. +func (c *CredentialsInfo) DecryptEncPart(k types.EncryptionKey) error { + if k.KeyType != int32(c.EType) { + return fmt.Errorf("key provided is not the correct type. Type needed: %d, type provided: %d", c.EType, k.KeyType) + } + pt, err := crypto.DecryptMessage(c.PACCredentialDataEncrypted, k, keyusage.KERB_NON_KERB_SALT) + if err != nil { + return err + } + err = c.PACCredentialData.Unmarshal(pt) + if err != nil { + return err + } + return nil +} + +// CredentialData implements https://msdn.microsoft.com/en-us/library/cc237952.aspx +// This structure is encrypted prior to being encoded in any other structures. +// Encryption is performed by first serializing the data structure via Network Data Representation (NDR) encoding, as specified in [MS-RPCE]. +// Once serialized, the data is encrypted using the key and cryptographic system selected through the AS protocol and the KRB_AS_REP message +// Fields (for capturing this information) and cryptographic parameters are specified in PAC_CREDENTIAL_INFO (section 2.6.1). 
+type CredentialData struct { + CredentialCount uint32 + Credentials []SECPKGSupplementalCred // Size is the value of CredentialCount +} + +// Unmarshal converts the bytes provided into a CredentialData type. +func (c *CredentialData) Unmarshal(b []byte) (err error) { + dec := ndr.NewDecoder(bytes.NewReader(b)) + err = dec.Decode(c) + if err != nil { + err = fmt.Errorf("error unmarshaling KerbValidationInfo: %v", err) + } + return +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/device_claims.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/device_claims.go new file mode 100644 index 00000000..c2299bb0 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/device_claims.go @@ -0,0 +1,33 @@ +package pac + +import ( + "bytes" + "fmt" + + "gopkg.in/jcmturner/rpc.v1/mstypes" + "gopkg.in/jcmturner/rpc.v1/ndr" +) + +// Claims reference: https://msdn.microsoft.com/en-us/library/hh553895.aspx + +// DeviceClaimsInfo implements https://msdn.microsoft.com/en-us/library/hh554226.aspx +type DeviceClaimsInfo struct { + ClaimsSetMetadata mstypes.ClaimsSetMetadata + ClaimsSet mstypes.ClaimsSet +} + +// Unmarshal bytes into the ClientClaimsInfo struct +func (k *DeviceClaimsInfo) Unmarshal(b []byte) (err error) { + dec := ndr.NewDecoder(bytes.NewReader(b)) + m := new(mstypes.ClaimsSetMetadata) + err = dec.Decode(m) + if err != nil { + err = fmt.Errorf("error unmarshaling ClientClaimsInfo ClaimsSetMetadata: %v", err) + } + k.ClaimsSetMetadata = *m + k.ClaimsSet, err = k.ClaimsSetMetadata.ClaimsSet() + if err != nil { + err = fmt.Errorf("error unmarshaling ClientClaimsInfo ClaimsSet: %v", err) + } + return +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/device_info.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/device_info.go new file mode 100644 index 00000000..51be207a --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/device_info.go @@ -0,0 +1,32 @@ +package pac + +import ( + "bytes" + "fmt" + + "gopkg.in/jcmturner/rpc.v1/mstypes" + "gopkg.in/jcmturner/rpc.v1/ndr" +) + +// DeviceInfo implements https://msdn.microsoft.com/en-us/library/hh536402.aspx +type DeviceInfo struct { + UserID uint32 // A 32-bit unsigned integer that contains the RID of the account. If the UserId member equals 0x00000000, the first group SID in this member is the SID for this account. + PrimaryGroupID uint32 // A 32-bit unsigned integer that contains the RID for the primary group to which this account belongs. + AccountDomainID mstypes.RPCSID `ndr:"pointer"` // A SID structure that contains the SID for the domain of the account.This member is used in conjunction with the UserId, and GroupIds members to create the user and group SIDs for the client. + AccountGroupCount uint32 // A 32-bit unsigned integer that contains the number of groups within the account domain to which the account belongs + AccountGroupIDs []mstypes.GroupMembership `ndr:"pointer,conformant"` // A pointer to a list of GROUP_MEMBERSHIP (section 2.2.2) structures that contains the groups to which the account belongs in the account domain. The number of groups in this list MUST be equal to GroupCount. + SIDCount uint32 // A 32-bit unsigned integer that contains the total number of SIDs present in the ExtraSids member. + ExtraSIDs []mstypes.KerbSidAndAttributes `ndr:"pointer,conformant"` // A pointer to a list of KERB_SID_AND_ATTRIBUTES structures that contain a list of SIDs corresponding to groups not in domains. If the UserId member equals 0x00000000, the first group SID in this member is the SID for this account. 
+ DomainGroupCount uint32 // A 32-bit unsigned integer that contains the number of domains with groups to which the account belongs. + DomainGroup []mstypes.DomainGroupMembership `ndr:"pointer,conformant"` // A pointer to a list of DOMAIN_GROUP_MEMBERSHIP structures (section 2.2.3) that contains the domains to which the account belongs to a group. The number of sets in this list MUST be equal to DomainCount. +} + +// Unmarshal bytes into the DeviceInfo struct +func (k *DeviceInfo) Unmarshal(b []byte) (err error) { + dec := ndr.NewDecoder(bytes.NewReader(b)) + err = dec.Decode(k) + if err != nil { + err = fmt.Errorf("error unmarshaling DeviceInfo: %v", err) + } + return +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/kerb_validation_info.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/kerb_validation_info.go new file mode 100644 index 00000000..9dd69d2b --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/kerb_validation_info.go @@ -0,0 +1,115 @@ +// Package pac implements Microsoft Privilege Attribute Certificate (PAC) processing. +package pac + +import ( + "bytes" + "fmt" + + "gopkg.in/jcmturner/rpc.v1/mstypes" + "gopkg.in/jcmturner/rpc.v1/ndr" +) + +// KERB_VALIDATION_INFO flags. +const ( + USERFLAG_GUEST = 31 // Authentication was done via the GUEST account; no password was used. + USERFLAG_NO_ENCRYPTION_AVAILABLE = 30 // No encryption is available. + USERFLAG_LAN_MANAGER_KEY = 28 // LAN Manager key was used for authentication. + USERFLAG_SUB_AUTH = 25 // Sub-authentication used; session key came from the sub-authentication package. + USERFLAG_EXTRA_SIDS = 26 // Indicates that the ExtraSids field is populated and contains additional SIDs. + USERFLAG_MACHINE_ACCOUNT = 24 // Indicates that the account is a machine account. + USERFLAG_DC_NTLM2 = 23 // Indicates that the domain controller understands NTLMv2. + USERFLAG_RESOURCE_GROUPIDS = 22 // Indicates that the ResourceGroupIds field is populated. + USERFLAG_PROFILEPATH = 21 // Indicates that ProfilePath is populated. + USERFLAG_NTLM2_NTCHALLENGERESP = 20 // The NTLMv2 response from the NtChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for authentication and session key generation. + USERFLAG_LM2_LMCHALLENGERESP = 19 // The LMv2 response from the LmChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for authentication and session key generation. + USERFLAG_AUTH_LMCHALLENGERESP_KEY_NTCHALLENGERESP = 18 // The LMv2 response from the LmChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for authentication and the NTLMv2 response from the NtChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used session key generation. +) + +// KerbValidationInfo implement https://msdn.microsoft.com/en-us/library/cc237948.aspx +// The KERB_VALIDATION_INFO structure defines the user's logon and authorization information +// provided by the DC. The KERB_VALIDATION_INFO structure is a subset of the +// NETLOGON_VALIDATION_SAM_INFO4 structure ([MS-NRPC] section 2.2.1.4.13). +// It is a subset due to historical reasons and to the use of the common Active Directory to generate this information. +// The KERB_VALIDATION_INFO structure is marshaled by RPC [MS-RPCE]. 
+type KerbValidationInfo struct { + LogOnTime mstypes.FileTime + LogOffTime mstypes.FileTime + KickOffTime mstypes.FileTime + PasswordLastSet mstypes.FileTime + PasswordCanChange mstypes.FileTime + PasswordMustChange mstypes.FileTime + EffectiveName mstypes.RPCUnicodeString + FullName mstypes.RPCUnicodeString + LogonScript mstypes.RPCUnicodeString + ProfilePath mstypes.RPCUnicodeString + HomeDirectory mstypes.RPCUnicodeString + HomeDirectoryDrive mstypes.RPCUnicodeString + LogonCount uint16 + BadPasswordCount uint16 + UserID uint32 + PrimaryGroupID uint32 + GroupCount uint32 + GroupIDs []mstypes.GroupMembership `ndr:"pointer,conformant"` + UserFlags uint32 + UserSessionKey mstypes.UserSessionKey + LogonServer mstypes.RPCUnicodeString + LogonDomainName mstypes.RPCUnicodeString + LogonDomainID mstypes.RPCSID `ndr:"pointer"` + Reserved1 [2]uint32 // Has 2 elements + UserAccountControl uint32 + SubAuthStatus uint32 + LastSuccessfulILogon mstypes.FileTime + LastFailedILogon mstypes.FileTime + FailedILogonCount uint32 + Reserved3 uint32 + SIDCount uint32 + ExtraSIDs []mstypes.KerbSidAndAttributes `ndr:"pointer,conformant"` + ResourceGroupDomainSID mstypes.RPCSID `ndr:"pointer"` + ResourceGroupCount uint32 + ResourceGroupIDs []mstypes.GroupMembership `ndr:"pointer,conformant"` +} + +// Unmarshal bytes into the DeviceInfo struct +func (k *KerbValidationInfo) Unmarshal(b []byte) (err error) { + dec := ndr.NewDecoder(bytes.NewReader(b)) + err = dec.Decode(k) + if err != nil { + err = fmt.Errorf("error unmarshaling KerbValidationInfo: %v", err) + } + return +} + +// GetGroupMembershipSIDs returns a slice of strings containing the group membership SIDs found in the PAC. +func (k *KerbValidationInfo) GetGroupMembershipSIDs() []string { + var g []string + lSID := k.LogonDomainID.String() + for i := range k.GroupIDs { + g = append(g, fmt.Sprintf("%s-%d", lSID, k.GroupIDs[i].RelativeID)) + } + for _, s := range k.ExtraSIDs { + var exists = false + for _, es := range g { + if es == s.SID.String() { + exists = true + break + } + } + if !exists { + g = append(g, s.SID.String()) + } + } + for _, r := range k.ResourceGroupIDs { + var exists = false + s := fmt.Sprintf("%s-%d", k.ResourceGroupDomainSID.String(), r.RelativeID) + for _, es := range g { + if es == s { + exists = true + break + } + } + if !exists { + g = append(g, s) + } + } + return g +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/pac_type.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/pac_type.go new file mode 100644 index 00000000..c73fd067 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/pac_type.go @@ -0,0 +1,251 @@ +package pac + +import ( + "bytes" + "errors" + "fmt" + "log" + + "gopkg.in/jcmturner/gokrb5.v7/crypto" + "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage" + "gopkg.in/jcmturner/gokrb5.v7/types" + "gopkg.in/jcmturner/rpc.v1/mstypes" +) + +const ( + infoTypeKerbValidationInfo uint32 = 1 + infoTypeCredentials uint32 = 2 + infoTypePACServerSignatureData uint32 = 6 + infoTypePACKDCSignatureData uint32 = 7 + infoTypePACClientInfo uint32 = 10 + infoTypeS4UDelegationInfo uint32 = 11 + infoTypeUPNDNSInfo uint32 = 12 + infoTypePACClientClaimsInfo uint32 = 13 + infoTypePACDeviceInfo uint32 = 14 + infoTypePACDeviceClaimsInfo uint32 = 15 +) + +// PACType implements: https://msdn.microsoft.com/en-us/library/cc237950.aspx +type PACType struct { + CBuffers uint32 + Version uint32 + Buffers []InfoBuffer + Data []byte + KerbValidationInfo *KerbValidationInfo + CredentialsInfo *CredentialsInfo + ServerChecksum *SignatureData + 
KDCChecksum *SignatureData + ClientInfo *ClientInfo + S4UDelegationInfo *S4UDelegationInfo + UPNDNSInfo *UPNDNSInfo + ClientClaimsInfo *ClientClaimsInfo + DeviceInfo *DeviceInfo + DeviceClaimsInfo *DeviceClaimsInfo + ZeroSigData []byte +} + +// InfoBuffer implements the PAC Info Buffer: https://msdn.microsoft.com/en-us/library/cc237954.aspx +type InfoBuffer struct { + ULType uint32 // A 32-bit unsigned integer in little-endian format that describes the type of data present in the buffer contained at Offset. + CBBufferSize uint32 // A 32-bit unsigned integer in little-endian format that contains the size, in bytes, of the buffer in the PAC located at Offset. + Offset uint64 // A 64-bit unsigned integer in little-endian format that contains the offset to the beginning of the buffer, in bytes, from the beginning of the PACTYPE structure. The data offset MUST be a multiple of eight. The following sections specify the format of each type of element. +} + +// Unmarshal bytes into the PACType struct +func (pac *PACType) Unmarshal(b []byte) (err error) { + pac.Data = b + zb := make([]byte, len(b), len(b)) + copy(zb, b) + pac.ZeroSigData = zb + r := mstypes.NewReader(bytes.NewReader(b)) + pac.CBuffers, err = r.Uint32() + if err != nil { + return + } + pac.Version, err = r.Uint32() + if err != nil { + return + } + buf := make([]InfoBuffer, pac.CBuffers, pac.CBuffers) + for i := range buf { + buf[i].ULType, err = r.Uint32() + if err != nil { + return + } + buf[i].CBBufferSize, err = r.Uint32() + if err != nil { + return + } + buf[i].Offset, err = r.Uint64() + if err != nil { + return + } + } + pac.Buffers = buf + return nil +} + +// ProcessPACInfoBuffers processes the PAC Info Buffers. +// https://msdn.microsoft.com/en-us/library/cc237954.aspx +func (pac *PACType) ProcessPACInfoBuffers(key types.EncryptionKey, l *log.Logger) error { + for _, buf := range pac.Buffers { + p := make([]byte, buf.CBBufferSize, buf.CBBufferSize) + copy(p, pac.Data[int(buf.Offset):int(buf.Offset)+int(buf.CBBufferSize)]) + switch buf.ULType { + case infoTypeKerbValidationInfo: + if pac.KerbValidationInfo != nil { + //Must ignore subsequent buffers of this type + continue + } + var k KerbValidationInfo + err := k.Unmarshal(p) + if err != nil { + return fmt.Errorf("error processing KerbValidationInfo: %v", err) + } + pac.KerbValidationInfo = &k + case infoTypeCredentials: + // Currently PAC parsing is only useful on the service side in gokrb5 + // The CredentialsInfo are only useful when gokrb5 has implemented RFC4556 and only applied on the client side. + // Skipping CredentialsInfo - will be revisited under RFC4556 implementation. + continue + //if pac.CredentialsInfo != nil { + // //Must ignore subsequent buffers of this type + // continue + //} + //var k CredentialsInfo + //err := k.Unmarshal(p, key) // The encryption key used is the AS reply key only available to the client. 
+ //if err != nil { + // return fmt.Errorf("error processing CredentialsInfo: %v", err) + //} + //pac.CredentialsInfo = &k + case infoTypePACServerSignatureData: + if pac.ServerChecksum != nil { + //Must ignore subsequent buffers of this type + continue + } + var k SignatureData + zb, err := k.Unmarshal(p) + copy(pac.ZeroSigData[int(buf.Offset):int(buf.Offset)+int(buf.CBBufferSize)], zb) + if err != nil { + return fmt.Errorf("error processing ServerChecksum: %v", err) + } + pac.ServerChecksum = &k + case infoTypePACKDCSignatureData: + if pac.KDCChecksum != nil { + //Must ignore subsequent buffers of this type + continue + } + var k SignatureData + zb, err := k.Unmarshal(p) + copy(pac.ZeroSigData[int(buf.Offset):int(buf.Offset)+int(buf.CBBufferSize)], zb) + if err != nil { + return fmt.Errorf("error processing KDCChecksum: %v", err) + } + pac.KDCChecksum = &k + case infoTypePACClientInfo: + if pac.ClientInfo != nil { + //Must ignore subsequent buffers of this type + continue + } + var k ClientInfo + err := k.Unmarshal(p) + if err != nil { + return fmt.Errorf("error processing ClientInfo: %v", err) + } + pac.ClientInfo = &k + case infoTypeS4UDelegationInfo: + if pac.S4UDelegationInfo != nil { + //Must ignore subsequent buffers of this type + continue + } + var k S4UDelegationInfo + err := k.Unmarshal(p) + if err != nil { + l.Printf("could not process S4U_DelegationInfo: %v", err) + continue + } + pac.S4UDelegationInfo = &k + case infoTypeUPNDNSInfo: + if pac.UPNDNSInfo != nil { + //Must ignore subsequent buffers of this type + continue + } + var k UPNDNSInfo + err := k.Unmarshal(p) + if err != nil { + l.Printf("could not process UPN_DNSInfo: %v", err) + continue + } + pac.UPNDNSInfo = &k + case infoTypePACClientClaimsInfo: + if pac.ClientClaimsInfo != nil || len(p) < 1 { + //Must ignore subsequent buffers of this type + continue + } + var k ClientClaimsInfo + err := k.Unmarshal(p) + if err != nil { + l.Printf("could not process ClientClaimsInfo: %v", err) + continue + } + pac.ClientClaimsInfo = &k + case infoTypePACDeviceInfo: + if pac.DeviceInfo != nil { + //Must ignore subsequent buffers of this type + continue + } + var k DeviceInfo + err := k.Unmarshal(p) + if err != nil { + l.Printf("could not process DeviceInfo: %v", err) + continue + } + pac.DeviceInfo = &k + case infoTypePACDeviceClaimsInfo: + if pac.DeviceClaimsInfo != nil { + //Must ignore subsequent buffers of this type + continue + } + var k DeviceClaimsInfo + err := k.Unmarshal(p) + if err != nil { + l.Printf("could not process DeviceClaimsInfo: %v", err) + continue + } + pac.DeviceClaimsInfo = &k + } + } + + if ok, err := pac.verify(key); !ok { + return err + } + + return nil +} + +func (pac *PACType) verify(key types.EncryptionKey) (bool, error) { + if pac.KerbValidationInfo == nil { + return false, errors.New("PAC Info Buffers does not contain a KerbValidationInfo") + } + if pac.ServerChecksum == nil { + return false, errors.New("PAC Info Buffers does not contain a ServerChecksum") + } + if pac.KDCChecksum == nil { + return false, errors.New("PAC Info Buffers does not contain a KDCChecksum") + } + if pac.ClientInfo == nil { + return false, errors.New("PAC Info Buffers does not contain a ClientInfo") + } + etype, err := crypto.GetChksumEtype(int32(pac.ServerChecksum.SignatureType)) + if err != nil { + return false, err + } + if ok := etype.VerifyChecksum(key.KeyValue, + pac.ZeroSigData, + pac.ServerChecksum.Signature, + keyusage.KERB_NON_KERB_CKSUM_SALT); !ok { + return false, errors.New("PAC service checksum verification 
failed") + } + + return true, nil +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/s4u_delegation_info.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/s4u_delegation_info.go new file mode 100644 index 00000000..614ee856 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/s4u_delegation_info.go @@ -0,0 +1,26 @@ +package pac + +import ( + "bytes" + "fmt" + + "gopkg.in/jcmturner/rpc.v1/mstypes" + "gopkg.in/jcmturner/rpc.v1/ndr" +) + +// S4UDelegationInfo implements https://msdn.microsoft.com/en-us/library/cc237944.aspx +type S4UDelegationInfo struct { + S4U2proxyTarget mstypes.RPCUnicodeString // The name of the principal to whom the application can forward the ticket. + TransitedListSize uint32 + S4UTransitedServices []mstypes.RPCUnicodeString `ndr:"pointer,conformant"` // List of all services that have been delegated through by this client and subsequent services or servers.. Size is value of TransitedListSize +} + +// Unmarshal bytes into the S4UDelegationInfo struct +func (k *S4UDelegationInfo) Unmarshal(b []byte) (err error) { + dec := ndr.NewDecoder(bytes.NewReader(b)) + err = dec.Decode(k) + if err != nil { + err = fmt.Errorf("error unmarshaling S4UDelegationInfo: %v", err) + } + return +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/signature_data.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/signature_data.go new file mode 100644 index 00000000..7e0fce1a --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/signature_data.go @@ -0,0 +1,79 @@ +package pac + +import ( + "bytes" + + "gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype" + "gopkg.in/jcmturner/rpc.v1/mstypes" +) + +/* +https://msdn.microsoft.com/en-us/library/cc237955.aspx + +The Key Usage Value MUST be KERB_NON_KERB_CKSUM_SALT (17) [MS-KILE] (section 3.1.5.9). + +Server Signature (SignatureType = 0x00000006) +https://msdn.microsoft.com/en-us/library/cc237957.aspx +The KDC will use the long-term key that the KDC shares with the server, so that the server can verify this signature on receiving a PAC. +The server signature is a keyed hash [RFC4757] of the entire PAC message, with the Signature fields of both PAC_SIGNATURE_DATA structures set to zero. +The key used to protect the ciphertext part of the response is used. +The checksum type corresponds to the key unless the key is DES, in which case the KERB_CHECKSUM_HMAC_MD5 key is used. +The resulting hash value is then placed in the Signature field of the server's PAC_SIGNATURE_DATA structure. + +KDC Signature (SignatureType = 0x00000007) +https://msdn.microsoft.com/en-us/library/dd357117.aspx +The KDC will use KDC (krbtgt) key [RFC4120], so that other KDCs can verify this signature on receiving a PAC. +The KDC signature is a keyed hash [RFC4757] of the Server Signature field in the PAC message. +The cryptographic system that is used to calculate the checksum depends on which system the KDC supports, as defined below: +- Supports RC4-HMAC --> KERB_CHECKSUM_HMAC_MD5 +- Does not support RC4-HMAC and supports AES256 --> HMAC_SHA1_96_AES256 +- Does not support RC4-HMAC or AES256-CTS-HMAC-SHA1-96, and supports AES128-CTS-HMAC-SHA1-96 --> HMAC_SHA1_96_AES128 +- Does not support RC4-HMAC, AES128-CTS-HMAC-SHA1-96 or AES256-CTS-HMAC-SHA1-96 --> None. The checksum operation will fail. +*/ + +// SignatureData implements https://msdn.microsoft.com/en-us/library/cc237955.aspx +type SignatureData struct { + SignatureType uint32 // A 32-bit unsigned integer value in little-endian format that defines the cryptographic system used to calculate the checksum. 
This MUST be one of the following checksum types: KERB_CHECKSUM_HMAC_MD5 (signature size = 16), HMAC_SHA1_96_AES128 (signature size = 12), HMAC_SHA1_96_AES256 (signature size = 12). + Signature []byte // Size depends on the type. See comment above. + RODCIdentifier uint16 // A 16-bit unsigned integer value in little-endian format that contains the first 16 bits of the key version number ([MS-KILE] section 3.1.5.8) when the KDC is an RODC. When the KDC is not an RODC, this field does not exist. +} + +// Unmarshal bytes into the SignatureData struct +func (k *SignatureData) Unmarshal(b []byte) (rb []byte, err error) { + r := mstypes.NewReader(bytes.NewReader(b)) + + k.SignatureType, err = r.Uint32() + if err != nil { + return + } + + var c int + switch k.SignatureType { + case chksumtype.KERB_CHECKSUM_HMAC_MD5_UNSIGNED: + c = 16 + case uint32(chksumtype.HMAC_SHA1_96_AES128): + c = 12 + case uint32(chksumtype.HMAC_SHA1_96_AES256): + c = 12 + } + k.Signature, err = r.ReadBytes(c) + if err != nil { + return + } + + // When the KDC is not an Read Only Domain Controller (RODC), this field does not exist. + if len(b) >= 4+c+2 { + k.RODCIdentifier, err = r.Uint16() + if err != nil { + return + } + } + + // Create bytes with zeroed signature needed for checksum verification + rb = make([]byte, len(b), len(b)) + copy(rb, b) + z := make([]byte, len(b), len(b)) + copy(rb[4:4+c], z) + + return +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/supplemental_cred.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/supplemental_cred.go new file mode 100644 index 00000000..5f4f93c2 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/supplemental_cred.go @@ -0,0 +1,90 @@ +package pac + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + + "gopkg.in/jcmturner/rpc.v1/mstypes" + "gopkg.in/jcmturner/rpc.v1/ndr" +) + +const ( + // NTLMSupCredLMOWF indicates that the LM OWF member is present and valid. + NTLMSupCredLMOWF uint32 = 31 + // NTLMSupCredNTOWF indicates that the NT OWF member is present and valid. + NTLMSupCredNTOWF uint32 = 30 +) + +// NTLMSupplementalCred implements https://msdn.microsoft.com/en-us/library/cc237949.aspx +type NTLMSupplementalCred struct { + Version uint32 // A 32-bit unsigned integer that defines the credential version.This field MUST be 0x00000000. + Flags uint32 + LMPassword []byte // A 16-element array of unsigned 8-bit integers that define the LM OWF. The LMPassword member MUST be ignored if the L flag is not set in the Flags member. + NTPassword []byte // A 16-element array of unsigned 8-bit integers that define the NT OWF. The NTPassword member MUST be ignored if the N flag is not set in the Flags member. +} + +// Unmarshal converts the bytes provided into a NTLMSupplementalCred. +func (c *NTLMSupplementalCred) Unmarshal(b []byte) (err error) { + r := mstypes.NewReader(bytes.NewReader(b)) + c.Version, err = r.Uint32() + if err != nil { + return + } + if c.Version != 0 { + err = errors.New("NTLMSupplementalCred version is not zero") + return + } + c.Flags, err = r.Uint32() + if err != nil { + return + } + if isFlagSet(c.Flags, NTLMSupCredLMOWF) { + c.LMPassword, err = r.ReadBytes(16) + if err != nil { + return + } + } + if isFlagSet(c.Flags, NTLMSupCredNTOWF) { + c.NTPassword, err = r.ReadBytes(16) + if err != nil { + return + } + } + return +} + +// isFlagSet tests if a flag is set in the uint32 little endian flag +func isFlagSet(f uint32, i uint32) bool { + //Which byte? 
+ b := int(i / 8) + //Which bit in byte + p := uint(7 - (int(i) - 8*b)) + fb := make([]byte, 4) + binary.LittleEndian.PutUint32(fb, f) + if fb[b]&(1<: +func GetHostAddress(s string) (HostAddress, error) { + var h HostAddress + cAddr, _, err := net.SplitHostPort(s) + if err != nil { + return h, fmt.Errorf("invalid format of client address: %v", err) + } + ip := net.ParseIP(cAddr) + var ht int32 + if ip.To4() != nil { + ht = addrtype.IPv4 + ip = ip.To4() + } else if ip.To16() != nil { + ht = addrtype.IPv6 + ip = ip.To16() + } else { + return h, fmt.Errorf("could not determine client's address types: %v", err) + } + h = HostAddress{ + AddrType: ht, + Address: ip, + } + return h, nil +} + +// GetAddress returns a string representation of the HostAddress. +func (h *HostAddress) GetAddress() (string, error) { + var b []byte + _, err := asn1.Unmarshal(h.Address, &b) + return string(b), err +} + +// LocalHostAddresses returns a HostAddresses struct for the local machines interface IP addresses. +func LocalHostAddresses() (ha HostAddresses, err error) { + ifs, err := net.Interfaces() + if err != nil { + return + } + for _, iface := range ifs { + if iface.Flags&net.FlagLoopback != 0 || iface.Flags&net.FlagUp == 0 { + // Interface is either loopback of not up + continue + } + addrs, err := iface.Addrs() + if err != nil { + continue + } + for _, addr := range addrs { + var ip net.IP + switch v := addr.(type) { + case *net.IPNet: + ip = v.IP + case *net.IPAddr: + ip = v.IP + } + var a HostAddress + if ip.To16() == nil { + //neither IPv4 or IPv6 + continue + } + if ip.To4() != nil { + //Is IPv4 + a.AddrType = addrtype.IPv4 + a.Address = ip.To4() + } else { + a.AddrType = addrtype.IPv6 + a.Address = ip.To16() + } + ha = append(ha, a) + } + } + return ha, nil +} + +// HostAddressesFromNetIPs returns a HostAddresses type from a slice of net.IP +func HostAddressesFromNetIPs(ips []net.IP) (ha HostAddresses) { + for _, ip := range ips { + ha = append(ha, HostAddressFromNetIP(ip)) + } + return ha +} + +// HostAddressFromNetIP returns a HostAddress type from a net.IP +func HostAddressFromNetIP(ip net.IP) HostAddress { + if ip.To4() != nil { + //Is IPv4 + return HostAddress{ + AddrType: addrtype.IPv4, + Address: ip.To4(), + } + } + return HostAddress{ + AddrType: addrtype.IPv6, + Address: ip.To16(), + } +} + +// HostAddressesEqual tests if two HostAddress slices are equal. +func HostAddressesEqual(h, a []HostAddress) bool { + if len(h) != len(a) { + return false + } + for _, e := range a { + var found bool + for _, i := range h { + if e.Equal(i) { + found = true + break + } + } + if !found { + return false + } + } + return true +} + +// HostAddressesContains tests if a HostAddress is contained in a HostAddress slice. +func HostAddressesContains(h []HostAddress, a HostAddress) bool { + for _, e := range h { + if e.Equal(a) { + return true + } + } + return false +} + +// Equal tests if the HostAddress is equal to another HostAddress provided. +func (h *HostAddress) Equal(a HostAddress) bool { + if h.AddrType != a.AddrType { + return false + } + return bytes.Equal(h.Address, a.Address) +} + +// Contains tests if a HostAddress is contained within the HostAddresses struct. +func (h *HostAddresses) Contains(a HostAddress) bool { + for _, e := range *h { + if e.Equal(a) { + return true + } + } + return false +} + +// Equal tests if a HostAddress slice is equal to the HostAddresses struct. 
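As a usage sketch for the address helpers above (illustration only, using RFC 5737/RFC 3849 documentation addresses as placeholders): build a HostAddresses list from raw IPs and test whether a client's "host:port" source address is contained in it.

```go
package example

import (
	"net"

	"gopkg.in/jcmturner/gokrb5.v7/types"
)

// clientAddressPermitted reports whether the address a request came from is in
// the list of addresses allowed by a ticket or authenticator.
// remoteAddr is a "host:port" string, e.g. net.Conn.RemoteAddr().String().
func clientAddressPermitted(remoteAddr string, allowed types.HostAddresses) (bool, error) {
	ha, err := types.GetHostAddress(remoteAddr)
	if err != nil {
		return false, err
	}
	return allowed.Contains(ha), nil
}

// allowedFromIPs builds such a list from raw IPs; the addresses here are
// documentation-range placeholders.
func allowedFromIPs() types.HostAddresses {
	ips := []net.IP{net.ParseIP("192.0.2.10"), net.ParseIP("2001:db8::1")}
	return types.HostAddressesFromNetIPs(ips)
}
```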
+func (h *HostAddresses) Equal(a []HostAddress) bool { + if len(*h) != len(a) { + return false + } + for _, e := range a { + if !h.Contains(e) { + return false + } + } + return true +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/KerberosFlags.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/types/KerberosFlags.go new file mode 100644 index 00000000..bd75d5b5 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/types/KerberosFlags.go @@ -0,0 +1,124 @@ +package types + +// Reference: https://www.ietf.org/rfc/rfc4120.txt +// Section: 5.2.8 + +import ( + "github.com/jcmturner/gofork/encoding/asn1" +) + +/* +KerberosFlags + +For several message types, a specific constrained bit string type, +KerberosFlags, is used. + +KerberosFlags ::= BIT STRING (SIZE (32..MAX)) +-- minimum number of bits shall be sent, +-- but no fewer than 32 + +Compatibility note: The following paragraphs describe a change from +the RFC 1510 description of bit strings that would result in +incompatility in the case of an implementation that strictly +conformed to ASN.1 DER and RFC 1510. + +ASN.1 bit strings have multiple uses. The simplest use of a bit +string is to contain a vector of bits, with no particular meaning +attached to individual bits. This vector of bits is not necessarily +a multiple of eight bits long. The use in Kerberos of a bit string +as a compact boolean vector wherein each element has a distinct +meaning poses some problems. The natural notation for a compact +boolean vector is the ASN.1 "NamedBit" notation, and the DER require +that encodings of a bit string using "NamedBit" notation exclude any +trailing zero bits. This truncation is easy to neglect, especially +given C language implementations that naturally choose to store +boolean vectors as 32-bit integers. + +For example, if the notation for KDCOptions were to include the +"NamedBit" notation, as in RFC 1510, and a KDCOptions value to be +encoded had only the "forwardable" (bit number one) bit set, the DER +encoding MUST include only two bits: the first reserved bit +("reserved", bit number zero, value zero) and the one-valued bit (bit +number one) for "forwardable". + +Most existing implementations of Kerberos unconditionally send 32 +bits on the wire when encoding bit strings used as boolean vectors. +This behavior violates the ASN.1 syntax used for flag values in RFC +1510, but it occurs on such a widely installed base that the protocol +description is being modified to accommodate it. + +Consequently, this document removes the "NamedBit" notations for +individual bits, relegating them to comments. The size constraint on +the KerberosFlags type requires that at least 32 bits be encoded at +all times, though a lenient implementation MAY choose to accept fewer +than 32 bits and to treat the missing bits as set to zero. + +Currently, no uses of KerberosFlags specify more than 32 bits' worth +of flags, although future revisions of this document may do so. When +more than 32 bits are to be transmitted in a KerberosFlags value, +future revisions to this document will likely specify that the +smallest number of bits needed to encode the highest-numbered one- +valued bit should be sent. This is somewhat similar to the DER +encoding of a bit string that is declared with the "NamedBit" +notation. +*/ + +// NewKrbFlags returns an ASN1 BitString struct of the right size for KrbFlags. 
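A short illustration of the flag helpers defined just below (NewKrbFlags, SetFlag, IsFlagSet) together with bit numbers from the iana/flags package; it shows the behaviour described above, where the full 32 bits are always carried even when only low-numbered flags are set. Illustration only, not part of the vendored file.

```go
package example

import (
	"fmt"

	"gopkg.in/jcmturner/gokrb5.v7/iana/flags"
	"gopkg.in/jcmturner/gokrb5.v7/types"
)

// buildKDCOptions sets the forwardable (bit 1) and renewable (bit 8) options in
// a 32-bit KerberosFlags value, so all 32 bits go on the wire as described above.
func buildKDCOptions() {
	f := types.NewKrbFlags()             // four zero bytes, BitLength 32
	types.SetFlag(&f, flags.Forwardable) // bit number 1
	types.SetFlag(&f, flags.Renewable)   // bit number 8

	fmt.Println(f.BitLength)                            // 32
	fmt.Println(types.IsFlagSet(&f, flags.Forwardable)) // true
	fmt.Println(types.IsFlagSet(&f, flags.Proxiable))   // false
}
```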
+func NewKrbFlags() asn1.BitString { + f := asn1.BitString{} + f.Bytes = make([]byte, 4) + f.BitLength = len(f.Bytes) * 8 + return f +} + +// SetFlags sets the flags of an ASN1 BitString. +func SetFlags(f *asn1.BitString, j []int) { + for _, i := range j { + SetFlag(f, i) + } +} + +// SetFlag sets a flag in an ASN1 BitString. +func SetFlag(f *asn1.BitString, i int) { + for l := len(f.Bytes); l < 4; l++ { + (*f).Bytes = append((*f).Bytes, byte(0)) + (*f).BitLength = len((*f).Bytes) * 8 + } + //Which byte? + b := i / 8 + //Which bit in byte + p := uint(7 - (i - 8*b)) + (*f).Bytes[b] = (*f).Bytes[b] | (1 << p) +} + +// UnsetFlags unsets flags in an ASN1 BitString. +func UnsetFlags(f *asn1.BitString, j []int) { + for _, i := range j { + UnsetFlag(f, i) + } +} + +// UnsetFlag unsets a flag in an ASN1 BitString. +func UnsetFlag(f *asn1.BitString, i int) { + for l := len(f.Bytes); l < 4; l++ { + (*f).Bytes = append((*f).Bytes, byte(0)) + (*f).BitLength = len((*f).Bytes) * 8 + } + //Which byte? + b := i / 8 + //Which bit in byte + p := uint(7 - (i - 8*b)) + (*f).Bytes[b] = (*f).Bytes[b] &^ (1 << p) +} + +// IsFlagSet tests if a flag is set in the ASN1 BitString. +func IsFlagSet(f *asn1.BitString, i int) bool { + //Which byte? + b := i / 8 + //Which bit in byte + p := uint(7 - (i - 8*b)) + if (*f).Bytes[b]&(1</@ +// a PrincipalName type will be returned with the name type set to KRB_NT_PRINCIPAL(1) +// and the realm will be returned as a string. If the "@" suffix +// is not included in the SPN then the value of realm string returned will be "" +func ParseSPNString(spn string) (pn PrincipalName, realm string) { + if strings.Contains(spn, "@") { + s := strings.Split(spn, "@") + realm = s[len(s)-1] + spn = strings.TrimSuffix(spn, "@"+realm) + } + pn = NewPrincipalName(nametype.KRB_NT_PRINCIPAL, spn) + return +} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/TypedData.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/types/TypedData.go new file mode 100644 index 00000000..19e9f496 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/gokrb5.v7/types/TypedData.go @@ -0,0 +1,18 @@ +package types + +import "github.com/jcmturner/gofork/encoding/asn1" + +// TypedData implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.9.1 +type TypedData struct { + DataType int32 `asn1:"explicit,tag:0"` + DataValue []byte `asn1:"optional,explicit,tag:1"` +} + +// TypedDataSequence implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.9.1 +type TypedDataSequence []TypedData + +// Unmarshal bytes into the TypedDataSequence. +func (a *TypedDataSequence) Unmarshal(b []byte) error { + _, err := asn1.Unmarshal(b, a) + return err +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/LICENSE b/vendor/gopkg.in/jcmturner/rpc.v1/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/claims.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/claims.go new file mode 100644 index 00000000..2b375dab --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/claims.go @@ -0,0 +1,134 @@ +package mstypes + +import ( + "bytes" + "errors" + + "gopkg.in/jcmturner/rpc.v1/ndr" +) + +// Compression format assigned numbers. +const ( + CompressionFormatNone uint16 = 0 + CompressionFormatLZNT1 uint16 = 2 + CompressionFormatXPress uint16 = 3 + CompressionFormatXPressHuff uint16 = 4 +) + +// ClaimsSourceTypeAD https://msdn.microsoft.com/en-us/library/hh553809.aspx +const ClaimsSourceTypeAD uint16 = 1 + +// Claim Type assigned numbers +const ( + ClaimTypeIDInt64 uint16 = 1 + ClaimTypeIDUInt64 uint16 = 2 + ClaimTypeIDString uint16 = 3 + ClaimsTypeIDBoolean uint16 = 6 +) + +// ClaimsBlob implements https://msdn.microsoft.com/en-us/library/hh554119.aspx +type ClaimsBlob struct { + Size uint32 + EncodedBlob EncodedBlob +} + +// EncodedBlob are the bytes of the encoded Claims +type EncodedBlob []byte + +// Size returns the size of the bytes of the encoded Claims +func (b EncodedBlob) Size(c interface{}) int { + cb := c.(ClaimsBlob) + return int(cb.Size) +} + +// ClaimsSetMetadata implements https://msdn.microsoft.com/en-us/library/hh554073.aspx +type ClaimsSetMetadata struct { + ClaimsSetSize uint32 + ClaimsSetBytes []byte `ndr:"pointer,conformant"` + CompressionFormat uint16 // Enum see constants for options + UncompressedClaimsSetSize uint32 + ReservedType uint16 + ReservedFieldSize uint32 + ReservedField []byte `ndr:"pointer,conformant"` +} + +// ClaimsSet reads the ClaimsSet type from the NDR encoded ClaimsSetBytes in the ClaimsSetMetadata +func (m *ClaimsSetMetadata) ClaimsSet() (c ClaimsSet, err error) { + if len(m.ClaimsSetBytes) < 1 { + err = errors.New("no bytes available for ClaimsSet") + return + } + // TODO switch statement to decompress ClaimsSetBytes + if m.CompressionFormat != CompressionFormatNone { + err = errors.New("compressed ClaimsSet not currently supported") + return + } + dec := ndr.NewDecoder(bytes.NewReader(m.ClaimsSetBytes)) + err = dec.Decode(&c) + return +} + +// ClaimsSet implements https://msdn.microsoft.com/en-us/library/hh554122.aspx +type ClaimsSet struct { + ClaimsArrayCount uint32 + ClaimsArrays []ClaimsArray `ndr:"pointer,conformant"` + ReservedType uint16 + ReservedFieldSize uint32 + ReservedField []byte `ndr:"pointer,conformant"` +} + +// ClaimsArray implements https://msdn.microsoft.com/en-us/library/hh536458.aspx +type ClaimsArray struct { + ClaimsSourceType uint16 + ClaimsCount uint32 + ClaimEntries []ClaimEntry `ndr:"pointer,conformant"` +} + +// ClaimEntry is a NDR union that implements https://msdn.microsoft.com/en-us/library/hh536374.aspx +type ClaimEntry struct { + ID string `ndr:"pointer,conformant,varying"` + Type uint16 `ndr:"unionTag"` + TypeInt64 ClaimTypeInt64 `ndr:"unionField"` + TypeUInt64 ClaimTypeUInt64 `ndr:"unionField"` + TypeString ClaimTypeString `ndr:"unionField"` + TypeBool ClaimTypeBoolean `ndr:"unionField"` +} + +// SwitchFunc is the ClaimEntry union field selection function +func (u ClaimEntry) SwitchFunc(_ interface{}) string { + switch u.Type { + case ClaimTypeIDInt64: + return "TypeInt64" + case ClaimTypeIDUInt64: + return "TypeUInt64" + case ClaimTypeIDString: + return "TypeString" + case ClaimsTypeIDBoolean: + return "TypeBool" + } + return "" +} + +// ClaimTypeInt64 is a claim of type int64 +type ClaimTypeInt64 struct { + ValueCount uint32 + Value []int64 
`ndr:"pointer,conformant"` +} + +// ClaimTypeUInt64 is a claim of type uint64 +type ClaimTypeUInt64 struct { + ValueCount uint32 + Value []uint64 `ndr:"pointer,conformant"` +} + +// ClaimTypeString is a claim of type string +type ClaimTypeString struct { + ValueCount uint32 + Value []LPWSTR `ndr:"pointer,conformant"` +} + +// ClaimTypeBoolean is a claim of type bool +type ClaimTypeBoolean struct { + ValueCount uint32 + Value []bool `ndr:"pointer,conformant"` +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/common.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/common.go new file mode 100644 index 00000000..62cac286 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/common.go @@ -0,0 +1,10 @@ +package mstypes + +// LPWSTR implements https://msdn.microsoft.com/en-us/library/cc230355.aspx +type LPWSTR struct { + Value string `ndr:"pointer,conformant,varying"` +} + +func (s *LPWSTR) String() string { + return s.Value +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/filetime.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/filetime.go new file mode 100644 index 00000000..5cc952fa --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/filetime.go @@ -0,0 +1,52 @@ +// Package mstypes implements representations of Microsoft types +package mstypes + +import ( + "time" +) + +/* +FILETIME is a windows data structure. +Ref: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724284%28v=vs.85%29.aspx +It contains two parts that are 32bit integers: + dwLowDateTime + dwHighDateTime +We need to combine these two into one 64bit integer. +This gives the number of 100 nano second period from January 1, 1601, Coordinated Universal Time (UTC) +*/ + +const unixEpochDiff = 116444736000000000 + +// FileTime implements the Microsoft FILETIME type https://msdn.microsoft.com/en-us/library/cc230324.aspx +type FileTime struct { + LowDateTime uint32 + HighDateTime uint32 +} + +// Time return a golang Time type from the FileTime +func (ft FileTime) Time() time.Time { + ns := (ft.MSEpoch() - unixEpochDiff) * 100 + return time.Unix(0, int64(ns)).UTC() +} + +// MSEpoch returns the FileTime as a Microsoft epoch, the number of 100 nano second periods elapsed from January 1, 1601 UTC. +func (ft FileTime) MSEpoch() int64 { + return (int64(ft.HighDateTime) << 32) + int64(ft.LowDateTime) +} + +// Unix returns the FileTime as a Unix time, the number of seconds elapsed since January 1, 1970 UTC. +func (ft FileTime) Unix() int64 { + return (ft.MSEpoch() - unixEpochDiff) / 10000000 +} + +// GetFileTime returns a FileTime type from the provided Golang Time type. +func GetFileTime(t time.Time) FileTime { + ns := t.UnixNano() + fp := (ns / 100) + unixEpochDiff + hd := fp >> 32 + ld := fp - (hd << 32) + return FileTime{ + LowDateTime: uint32(ld), + HighDateTime: uint32(hd), + } +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/group_membership.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/group_membership.go new file mode 100644 index 00000000..79151378 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/group_membership.go @@ -0,0 +1,19 @@ +package mstypes + +// GroupMembership implements https://msdn.microsoft.com/en-us/library/cc237945.aspx +// RelativeID : A 32-bit unsigned integer that contains the RID of a particular group. 
+// The possible values for the Attributes flags are identical to those specified in KERB_SID_AND_ATTRIBUTES +type GroupMembership struct { + RelativeID uint32 + Attributes uint32 +} + +// DomainGroupMembership implements https://msdn.microsoft.com/en-us/library/hh536344.aspx +// DomainId: A SID structure that contains the SID for the domain.This member is used in conjunction with the GroupIds members to create group SIDs for the device. +// GroupCount: A 32-bit unsigned integer that contains the number of groups within the domain to which the account belongs. +// GroupIds: A pointer to a list of GROUP_MEMBERSHIP structures that contain the groups to which the account belongs in the domain. The number of groups in this list MUST be equal to GroupCount. +type DomainGroupMembership struct { + DomainID RPCSID `ndr:"pointer"` + GroupCount uint32 + GroupIDs []GroupMembership `ndr:"pointer,conformant"` // Size is value of GroupCount +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/kerb_sid_and_attributes.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/kerb_sid_and_attributes.go new file mode 100644 index 00000000..61ac39bb --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/kerb_sid_and_attributes.go @@ -0,0 +1,23 @@ +package mstypes + +// Attributes of a security group membership and can be combined by using the bitwise OR operation. +// They are used by an access check mechanism to specify whether the membership is to be used in an access check decision. +const ( + SEGroupMandatory = 31 + SEGroupEnabledByDefault = 30 + SEGroupEnabled = 29 + SEGroupOwner = 28 + SEGroupResource = 2 + //All other bits MUST be set to zero and MUST be ignored on receipt. +) + +// KerbSidAndAttributes implements https://msdn.microsoft.com/en-us/library/cc237947.aspx +type KerbSidAndAttributes struct { + SID RPCSID `ndr:"pointer"` // A pointer to an RPC_SID structure. + Attributes uint32 +} + +// SetFlag sets a flag in a uint32 attribute value. +func SetFlag(a *uint32, i uint) { + *a = *a | (1 << (31 - i)) +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/reader.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/reader.go new file mode 100644 index 00000000..24495bca --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/reader.go @@ -0,0 +1,109 @@ +package mstypes + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" +) + +// Byte sizes of primitive types +const ( + SizeBool = 1 + SizeChar = 1 + SizeUint8 = 1 + SizeUint16 = 2 + SizeUint32 = 4 + SizeUint64 = 8 + SizeEnum = 2 + SizeSingle = 4 + SizeDouble = 8 + SizePtr = 4 +) + +// Reader reads simple byte stream data into a Go representations +type Reader struct { + r *bufio.Reader // source of the data +} + +// NewReader creates a new instance of a simple Reader. 
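
The mstypes helpers vendored above are small enough to exercise directly. A minimal sketch (illustrative only, not part of the vendored files) of the FILETIME round-trip and the KERB_SID_AND_ATTRIBUTES flag helper, assuming the vendored import path gopkg.in/jcmturner/rpc.v1/mstypes:

```go
package main

import (
	"fmt"
	"time"

	"gopkg.in/jcmturner/rpc.v1/mstypes"
)

func main() {
	// Round-trip a Go time through the Microsoft FILETIME representation
	// (100ns intervals since 1601-01-01 UTC).
	ft := mstypes.GetFileTime(time.Now().UTC())
	fmt.Println(ft.Time()) // back to a time.Time, truncated to 100ns precision
	fmt.Println(ft.Unix()) // seconds since the Unix epoch

	// Build a group-attributes word; SetFlag sets bit (31 - i), so
	// SEGroupMandatory (31) lands in bit 0 and SEGroupEnabled (29) in bit 2.
	var attrs uint32
	mstypes.SetFlag(&attrs, mstypes.SEGroupMandatory)
	mstypes.SetFlag(&attrs, mstypes.SEGroupEnabled)
	fmt.Printf("attributes: %#x\n", attrs) // attributes: 0x5
}
```
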
+func NewReader(r io.Reader) *Reader { + reader := new(Reader) + reader.r = bufio.NewReader(r) + return reader +} + +func (r *Reader) Read(p []byte) (n int, err error) { + return r.r.Read(p) +} + +func (r *Reader) Uint8() (uint8, error) { + b, err := r.r.ReadByte() + if err != nil { + return uint8(0), err + } + return uint8(b), nil +} + +func (r *Reader) Uint16() (uint16, error) { + b, err := r.ReadBytes(SizeUint16) + if err != nil { + return uint16(0), err + } + return binary.LittleEndian.Uint16(b), nil +} + +func (r *Reader) Uint32() (uint32, error) { + b, err := r.ReadBytes(SizeUint32) + if err != nil { + return uint32(0), err + } + return binary.LittleEndian.Uint32(b), nil +} + +func (r *Reader) Uint64() (uint64, error) { + b, err := r.ReadBytes(SizeUint64) + if err != nil { + return uint64(0), err + } + return binary.LittleEndian.Uint64(b), nil +} + +func (r *Reader) FileTime() (f FileTime, err error) { + f.LowDateTime, err = r.Uint32() + if err != nil { + return + } + f.HighDateTime, err = r.Uint32() + if err != nil { + return + } + return +} + +// UTF16String returns a string that is UTF16 encoded in a byte slice. n is the number of bytes representing the string +func (r *Reader) UTF16String(n int) (str string, err error) { + //Length divided by 2 as each run is 16bits = 2bytes + s := make([]rune, n/2, n/2) + for i := 0; i < len(s); i++ { + var u uint16 + u, err = r.Uint16() + if err != nil { + return + } + s[i] = rune(u) + } + str = string(s) + return +} + +// readBytes returns a number of bytes from the NDR byte stream. +func (r *Reader) ReadBytes(n int) ([]byte, error) { + //TODO make this take an int64 as input to allow for larger values on all systems? + b := make([]byte, n, n) + m, err := r.r.Read(b) + if err != nil || m != n { + return b, fmt.Errorf("error reading bytes from stream: %v", err) + } + return b, nil +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/rpc_unicode_string.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/rpc_unicode_string.go new file mode 100644 index 00000000..4bf02e0e --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/rpc_unicode_string.go @@ -0,0 +1,13 @@ +package mstypes + +// RPCUnicodeString implements https://msdn.microsoft.com/en-us/library/cc230365.aspx +type RPCUnicodeString struct { + Length uint16 // The length, in bytes, of the string pointed to by the Buffer member, not including the terminating null character if any. The length MUST be a multiple of 2. The length SHOULD equal the entire size of the Buffer, in which case there is no terminating null character. Any method that accesses this structure MUST use the Length specified instead of relying on the presence or absence of a null character. + MaximumLength uint16 // The maximum size, in bytes, of the string pointed to by Buffer. The size MUST be a multiple of 2. If not, the size MUST be decremented by 1 prior to use. This value MUST not be less than Length. 
+ Value string `ndr:"pointer,conformant,varying"` +} + +// String returns the RPCUnicodeString string value +func (r *RPCUnicodeString) String() string { + return r.Value +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/sid.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/sid.go new file mode 100644 index 00000000..98a9c5ab --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/sid.go @@ -0,0 +1,32 @@ +package mstypes + +import ( + "encoding/binary" + "encoding/hex" + "fmt" +) + +// RPCSID implements https://msdn.microsoft.com/en-us/library/cc230364.aspx +type RPCSID struct { + Revision uint8 // An 8-bit unsigned integer that specifies the revision level of the SID. This value MUST be set to 0x01. + SubAuthorityCount uint8 // An 8-bit unsigned integer that specifies the number of elements in the SubAuthority array. The maximum number of elements allowed is 15. + IdentifierAuthority [6]byte // An RPC_SID_IDENTIFIER_AUTHORITY structure that indicates the authority under which the SID was created. It describes the entity that created the SID. The Identifier Authority value {0,0,0,0,0,5} denotes SIDs created by the NT SID authority. + SubAuthority []uint32 `ndr:"conformant"` // A variable length array of unsigned 32-bit integers that uniquely identifies a principal relative to the IdentifierAuthority. Its length is determined by SubAuthorityCount. +} + +// String returns the string representation of the RPC_SID. +func (s *RPCSID) String() string { + var str string + b := append(make([]byte, 2, 2), s.IdentifierAuthority[:]...) + // For a strange reason this is read big endian: https://msdn.microsoft.com/en-us/library/dd302645.aspx + i := binary.BigEndian.Uint64(b) + if i >= 4294967296 { + str = fmt.Sprintf("S-1-0x%s", hex.EncodeToString(s.IdentifierAuthority[:])) + } else { + str = fmt.Sprintf("S-1-%d", i) + } + for _, sub := range s.SubAuthority { + str = fmt.Sprintf("%s-%d", str, sub) + } + return str +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/user_session_key.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/user_session_key.go new file mode 100644 index 00000000..fcf0a5d9 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/user_session_key.go @@ -0,0 +1,11 @@ +package mstypes + +// CypherBlock implements https://msdn.microsoft.com/en-us/library/cc237040.aspx +type CypherBlock struct { + Data [8]byte // size = 8 +} + +// UserSessionKey implements https://msdn.microsoft.com/en-us/library/cc237080.aspx +type UserSessionKey struct { + CypherBlock [2]CypherBlock // size = 2 +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/arrays.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/arrays.go new file mode 100644 index 00000000..5e2def2a --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/arrays.go @@ -0,0 +1,413 @@ +package ndr + +import ( + "errors" + "fmt" + "reflect" + "strconv" +) + +// intFromTag returns an int that is a value in a struct tag key/value pair +func intFromTag(tag reflect.StructTag, key string) (int, error) { + ndrTag := parseTags(tag) + d := 1 + if n, ok := ndrTag.Map[key]; ok { + i, err := strconv.Atoi(n) + if err != nil { + return d, fmt.Errorf("invalid dimensions tag [%s]: %v", n, err) + } + d = i + } + return d, nil +} + +// parseDimensions returns the a slice of the size of each dimension and type of the member at the deepest level. 
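
For a feel of the RPC_SID representation above, here is an illustrative sketch (not part of the diff; the sub-authority values are made up). String() renders the 6-byte identifier authority after the "S-1-" prefix and then appends each sub-authority in order:

```go
package main

import (
	"fmt"

	"gopkg.in/jcmturner/rpc.v1/mstypes"
)

func main() {
	// {0,0,0,0,0,5} is the NT authority; the sub-authorities are placeholders.
	sid := mstypes.RPCSID{
		Revision:            1,
		SubAuthorityCount:   2,
		IdentifierAuthority: [6]byte{0, 0, 0, 0, 0, 5},
		SubAuthority:        []uint32{21, 500},
	}
	fmt.Println(sid.String()) // S-1-5-21-500
}
```
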
+func parseDimensions(v reflect.Value) (l []int, tb reflect.Type) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + t := v.Type() + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Array && t.Kind() != reflect.Slice { + return + } + l = append(l, v.Len()) + if t.Elem().Kind() == reflect.Array || t.Elem().Kind() == reflect.Slice { + // contains array or slice + var m []int + m, tb = parseDimensions(v.Index(0)) + l = append(l, m...) + } else { + tb = t.Elem() + } + return +} + +// sliceDimensions returns the count of dimensions a slice has. +func sliceDimensions(t reflect.Type) (d int, tb reflect.Type) { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() == reflect.Slice { + d++ + var n int + n, tb = sliceDimensions(t.Elem()) + d += n + } else { + tb = t + } + return +} + +// makeSubSlices is a deep recursive creation/initialisation of multi-dimensional slices. +// Takes the reflect.Value of the 1st dimension and a slice of the lengths of the sub dimensions +func makeSubSlices(v reflect.Value, l []int) { + ty := v.Type().Elem() + if ty.Kind() != reflect.Slice { + return + } + for i := 0; i < v.Len(); i++ { + s := reflect.MakeSlice(ty, l[0], l[0]) + v.Index(i).Set(s) + // Are there more sub dimensions? + if len(l) > 1 { + makeSubSlices(v.Index(i), l[1:]) + } + } + return +} + +// multiDimensionalIndexPermutations returns all the permutations of the indexes of a multi-dimensional slice. +// The input is a slice of integers that indicates the max size/length of each dimension +func multiDimensionalIndexPermutations(l []int) (ps [][]int) { + z := make([]int, len(l), len(l)) // The zeros permutation + ps = append(ps, z) + // for each dimension, in reverse + for i := len(l) - 1; i >= 0; i-- { + ws := make([][]int, len(ps)) + copy(ws, ps) + //create a permutation for each of the iterations of the current dimension + for j := 1; j <= l[i]-1; j++ { + // For each existing permutation + for _, p := range ws { + np := make([]int, len(p), len(p)) + copy(np, p) + np[i] = j + ps = append(ps, np) + } + } + } + return +} + +// precedingMax reads off the next conformant max value +func (dec *Decoder) precedingMax() uint32 { + m := dec.conformantMax[0] + dec.conformantMax = dec.conformantMax[1:] + return m +} + +// fillFixedArray establishes if the fixed array is uni or multi dimensional and then fills it. +func (dec *Decoder) fillFixedArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + l, t := parseDimensions(v) + if t.Kind() == reflect.String { + tag = reflect.StructTag(subStringArrayTag) + } + if len(l) < 1 { + return errors.New("could not establish dimensions of fixed array") + } + if len(l) == 1 { + err := dec.fillUniDimensionalFixedArray(v, tag, def) + if err != nil { + return fmt.Errorf("could not fill uni-dimensional fixed array: %v", err) + } + return nil + } + // Fixed array is multidimensional + ps := multiDimensionalIndexPermutations(l[:len(l)-1]) + for _, p := range ps { + // Get current multi-dimensional index to fill + a := v + for _, i := range p { + a = a.Index(i) + } + // fill with the last dimension array + err := dec.fillUniDimensionalFixedArray(a, tag, def) + if err != nil { + return fmt.Errorf("could not fill dimension %v of multi-dimensional fixed array: %v", p, err) + } + } + return nil +} + +// readUniDimensionalFixedArray reads an array (not slice) from the byte stream. 
+func (dec *Decoder) fillUniDimensionalFixedArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + for i := 0; i < v.Len(); i++ { + err := dec.fill(v.Index(i), tag, def) + if err != nil { + return fmt.Errorf("could not fill index %d of fixed array: %v", i, err) + } + } + return nil +} + +// fillConformantArray establishes if the conformant array is uni or multi dimensional and then fills the slice. +func (dec *Decoder) fillConformantArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + d, _ := sliceDimensions(v.Type()) + if d > 1 { + err := dec.fillMultiDimensionalConformantArray(v, d, tag, def) + if err != nil { + return err + } + } else { + err := dec.fillUniDimensionalConformantArray(v, tag, def) + if err != nil { + return err + } + } + return nil +} + +// fillUniDimensionalConformantArray fills the uni-dimensional slice value. +func (dec *Decoder) fillUniDimensionalConformantArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + m := dec.precedingMax() + n := int(m) + a := reflect.MakeSlice(v.Type(), n, n) + for i := 0; i < n; i++ { + err := dec.fill(a.Index(i), tag, def) + if err != nil { + return fmt.Errorf("could not fill index %d of uni-dimensional conformant array: %v", i, err) + } + } + v.Set(a) + return nil +} + +// fillMultiDimensionalConformantArray fills the multi-dimensional slice value provided from conformant array data. +// The number of dimensions must be specified. This must be less than or equal to the dimensions in the slice for this +// method not to panic. +func (dec *Decoder) fillMultiDimensionalConformantArray(v reflect.Value, d int, tag reflect.StructTag, def *[]deferedPtr) error { + // Read the max size of each dimensions from the ndr stream + l := make([]int, d, d) + for i := range l { + l[i] = int(dec.precedingMax()) + } + // Initialise size of slices + // Initialise the size of the 1st dimension + ty := v.Type() + v.Set(reflect.MakeSlice(ty, l[0], l[0])) + // Initialise the size of the other dimensions recursively + makeSubSlices(v, l[1:]) + + // Get all permutations of the indexes and go through each and fill + ps := multiDimensionalIndexPermutations(l) + for _, p := range ps { + // Get current multi-dimensional index to fill + a := v + for _, i := range p { + a = a.Index(i) + } + err := dec.fill(a, tag, def) + if err != nil { + return fmt.Errorf("could not fill index %v of slice: %v", p, err) + } + } + return nil +} + +// fillVaryingArray establishes if the varying array is uni or multi dimensional and then fills the slice. +func (dec *Decoder) fillVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + d, t := sliceDimensions(v.Type()) + if d > 1 { + err := dec.fillMultiDimensionalVaryingArray(v, t, d, tag, def) + if err != nil { + return err + } + } else { + err := dec.fillUniDimensionalVaryingArray(v, tag, def) + if err != nil { + return err + } + } + return nil +} + +// fillUniDimensionalVaryingArray fills the uni-dimensional slice value. +func (dec *Decoder) fillUniDimensionalVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + o, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not read offset of uni-dimensional varying array: %v", err) + } + s, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not establish actual count of uni-dimensional varying array: %v", err) + } + t := v.Type() + // Total size of the array is the offset in the index being passed plus the actual count of elements being passed. 
+ n := int(s + o) + a := reflect.MakeSlice(t, n, n) + // Populate the array starting at the offset specified + for i := int(o); i < n; i++ { + err := dec.fill(a.Index(i), tag, def) + if err != nil { + return fmt.Errorf("could not fill index %d of uni-dimensional varying array: %v", i, err) + } + } + v.Set(a) + return nil +} + +// fillMultiDimensionalVaryingArray fills the multi-dimensional slice value provided from varying array data. +// The number of dimensions must be specified. This must be less than or equal to the dimensions in the slice for this +// method not to panic. +func (dec *Decoder) fillMultiDimensionalVaryingArray(v reflect.Value, t reflect.Type, d int, tag reflect.StructTag, def *[]deferedPtr) error { + // Read the offset and actual count of each dimensions from the ndr stream + o := make([]int, d, d) + l := make([]int, d, d) + for i := range l { + off, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not read offset of dimension %d: %v", i+1, err) + } + o[i] = int(off) + s, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not read size of dimension %d: %v", i+1, err) + } + l[i] = int(s) + int(off) + } + // Initialise size of slices + // Initialise the size of the 1st dimension + ty := v.Type() + v.Set(reflect.MakeSlice(ty, l[0], l[0])) + // Initialise the size of the other dimensions recursively + makeSubSlices(v, l[1:]) + + // Get all permutations of the indexes and go through each and fill + ps := multiDimensionalIndexPermutations(l) + for _, p := range ps { + // Get current multi-dimensional index to fill + a := v + var os bool // should this permutation be skipped due to the offset of any of the dimensions? + for i, j := range p { + if j < o[i] { + os = true + break + } + a = a.Index(j) + } + if os { + // This permutation should be skipped as it is less than the offset for one of the dimensions. + continue + } + err := dec.fill(a, tag, def) + if err != nil { + return fmt.Errorf("could not fill index %v of slice: %v", p, err) + } + } + return nil +} + +// fillConformantVaryingArray establishes if the varying array is uni or multi dimensional and then fills the slice. +func (dec *Decoder) fillConformantVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + d, t := sliceDimensions(v.Type()) + if d > 1 { + err := dec.fillMultiDimensionalConformantVaryingArray(v, t, d, tag, def) + if err != nil { + return err + } + } else { + err := dec.fillUniDimensionalConformantVaryingArray(v, tag, def) + if err != nil { + return err + } + } + return nil +} + +// fillUniDimensionalConformantVaryingArray fills the uni-dimensional slice value. 
+func (dec *Decoder) fillUniDimensionalConformantVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + m := dec.precedingMax() + o, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not read offset of uni-dimensional conformant varying array: %v", err) + } + s, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not establish actual count of uni-dimensional conformant varying array: %v", err) + } + if m < o+s { + return errors.New("max count is less than the offset plus actual count") + } + t := v.Type() + n := int(s) + a := reflect.MakeSlice(t, n, n) + for i := int(o); i < n; i++ { + err := dec.fill(a.Index(i), tag, def) + if err != nil { + return fmt.Errorf("could not fill index %d of uni-dimensional conformant varying array: %v", i, err) + } + } + v.Set(a) + return nil +} + +// fillMultiDimensionalConformantVaryingArray fills the multi-dimensional slice value provided from conformant varying array data. +// The number of dimensions must be specified. This must be less than or equal to the dimensions in the slice for this +// method not to panic. +func (dec *Decoder) fillMultiDimensionalConformantVaryingArray(v reflect.Value, t reflect.Type, d int, tag reflect.StructTag, def *[]deferedPtr) error { + // Read the offset and actual count of each dimensions from the ndr stream + m := make([]int, d, d) + for i := range m { + m[i] = int(dec.precedingMax()) + } + o := make([]int, d, d) + l := make([]int, d, d) + for i := range l { + off, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not read offset of dimension %d: %v", i+1, err) + } + o[i] = int(off) + s, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not read actual count of dimension %d: %v", i+1, err) + } + if m[i] < int(s)+int(off) { + m[i] = int(s) + int(off) + } + l[i] = int(s) + } + // Initialise size of slices + // Initialise the size of the 1st dimension + ty := v.Type() + v.Set(reflect.MakeSlice(ty, m[0], m[0])) + // Initialise the size of the other dimensions recursively + makeSubSlices(v, m[1:]) + + // Get all permutations of the indexes and go through each and fill + ps := multiDimensionalIndexPermutations(m) + for _, p := range ps { + // Get current multi-dimensional index to fill + a := v + var os bool // should this permutation be skipped due to the offset of any of the dimensions or max is higher than the actual count being passed + for i, j := range p { + if j < o[i] || j >= l[i] { + os = true + break + } + a = a.Index(j) + } + if os { + // This permutation should be skipped as it is less than the offset for one of the dimensions. 
+ continue + } + err := dec.fill(a, tag, def) + if err != nil { + return fmt.Errorf("could not fill index %v of slice: %v", p, err) + } + } + return nil +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/decoder.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/decoder.go new file mode 100644 index 00000000..6157b4ef --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/decoder.go @@ -0,0 +1,393 @@ +// Package ndr provides the ability to unmarshal NDR encoded byte steams into Go data structures +package ndr + +import ( + "bufio" + "fmt" + "io" + "reflect" + "strings" +) + +// Struct tag values +const ( + TagConformant = "conformant" + TagVarying = "varying" + TagPointer = "pointer" + TagPipe = "pipe" +) + +// Decoder unmarshals NDR byte stream data into a Go struct representation +type Decoder struct { + r *bufio.Reader // source of the data + size int // initial size of bytes in buffer + ch CommonHeader // NDR common header + ph PrivateHeader // NDR private header + conformantMax []uint32 // conformant max values that were moved to the beginning of the structure + s interface{} // pointer to the structure being populated + current []string // keeps track of the current field being populated +} + +type deferedPtr struct { + v reflect.Value + tag reflect.StructTag +} + +// NewDecoder creates a new instance of a NDR Decoder. +func NewDecoder(r io.Reader) *Decoder { + dec := new(Decoder) + dec.r = bufio.NewReader(r) + dec.r.Peek(int(commonHeaderBytes)) // For some reason an operation is needed on the buffer to initialise it so Buffered() != 0 + dec.size = dec.r.Buffered() + return dec +} + +// Decode unmarshals the NDR encoded bytes into the pointer of a struct provided. +func (dec *Decoder) Decode(s interface{}) error { + dec.s = s + err := dec.readCommonHeader() + if err != nil { + return err + } + err = dec.readPrivateHeader() + if err != nil { + return err + } + _, err = dec.r.Discard(4) //The next 4 bytes are an RPC unique pointer referent. We just skip these. + if err != nil { + return Errorf("unable to process byte stream: %v", err) + } + + return dec.process(s, reflect.StructTag("")) +} + +func (dec *Decoder) process(s interface{}, tag reflect.StructTag) error { + // Scan for conformant fields as their max counts are moved to the beginning + // http://pubs.opengroup.org/onlinepubs/9629399/chap14.htm#tagfcjh_37 + err := dec.scanConformantArrays(s, tag) + if err != nil { + return err + } + // Recursively fill the struct fields + var localDef []deferedPtr + err = dec.fill(s, tag, &localDef) + if err != nil { + return Errorf("could not decode: %v", err) + } + // Read any deferred referents associated with pointers + for _, p := range localDef { + err = dec.process(p.v, p.tag) + if err != nil { + return fmt.Errorf("could not decode deferred referent: %v", err) + } + } + return nil +} + +// scanConformantArrays scans the structure for embedded conformant fields and captures the maximum element counts for +// dimensions of the array that are moved to the beginning of the structure. 
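
The decoder above is driven through NewDecoder and Decode, which is exactly the pattern ClaimsSetMetadata.ClaimsSet() in claims.go uses internally. A sketch of that call path (illustrative only; raw is placeholder data here, so Decode will report a malformed stream rather than real claims):

```go
package main

import (
	"bytes"
	"fmt"

	"gopkg.in/jcmturner/rpc.v1/mstypes"
	"gopkg.in/jcmturner/rpc.v1/ndr"
)

func main() {
	raw := []byte{} // would normally be NDR-encoded claims metadata from a PAC buffer

	var meta mstypes.ClaimsSetMetadata
	if err := ndr.NewDecoder(bytes.NewReader(raw)).Decode(&meta); err != nil {
		fmt.Println("decode failed:", err)
		return
	}

	// ClaimsSet() (claims.go above) runs a second ndr decoder over the
	// embedded, uncompressed ClaimsSetBytes.
	cs, err := meta.ClaimsSet()
	if err != nil {
		fmt.Println("claims set:", err)
		return
	}
	fmt.Println("claims arrays:", cs.ClaimsArrayCount)
}
```
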
+func (dec *Decoder) scanConformantArrays(s interface{}, tag reflect.StructTag) error { + err := dec.conformantScan(s, tag) + if err != nil { + return fmt.Errorf("failed to scan for embedded conformant arrays: %v", err) + } + for i := range dec.conformantMax { + dec.conformantMax[i], err = dec.readUint32() + if err != nil { + return fmt.Errorf("could not read preceding conformant max count index %d: %v", i, err) + } + } + return nil +} + +// conformantScan inspects the structure's fields for whether they are conformant. +func (dec *Decoder) conformantScan(s interface{}, tag reflect.StructTag) error { + ndrTag := parseTags(tag) + if ndrTag.HasValue(TagPointer) { + return nil + } + v := getReflectValue(s) + switch v.Kind() { + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + err := dec.conformantScan(v.Field(i), v.Type().Field(i).Tag) + if err != nil { + return err + } + } + case reflect.String: + if !ndrTag.HasValue(TagConformant) { + break + } + dec.conformantMax = append(dec.conformantMax, uint32(0)) + case reflect.Slice: + if !ndrTag.HasValue(TagConformant) { + break + } + d, t := sliceDimensions(v.Type()) + for i := 0; i < d; i++ { + dec.conformantMax = append(dec.conformantMax, uint32(0)) + } + // For string arrays there is a common max for the strings within the array. + if t.Kind() == reflect.String { + dec.conformantMax = append(dec.conformantMax, uint32(0)) + } + } + return nil +} + +func (dec *Decoder) isPointer(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) (bool, error) { + // Pointer so defer filling the referent + ndrTag := parseTags(tag) + if ndrTag.HasValue(TagPointer) { + p, err := dec.readUint32() + if err != nil { + return true, fmt.Errorf("could not read pointer: %v", err) + } + ndrTag.delete(TagPointer) + if p != 0 { + // if pointer is not zero add to the deferred items at end of stream + *def = append(*def, deferedPtr{v, ndrTag.StructTag()}) + } + return true, nil + } + return false, nil +} + +func getReflectValue(s interface{}) (v reflect.Value) { + if r, ok := s.(reflect.Value); ok { + v = r + } else { + if reflect.ValueOf(s).Kind() == reflect.Ptr { + v = reflect.ValueOf(s).Elem() + } + } + return +} + +// fill populates fields with values from the NDR byte stream. +func (dec *Decoder) fill(s interface{}, tag reflect.StructTag, localDef *[]deferedPtr) error { + v := getReflectValue(s) + + //// Pointer so defer filling the referent + ptr, err := dec.isPointer(v, tag, localDef) + if err != nil { + return fmt.Errorf("could not process struct field(%s): %v", strings.Join(dec.current, "/"), err) + } + if ptr { + return nil + } + + // Populate the value from the byte stream + switch v.Kind() { + case reflect.Struct: + dec.current = append(dec.current, v.Type().Name()) //Track the current field being filled + // in case struct is a union, track this and the selected union field for efficiency + var unionTag reflect.Value + var unionField string // field to fill if struct is a union + // Go through each field in the struct and recursively fill + for i := 0; i < v.NumField(); i++ { + fieldName := v.Type().Field(i).Name + dec.current = append(dec.current, fieldName) //Track the current field being filled + //fmt.Fprintf(os.Stderr, "DEBUG Decoding: %s\n", strings.Join(dec.current, "/")) + structTag := v.Type().Field(i).Tag + ndrTag := parseTags(structTag) + + // Union handling + if !unionTag.IsValid() { + // Is this field a union tag? 
+ unionTag = dec.isUnion(v.Field(i), structTag) + } else { + // What is the selected field value of the union if we don't already know + if unionField == "" { + unionField, err = unionSelectedField(v, unionTag) + if err != nil { + return fmt.Errorf("could not determine selected union value field for %s with discriminat"+ + " tag %s: %v", v.Type().Name(), unionTag, err) + } + } + if ndrTag.HasValue(TagUnionField) && fieldName != unionField { + // is a union and this field has not been selected so will skip it. + dec.current = dec.current[:len(dec.current)-1] //This field has been skipped so remove it from the current field tracker + continue + } + } + + // Check if field is a pointer + if v.Field(i).Type().Implements(reflect.TypeOf(new(RawBytes)).Elem()) && + v.Field(i).Type().Kind() == reflect.Slice && v.Field(i).Type().Elem().Kind() == reflect.Uint8 { + //field is for rawbytes + structTag, err = addSizeToTag(v, v.Field(i), structTag) + if err != nil { + return fmt.Errorf("could not get rawbytes field(%s) size: %v", strings.Join(dec.current, "/"), err) + } + ptr, err := dec.isPointer(v.Field(i), structTag, localDef) + if err != nil { + return fmt.Errorf("could not process struct field(%s): %v", strings.Join(dec.current, "/"), err) + } + if !ptr { + err := dec.readRawBytes(v.Field(i), structTag) + if err != nil { + return fmt.Errorf("could not fill raw bytes struct field(%s): %v", strings.Join(dec.current, "/"), err) + } + } + } else { + err := dec.fill(v.Field(i), structTag, localDef) + if err != nil { + return fmt.Errorf("could not fill struct field(%s): %v", strings.Join(dec.current, "/"), err) + } + } + dec.current = dec.current[:len(dec.current)-1] //This field has been filled so remove it from the current field tracker + } + dec.current = dec.current[:len(dec.current)-1] //This field has been filled so remove it from the current field tracker + case reflect.Bool: + i, err := dec.readBool() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Uint8: + i, err := dec.readUint8() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Uint16: + i, err := dec.readUint16() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Uint32: + i, err := dec.readUint32() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Uint64: + i, err := dec.readUint64() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Int8: + i, err := dec.readInt8() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Int16: + i, err := dec.readInt16() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Int32: + i, err := dec.readInt32() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Int64: + i, err := dec.readInt64() + if err != nil { + return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.String: + ndrTag := parseTags(tag) + conformant := ndrTag.HasValue(TagConformant) + // strings are always varying so this is assumed without an explicit 
tag + var s string + var err error + if conformant { + s, err = dec.readConformantVaryingString(localDef) + if err != nil { + return fmt.Errorf("could not fill with conformant varying string: %v", err) + } + } else { + s, err = dec.readVaryingString(localDef) + if err != nil { + return fmt.Errorf("could not fill with varying string: %v", err) + } + } + v.Set(reflect.ValueOf(s)) + case reflect.Float32: + i, err := dec.readFloat32() + if err != nil { + return fmt.Errorf("could not fill %v: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Float64: + i, err := dec.readFloat64() + if err != nil { + return fmt.Errorf("could not fill %v: %v", v.Type().Name(), err) + } + v.Set(reflect.ValueOf(i)) + case reflect.Array: + err := dec.fillFixedArray(v, tag, localDef) + if err != nil { + return err + } + case reflect.Slice: + if v.Type().Implements(reflect.TypeOf(new(RawBytes)).Elem()) && v.Type().Elem().Kind() == reflect.Uint8 { + //field is for rawbytes + err := dec.readRawBytes(v, tag) + if err != nil { + return fmt.Errorf("could not fill raw bytes struct field(%s): %v", strings.Join(dec.current, "/"), err) + } + break + } + ndrTag := parseTags(tag) + conformant := ndrTag.HasValue(TagConformant) + varying := ndrTag.HasValue(TagVarying) + if ndrTag.HasValue(TagPipe) { + err := dec.fillPipe(v, tag) + if err != nil { + return err + } + break + } + _, t := sliceDimensions(v.Type()) + if t.Kind() == reflect.String && !ndrTag.HasValue(subStringArrayValue) { + // String array + err := dec.readStringsArray(v, tag, localDef) + if err != nil { + return err + } + break + } + // varying is assumed as fixed arrays use the Go array type rather than slice + if conformant && varying { + err := dec.fillConformantVaryingArray(v, tag, localDef) + if err != nil { + return err + } + } else if !conformant && varying { + err := dec.fillVaryingArray(v, tag, localDef) + if err != nil { + return err + } + } else { + //default to conformant and not varying + err := dec.fillConformantArray(v, tag, localDef) + if err != nil { + return err + } + } + default: + return fmt.Errorf("unsupported type") + } + return nil +} + +// readBytes returns a number of bytes from the NDR byte stream. +func (dec *Decoder) readBytes(n int) ([]byte, error) { + //TODO make this take an int64 as input to allow for larger values on all systems? + b := make([]byte, n, n) + m, err := dec.r.Read(b) + if err != nil || m != n { + return b, fmt.Errorf("error reading bytes from stream: %v", err) + } + return b, nil +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/error.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/error.go new file mode 100644 index 00000000..9971194d --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/error.go @@ -0,0 +1,18 @@ +package ndr + +import "fmt" + +// Malformed implements the error interface for malformed NDR encoding errors. +type Malformed struct { + EText string +} + +// Error implements the error interface on the Malformed struct. +func (e Malformed) Error() string { + return fmt.Sprintf("malformed NDR stream: %s", e.EText) +} + +// Errorf formats an error message into a malformed NDR error. 
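
The fill logic above is driven entirely by the ndr struct tags. A compilable sketch of the tag conventions as they appear in the vendored mstypes structs (the struct and field names here are invented for illustration): "conformant" slices have their max counts hoisted to the front of the structure's representation, "varying" adds an offset/actual-count header, "pointer" defers the referent to the end of the enclosing structure, and "unionTag" marks a union discriminant.

```go
package main

import "fmt"

// example mirrors the tag usage in the vendored mstypes structs.
type example struct {
	Count uint32
	IDs   []uint32 `ndr:"pointer,conformant"`         // max count read up front, referent deferred
	Name  string   `ndr:"pointer,conformant,varying"` // conformant varying UTF-16 string
	Tag   uint16   `ndr:"unionTag"`                   // discriminant for a union struct
}

func main() {
	fmt.Printf("%+v\n", example{}) // fields are populated by ndr.Decoder.Decode in practice
}
```
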
+func Errorf(format string, a ...interface{}) Malformed { + return Malformed{EText: fmt.Sprintf(format, a...)} +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/header.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/header.go new file mode 100644 index 00000000..1970ddb6 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/header.go @@ -0,0 +1,116 @@ +package ndr + +import ( + "encoding/binary" + "fmt" +) + +/* +Serialization Version 1 +https://msdn.microsoft.com/en-us/library/cc243563.aspx + +Common Header - https://msdn.microsoft.com/en-us/library/cc243890.aspx +8 bytes in total: +- First byte - Version: Must equal 1 +- Second byte - 1st 4 bits: Endianess (0=Big; 1=Little); 2nd 4 bits: Character Encoding (0=ASCII; 1=EBCDIC) +- 3rd - Floating point representation (This does not seem to be the case in examples for Microsoft test sources) +- 4th - Common Header Length: Must equal 8 +- 5th - 8th - Filler: MUST be set to 0xcccccccc on marshaling, and SHOULD be ignored during unmarshaling. + +Private Header - https://msdn.microsoft.com/en-us/library/cc243919.aspx +8 bytes in total: +- First 4 bytes - Indicates the length of a serialized top-level type in the octet stream. It MUST include the padding length and exclude the header itself. +- Second 4 bytes - Filler: MUST be set to 0 (zero) during marshaling, and SHOULD be ignored during unmarshaling. +*/ + +const ( + protocolVersion uint8 = 1 + commonHeaderBytes uint16 = 8 + bigEndian = 0 + littleEndian = 1 + ascii uint8 = 0 + ebcdic uint8 = 1 + ieee uint8 = 0 + vax uint8 = 1 + cray uint8 = 2 + ibm uint8 = 3 +) + +// CommonHeader implements the NDR common header: https://msdn.microsoft.com/en-us/library/cc243889.aspx +type CommonHeader struct { + Version uint8 + Endianness binary.ByteOrder + CharacterEncoding uint8 + FloatRepresentation uint8 + HeaderLength uint16 + Filler []byte +} + +// PrivateHeader implements the NDR private header: https://msdn.microsoft.com/en-us/library/cc243919.aspx +type PrivateHeader struct { + ObjectBufferLength uint32 + Filler []byte +} + +func (dec *Decoder) readCommonHeader() error { + // Version + vb, err := dec.r.ReadByte() + if err != nil { + return Malformed{EText: "could not read first byte of common header for version"} + } + dec.ch.Version = uint8(vb) + if dec.ch.Version != protocolVersion { + return Malformed{EText: fmt.Sprintf("byte stream does not indicate a RPC Type serialization of version %v", protocolVersion)} + } + // Read Endianness & Character Encoding + eb, err := dec.r.ReadByte() + if err != nil { + return Malformed{EText: "could not read second byte of common header for endianness"} + } + endian := int(eb >> 4 & 0xF) + if endian != 0 && endian != 1 { + return Malformed{EText: "common header does not indicate a valid endianness"} + } + dec.ch.CharacterEncoding = uint8(vb & 0xF) + if dec.ch.CharacterEncoding != 0 && dec.ch.CharacterEncoding != 1 { + return Malformed{EText: "common header does not indicate a valid character encoding"} + } + switch endian { + case littleEndian: + dec.ch.Endianness = binary.LittleEndian + case bigEndian: + dec.ch.Endianness = binary.BigEndian + } + // Common header length + lb, err := dec.readBytes(2) + if err != nil { + return Malformed{EText: fmt.Sprintf("could not read common header length: %v", err)} + } + dec.ch.HeaderLength = dec.ch.Endianness.Uint16(lb) + if dec.ch.HeaderLength != commonHeaderBytes { + return Malformed{EText: "common header does not indicate a valid length"} + } + // Filler bytes + dec.ch.Filler, err = dec.readBytes(4) + if err != nil { + 
return Malformed{EText: fmt.Sprintf("could not read common header filler: %v", err)} + } + return nil +} + +func (dec *Decoder) readPrivateHeader() error { + // The next 8 bytes after the common header comprise the RPC type marshalling private header for constructed types. + err := binary.Read(dec.r, dec.ch.Endianness, &dec.ph.ObjectBufferLength) + if err != nil { + return Malformed{EText: "could not read private header object buffer length"} + } + if dec.ph.ObjectBufferLength%8 != 0 { + return Malformed{EText: "object buffer length not a multiple of 8"} + } + // Filler bytes + dec.ph.Filler, err = dec.readBytes(4) + if err != nil { + return Malformed{EText: fmt.Sprintf("could not read private header filler: %v", err)} + } + return nil +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/pipe.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/pipe.go new file mode 100644 index 00000000..5fd27da0 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/pipe.go @@ -0,0 +1,31 @@ +package ndr + +import ( + "fmt" + "reflect" +) + +func (dec *Decoder) fillPipe(v reflect.Value, tag reflect.StructTag) error { + s, err := dec.readUint32() // read element count of first chunk + if err != nil { + return err + } + a := reflect.MakeSlice(v.Type(), 0, 0) + c := reflect.MakeSlice(v.Type(), int(s), int(s)) + for s != 0 { + for i := 0; i < int(s); i++ { + err := dec.fill(c.Index(i), tag, &[]deferedPtr{}) + if err != nil { + return fmt.Errorf("could not fill element %d of pipe: %v", i, err) + } + } + s, err = dec.readUint32() // read element count of first chunk + if err != nil { + return err + } + a = reflect.AppendSlice(a, c) + c = reflect.MakeSlice(v.Type(), int(s), int(s)) + } + v.Set(a) + return nil +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/primitives.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/primitives.go new file mode 100644 index 00000000..7eb1d1af --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/primitives.go @@ -0,0 +1,211 @@ +package ndr + +import ( + "bytes" + "encoding/binary" + "math" +) + +// Byte sizes of primitive types +const ( + SizeBool = 1 + SizeChar = 1 + SizeUint8 = 1 + SizeUint16 = 2 + SizeUint32 = 4 + SizeUint64 = 8 + SizeEnum = 2 + SizeSingle = 4 + SizeDouble = 8 + SizePtr = 4 +) + +// Bool is an NDR Boolean which is a logical quantity that assumes one of two values: TRUE or FALSE. +// NDR represents a Boolean as one octet. +// It represents a value of FALSE as a zero octet, an octet in which every bit is reset. +// It represents a value of TRUE as a non-zero octet, an octet in which one or more bits are set. + +// Char is an NDR character. +// NDR represents a character as one octet. +// Characters have two representation formats: ASCII and EBCDIC. + +// USmall is an unsigned 8 bit integer + +// UShort is an unsigned 16 bit integer + +// ULong is an unsigned 32 bit integer + +// UHyper is an unsigned 64 bit integer + +// Small is an signed 8 bit integer + +// Short is an signed 16 bit integer + +// Long is an signed 32 bit integer + +// Hyper is an signed 64 bit integer + +// Enum is the NDR representation of enumerated types as signed short integers (2 octets) + +// Single is an NDR defined single-precision floating-point data type + +// Double is an NDR defined double-precision floating-point data type + +// readBool reads a byte representing a boolean. +// NDR represents a Boolean as one octet. +// It represents a value of FALSE as a zero octet, an octet in which every bit is reset. 
+// It represents a value of TRUE as a non-zero octet, an octet in which one or more bits are set. +func (dec *Decoder) readBool() (bool, error) { + i, err := dec.readUint8() + if err != nil { + return false, err + } + if i != 0 { + return true, nil + } + return false, nil +} + +// readChar reads bytes representing a 8bit ASCII integer cast to a rune. +func (dec *Decoder) readChar() (rune, error) { + var r rune + a, err := dec.readUint8() + if err != nil { + return r, err + } + return rune(a), nil +} + +// readUint8 reads bytes representing a 8bit unsigned integer. +func (dec *Decoder) readUint8() (uint8, error) { + b, err := dec.r.ReadByte() + if err != nil { + return uint8(0), err + } + return uint8(b), nil +} + +// readUint16 reads bytes representing a 16bit unsigned integer. +func (dec *Decoder) readUint16() (uint16, error) { + dec.ensureAlignment(SizeUint16) + b, err := dec.readBytes(SizeUint16) + if err != nil { + return uint16(0), err + } + return dec.ch.Endianness.Uint16(b), nil +} + +// readUint32 reads bytes representing a 32bit unsigned integer. +func (dec *Decoder) readUint32() (uint32, error) { + dec.ensureAlignment(SizeUint32) + b, err := dec.readBytes(SizeUint32) + if err != nil { + return uint32(0), err + } + return dec.ch.Endianness.Uint32(b), nil +} + +// readUint32 reads bytes representing a 32bit unsigned integer. +func (dec *Decoder) readUint64() (uint64, error) { + dec.ensureAlignment(SizeUint64) + b, err := dec.readBytes(SizeUint64) + if err != nil { + return uint64(0), err + } + return dec.ch.Endianness.Uint64(b), nil +} + +func (dec *Decoder) readInt8() (int8, error) { + dec.ensureAlignment(SizeUint8) + b, err := dec.readBytes(SizeUint8) + if err != nil { + return 0, err + } + var i int8 + buf := bytes.NewReader(b) + err = binary.Read(buf, dec.ch.Endianness, &i) + if err != nil { + return 0, err + } + return i, nil +} + +func (dec *Decoder) readInt16() (int16, error) { + dec.ensureAlignment(SizeUint16) + b, err := dec.readBytes(SizeUint16) + if err != nil { + return 0, err + } + var i int16 + buf := bytes.NewReader(b) + err = binary.Read(buf, dec.ch.Endianness, &i) + if err != nil { + return 0, err + } + return i, nil +} + +func (dec *Decoder) readInt32() (int32, error) { + dec.ensureAlignment(SizeUint32) + b, err := dec.readBytes(SizeUint32) + if err != nil { + return 0, err + } + var i int32 + buf := bytes.NewReader(b) + err = binary.Read(buf, dec.ch.Endianness, &i) + if err != nil { + return 0, err + } + return i, nil +} + +func (dec *Decoder) readInt64() (int64, error) { + dec.ensureAlignment(SizeUint64) + b, err := dec.readBytes(SizeUint64) + if err != nil { + return 0, err + } + var i int64 + buf := bytes.NewReader(b) + err = binary.Read(buf, dec.ch.Endianness, &i) + if err != nil { + return 0, err + } + return i, nil +} + +// https://en.wikipedia.org/wiki/IEEE_754-1985 +func (dec *Decoder) readFloat32() (f float32, err error) { + dec.ensureAlignment(SizeSingle) + b, err := dec.readBytes(SizeSingle) + if err != nil { + return + } + bits := dec.ch.Endianness.Uint32(b) + f = math.Float32frombits(bits) + return +} + +func (dec *Decoder) readFloat64() (f float64, err error) { + dec.ensureAlignment(SizeDouble) + b, err := dec.readBytes(SizeDouble) + if err != nil { + return + } + bits := dec.ch.Endianness.Uint64(b) + f = math.Float64frombits(bits) + return +} + +// NDR enforces NDR alignment of primitive data; that is, any primitive of size n octets is aligned at a octet stream +// index that is a multiple of n. (In this version of NDR, n is one of {1, 2, 4, 8}.) 
An octet stream index indicates +// the number of an octet in an octet stream when octets are numbered, beginning with 0, from the first octet in the +// stream. Where necessary, an alignment gap, consisting of octets of unspecified value, precedes the representation +// of a primitive. The gap is of the smallest size sufficient to align the primitive. +func (dec *Decoder) ensureAlignment(n int) { + p := dec.size - dec.r.Buffered() + if s := p % n; s != 0 { + dec.r.Discard(n - s) + } +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/rawbytes.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/rawbytes.go new file mode 100644 index 00000000..9ee59fb1 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/rawbytes.go @@ -0,0 +1,61 @@ +package ndr + +import ( + "errors" + "fmt" + "reflect" + "strconv" +) + +// type MyBytes []byte +// implement RawBytes interface + +const ( + sizeMethod = "Size" +) + +// RawBytes interface should be implemented if reading just a number of bytes from the NDR stream +type RawBytes interface { + Size(interface{}) int +} + +func rawBytesSize(parent reflect.Value, v reflect.Value) (int, error) { + sf := v.MethodByName(sizeMethod) + if !sf.IsValid() { + return 0, fmt.Errorf("could not find a method called %s on the implementation of RawBytes", sizeMethod) + } + in := []reflect.Value{parent} + f := sf.Call(in) + if f[0].Kind() != reflect.Int { + return 0, errors.New("the RawBytes size function did not return an integer") + } + return int(f[0].Int()), nil +} + +func addSizeToTag(parent reflect.Value, v reflect.Value, tag reflect.StructTag) (reflect.StructTag, error) { + size, err := rawBytesSize(parent, v) + if err != nil { + return tag, err + } + ndrTag := parseTags(tag) + ndrTag.Map["size"] = strconv.Itoa(size) + return ndrTag.StructTag(), nil +} + +func (dec *Decoder) readRawBytes(v reflect.Value, tag reflect.StructTag) error { + ndrTag := parseTags(tag) + sizeStr, ok := ndrTag.Map["size"] + if !ok { + return errors.New("size tag not available") + } + size, err := strconv.Atoi(sizeStr) + if err != nil { + return fmt.Errorf("size not valid: %v", err) + } + b, err := dec.readBytes(size) + if err != nil { + return err + } + v.Set(reflect.ValueOf(b).Convert(v.Type())) + return nil +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/strings.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/strings.go new file mode 100644 index 00000000..b7a910b3 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/strings.go @@ -0,0 +1,70 @@ +package ndr + +import ( + "fmt" + "reflect" +) + +const ( + subStringArrayTag = `ndr:"varying,X-subStringArray"` + subStringArrayValue = "X-subStringArray" +) + +func uint16SliceToString(a []uint16) string { + s := make([]rune, len(a), len(a)) + for i := range s { + s[i] = rune(a[i]) + } + if len(s) > 0 { + // Remove any null terminator + if s[len(s)-1] == rune(0) { + s = s[:len(s)-1] + } + } + return string(s) +} + +func (dec *Decoder) readVaryingString(def *[]deferedPtr) (string, error) { + a := new([]uint16) + v := reflect.ValueOf(a) + var t reflect.StructTag + err := dec.fillUniDimensionalVaryingArray(v.Elem(), t, def) + if err != nil { + return "", err + } + s := uint16SliceToString(*a) + return s, nil +} + +func (dec *Decoder) readConformantVaryingString(def *[]deferedPtr) (string, error) { + a := new([]uint16) + v := reflect.ValueOf(a) + var t reflect.StructTag + err := dec.fillUniDimensionalConformantVaryingArray(v.Elem(), t, def) + if err != nil { + return "", err + } + s := uint16SliceToString(*a) + return s, nil +} + +func (dec *Decoder) 
readStringsArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error { + d, _ := sliceDimensions(v.Type()) + ndrTag := parseTags(tag) + var m []int + //var ms int + if ndrTag.HasValue(TagConformant) { + for i := 0; i < d; i++ { + m = append(m, int(dec.precedingMax())) + } + //common max size + _ = dec.precedingMax() + //ms = int(n) + } + tag = reflect.StructTag(subStringArrayTag) + err := dec.fillVaryingArray(v, tag, def) + if err != nil { + return fmt.Errorf("could not read string array: %v", err) + } + return nil +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/tags.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/tags.go new file mode 100644 index 00000000..01657e0a --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/tags.go @@ -0,0 +1,69 @@ +package ndr + +import ( + "fmt" + "reflect" + "strings" +) + +const ndrNameSpace = "ndr" + +type tags struct { + Values []string + Map map[string]string +} + +// parse the struct field tags and extract the ndr related ones. +// format of tag ndr:"value,key:value1,value2" +func parseTags(st reflect.StructTag) tags { + s := st.Get(ndrNameSpace) + t := tags{ + Values: []string{}, + Map: make(map[string]string), + } + if s != "" { + ndrTags := strings.Trim(s, `"`) + for _, tag := range strings.Split(ndrTags, ",") { + if strings.Contains(tag, ":") { + m := strings.SplitN(tag, ":", 2) + t.Map[m[0]] = m[1] + } else { + t.Values = append(t.Values, tag) + } + } + } + return t +} + +func appendTag(t reflect.StructTag, s string) reflect.StructTag { + ts := t.Get(ndrNameSpace) + ts = fmt.Sprintf(`%s"%s,%s"`, ndrNameSpace, ts, s) + return reflect.StructTag(ts) +} + +func (t *tags) StructTag() reflect.StructTag { + mv := t.Values + for key, val := range t.Map { + mv = append(mv, key+":"+val) + } + s := ndrNameSpace + ":" + `"` + strings.Join(mv, ",") + `"` + return reflect.StructTag(s) +} + +func (t *tags) delete(s string) { + for i, x := range t.Values { + if x == s { + t.Values = append(t.Values[:i], t.Values[i+1:]...) + } + } + delete(t.Map, s) +} + +func (t *tags) HasValue(s string) bool { + for _, v := range t.Values { + if v == s { + return true + } + } + return false +} diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/union.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/union.go new file mode 100644 index 00000000..6a657fa6 --- /dev/null +++ b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/union.go @@ -0,0 +1,57 @@ +package ndr + +import ( + "errors" + "fmt" + "reflect" +) + +// Union interface must be implemented by structs that will be unmarshaled into from the NDR byte stream union representation. +// The union's discriminating tag will be passed to the SwitchFunc method. 
+// The discriminating tag field must have the struct tag: `ndr:"unionTag"` +// If the union is encapsulated the discriminating tag field must have the struct tag: `ndr:"encapsulated"` +// The possible value fields that can be selected from must have the struct tag: `ndr:"unionField"` +type Union interface { + SwitchFunc(t interface{}) string +} + +// Union related constants such as struct tag values +const ( + unionSelectionFuncName = "SwitchFunc" + TagEncapsulated = "encapsulated" + TagUnionTag = "unionTag" + TagUnionField = "unionField" +) + +func (dec *Decoder) isUnion(field reflect.Value, tag reflect.StructTag) (r reflect.Value) { + ndrTag := parseTags(tag) + if !ndrTag.HasValue(TagUnionTag) { + return + } + r = field + // For a non-encapsulated union, the discriminant is marshalled into the transmitted data stream twice: once as the + // field or parameter, which is referenced by the switch_is construct, in the procedure argument list; and once as + // the first part of the union representation. + if !ndrTag.HasValue(TagEncapsulated) { + dec.r.Discard(int(r.Type().Size())) + } + return +} + +// unionSelectedField returns the field name of which of the union values to fill +func unionSelectedField(union, discriminant reflect.Value) (string, error) { + if !union.Type().Implements(reflect.TypeOf(new(Union)).Elem()) { + return "", errors.New("struct does not implement union interface") + } + args := []reflect.Value{discriminant} + // Call the SelectFunc of the union struct to find the name of the field to fill with the value selected. + sf := union.MethodByName(unionSelectionFuncName) + if !sf.IsValid() { + return "", fmt.Errorf("could not find a selection function called %s in the unions struct representation", unionSelectionFuncName) + } + f := sf.Call(args) + if f[0].Kind() != reflect.String || f[0].String() == "" { + return "", fmt.Errorf("the union select function did not return a string for the name of the field to fill") + } + return f[0].String(), nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 9375eac2..d5c68688 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,15 +1,14 @@ -# github.com/Shopify/sarama v1.11.0 +# github.com/Shopify/sarama v1.26.4 ## explicit github.com/Shopify/sarama github.com/Shopify/sarama/mocks -# github.com/Shopify/toxiproxy v2.1.4+incompatible -## explicit # github.com/araddon/gou v0.0.0-20190110011759-c797efecbb61 ## explicit github.com/araddon/gou -# github.com/aws/aws-sdk-go v1.19.28 +# github.com/aws/aws-sdk-go v1.32.7 ## explicit github.com/aws/aws-sdk-go/aws +github.com/aws/aws-sdk-go/aws/arn github.com/aws/aws-sdk-go/aws/awserr github.com/aws/aws-sdk-go/aws/awsutil github.com/aws/aws-sdk-go/aws/client @@ -27,72 +26,121 @@ github.com/aws/aws-sdk-go/aws/endpoints github.com/aws/aws-sdk-go/aws/request github.com/aws/aws-sdk-go/aws/session github.com/aws/aws-sdk-go/aws/signer/v4 +github.com/aws/aws-sdk-go/internal/context github.com/aws/aws-sdk-go/internal/ini github.com/aws/aws-sdk-go/internal/s3err github.com/aws/aws-sdk-go/internal/sdkio +github.com/aws/aws-sdk-go/internal/sdkmath github.com/aws/aws-sdk-go/internal/sdkrand github.com/aws/aws-sdk-go/internal/sdkuri github.com/aws/aws-sdk-go/internal/shareddefaults +github.com/aws/aws-sdk-go/internal/strings 
+github.com/aws/aws-sdk-go/internal/sync/singleflight +github.com/aws/aws-sdk-go/private/checksum github.com/aws/aws-sdk-go/private/protocol github.com/aws/aws-sdk-go/private/protocol/eventstream github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi +github.com/aws/aws-sdk-go/private/protocol/json/jsonutil github.com/aws/aws-sdk-go/private/protocol/query github.com/aws/aws-sdk-go/private/protocol/query/queryutil github.com/aws/aws-sdk-go/private/protocol/rest github.com/aws/aws-sdk-go/private/protocol/restxml github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil github.com/aws/aws-sdk-go/service/s3 +github.com/aws/aws-sdk-go/service/s3/internal/arn github.com/aws/aws-sdk-go/service/s3/s3iface github.com/aws/aws-sdk-go/service/s3/s3manager github.com/aws/aws-sdk-go/service/sts -# github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 +github.com/aws/aws-sdk-go/service/sts/stsiface +# github.com/bitly/go-hostpool v0.1.0 ## explicit github.com/bitly/go-hostpool # github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 ## explicit -# github.com/davecgh/go-spew v1.1.0 +# github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew -# github.com/eapache/go-resiliency v0.0.0-20160104191539-b86b1ec0dd42 -## explicit +# github.com/eapache/go-resiliency v1.2.0 github.com/eapache/go-resiliency/breaker -# github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934 -## explicit +# github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 github.com/eapache/go-xerial-snappy # github.com/eapache/queue v1.1.0 -## explicit github.com/eapache/queue -# github.com/golang/snappy v0.0.0-20160529050041-d9eb7a3d35ec -## explicit +# github.com/golang/snappy v0.0.1 github.com/golang/snappy -# github.com/google/gopacket v0.0.0-20190211013929-f86faeb88894 +# github.com/google/gopacket v1.1.17 ## explicit github.com/google/gopacket github.com/google/gopacket/layers github.com/google/gopacket/pcap -# github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af +# github.com/hashicorp/go-uuid v1.0.2 +github.com/hashicorp/go-uuid +# github.com/jcmturner/gofork v1.0.0 +github.com/jcmturner/gofork/encoding/asn1 +github.com/jcmturner/gofork/x/crypto/pbkdf2 +# github.com/jmespath/go-jmespath v0.3.0 github.com/jmespath/go-jmespath -# github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 -## explicit -github.com/klauspost/crc32 -# github.com/kr/pretty v0.0.0-20130510082846-bc9499caa0f4 -## explicit -# github.com/kr/text v0.0.0-20130911015532-6807e777504f +# github.com/klauspost/compress v1.10.10 ## explicit +github.com/klauspost/compress/fse +github.com/klauspost/compress/huff0 +github.com/klauspost/compress/snappy +github.com/klauspost/compress/zstd +github.com/klauspost/compress/zstd/internal/xxhash # github.com/mattbaird/elastigo v0.0.0-20170123220020-2fe47fd29e4b ## explicit github.com/mattbaird/elastigo/lib -# github.com/mdlayher/raw v0.0.0-20191009151244-50f2db8cc065 -## explicit -# github.com/pierrec/lz4 v0.0.0-20161206202305-5c9560bfa9ac +# 
github.com/pierrec/lz4 v2.5.2+incompatible ## explicit github.com/pierrec/lz4 -# github.com/pierrec/xxHash v0.0.0-20160112165351-5a004441f897 -## explicit -github.com/pierrec/xxHash/xxHash32 -# github.com/rcrowley/go-metrics v0.0.0-20161128210544-1f30fe9094a5 +github.com/pierrec/lz4/internal/xxh32 +# github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 ## explicit github.com/rcrowley/go-metrics # github.com/smartystreets/goconvey v1.6.4 ## explicit -# github.com/stretchr/testify v1.5.1 -## explicit +# golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 +## explicit +golang.org/x/crypto/md4 +golang.org/x/crypto/pbkdf2 +# golang.org/x/net v0.0.0-20200602114024-627f9648deb9 +## explicit +golang.org/x/net/internal/socks +golang.org/x/net/proxy +# gopkg.in/jcmturner/aescts.v1 v1.0.1 +gopkg.in/jcmturner/aescts.v1 +# gopkg.in/jcmturner/dnsutils.v1 v1.0.1 +gopkg.in/jcmturner/dnsutils.v1 +# gopkg.in/jcmturner/gokrb5.v7 v7.5.0 +gopkg.in/jcmturner/gokrb5.v7/asn1tools +gopkg.in/jcmturner/gokrb5.v7/client +gopkg.in/jcmturner/gokrb5.v7/config +gopkg.in/jcmturner/gokrb5.v7/credentials +gopkg.in/jcmturner/gokrb5.v7/crypto +gopkg.in/jcmturner/gokrb5.v7/crypto/common +gopkg.in/jcmturner/gokrb5.v7/crypto/etype +gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961 +gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962 +gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757 +gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009 +gopkg.in/jcmturner/gokrb5.v7/gssapi +gopkg.in/jcmturner/gokrb5.v7/iana +gopkg.in/jcmturner/gokrb5.v7/iana/addrtype +gopkg.in/jcmturner/gokrb5.v7/iana/adtype +gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag +gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype +gopkg.in/jcmturner/gokrb5.v7/iana/errorcode +gopkg.in/jcmturner/gokrb5.v7/iana/etypeID +gopkg.in/jcmturner/gokrb5.v7/iana/flags +gopkg.in/jcmturner/gokrb5.v7/iana/keyusage +gopkg.in/jcmturner/gokrb5.v7/iana/msgtype +gopkg.in/jcmturner/gokrb5.v7/iana/nametype +gopkg.in/jcmturner/gokrb5.v7/iana/patype +gopkg.in/jcmturner/gokrb5.v7/kadmin +gopkg.in/jcmturner/gokrb5.v7/keytab +gopkg.in/jcmturner/gokrb5.v7/krberror +gopkg.in/jcmturner/gokrb5.v7/messages +gopkg.in/jcmturner/gokrb5.v7/pac +gopkg.in/jcmturner/gokrb5.v7/types +# gopkg.in/jcmturner/rpc.v1 v1.1.0 +gopkg.in/jcmturner/rpc.v1/mstypes +gopkg.in/jcmturner/rpc.v1/ndr
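
A minimal sketch of the alignment rule quoted in the ensureAlignment doc comment above (vendored gopkg.in/jcmturner/rpc.v1/ndr): the gap before a primitive is the smallest number of filler octets that brings the current octet-stream offset up to a multiple of the primitive's alignment. The pad helper below is illustrative only and is not part of the vendored decoder.

package main

import "fmt"

// pad returns how many unspecified filler octets precede a primitive of the
// given alignment when the next octet to be read sits at offset (octets are
// numbered from 0, as in the comment above). Illustrative sketch only.
func pad(offset, align int) int {
	if r := offset % align; r != 0 {
		return align - r
	}
	return 0
}

func main() {
	fmt.Println(pad(5, 4)) // 3: a 4-octet primitive at offset 5 is preceded by 3 gap octets
	fmt.Println(pad(8, 4)) // 0: already aligned
}

This mirrors ensureAlignment, which computes the current offset as dec.size - dec.r.Buffered() and discards n - (offset % n) octets whenever the offset is not already a multiple of n.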
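
The RawBytes contract in rawbytes.go is driven by reflection: rawBytesSize looks up a method named Size on the field, calls it with the parent struct value, and expects an int, which readRawBytes then consumes via the size struct tag as the number of bytes to pull off the stream. A hypothetical implementer might look like the sketch below; the checksum type and its fixed length are assumptions for illustration, not types from the library.

package main

import (
	"fmt"

	"gopkg.in/jcmturner/rpc.v1/ndr"
)

// checksum is a hypothetical fixed-width blob read straight from the NDR
// stream. The decoder passes the enclosing struct to Size, so a real
// implementation could derive the length from a sibling field instead of
// returning a constant.
type checksum []byte

// Size reports how many bytes the decoder should read for this field.
func (c checksum) Size(parent interface{}) int {
	return 16 // assumed fixed width for the sketch
}

var _ ndr.RawBytes = checksum{} // compile-time check that the sketch satisfies the vendored interface

func main() { fmt.Println(checksum{}.Size(nil)) }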
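
Tying the tag format parsed in tags.go (ndr:"value,key:value1,value2") to the union rules documented in union.go, a hypothetical encapsulated union could be declared roughly as below. The struct name, field names, and discriminant values are assumptions for illustration; only the tag names (unionTag, encapsulated, unionField) and the SwitchFunc signature come from the vendored code.

package main

import "gopkg.in/jcmturner/rpc.v1/ndr"

// exampleUnion is a hypothetical encapsulated union: the discriminant carries
// the unionTag (and encapsulated) tag values, and each selectable value
// carries unionField, as described in the Union interface comment above.
type exampleUnion struct {
	Tag uint32 `ndr:"unionTag,encapsulated"`
	A   uint32 `ndr:"unionField"`
	B   uint64 `ndr:"unionField"`
}

// SwitchFunc maps the discriminant to the name of the field the decoder
// should fill, satisfying the vendored ndr.Union interface.
func (u exampleUnion) SwitchFunc(t interface{}) string {
	if v, ok := t.(uint32); ok && v == 1 {
		return "A"
	}
	return "B"
}

var _ ndr.Union = exampleUnion{} // compile-time interface check

func main() {}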