From 180de74569d71742a98919cc13ad519f6b351581 Mon Sep 17 00:00:00 2001 From: Nikolay Bystritskiy Date: Sun, 6 Feb 2022 22:02:10 +0100 Subject: [PATCH] vendor dependencies from cleanenv --- vendor/github.com/BurntSushi/toml/COMPATIBLE | 1 + vendor/github.com/BurntSushi/toml/COPYING | 21 + vendor/github.com/BurntSushi/toml/README.md | 211 +++ vendor/github.com/BurntSushi/toml/decode.go | 560 ++++++ .../BurntSushi/toml/decode_go116.go | 19 + .../github.com/BurntSushi/toml/deprecated.go | 21 + vendor/github.com/BurntSushi/toml/doc.go | 13 + vendor/github.com/BurntSushi/toml/encode.go | 694 +++++++ vendor/github.com/BurntSushi/toml/error.go | 229 +++ .../github.com/BurntSushi/toml/internal/tz.go | 36 + vendor/github.com/BurntSushi/toml/lex.go | 1219 ++++++++++++ vendor/github.com/BurntSushi/toml/meta.go | 120 ++ vendor/github.com/BurntSushi/toml/parse.go | 763 ++++++++ .../github.com/BurntSushi/toml/type_fields.go | 242 +++ .../github.com/BurntSushi/toml/type_toml.go | 70 + .../ilyakaznacheev/cleanenv/.gitignore | 4 + .../ilyakaznacheev/cleanenv/.travis.yml | 36 + .../ilyakaznacheev/cleanenv/LICENSE | 9 + .../ilyakaznacheev/cleanenv/README.md | 309 +++ .../ilyakaznacheev/cleanenv/cleanenv.go | 606 ++++++ .../ilyakaznacheev/cleanenv/docs.go | 63 + .../ilyakaznacheev/cleanenv/logo.svg | 137 ++ vendor/github.com/joho/godotenv/LICENCE | 23 + vendor/github.com/joho/godotenv/README.md | 188 ++ vendor/github.com/joho/godotenv/godotenv.go | 363 ++++ vendor/github.com/joho/godotenv/renovate.json | 5 + vendor/olympos.io/encoding/edn/.travis.yml | 4 + vendor/olympos.io/encoding/edn/LICENSE | 25 + vendor/olympos.io/encoding/edn/README.md | 116 ++ vendor/olympos.io/encoding/edn/compact.go | 98 + vendor/olympos.io/encoding/edn/decode.go | 1678 +++++++++++++++++ vendor/olympos.io/encoding/edn/edn_tags.go | 142 ++ vendor/olympos.io/encoding/edn/encode.go | 1422 ++++++++++++++ vendor/olympos.io/encoding/edn/extras.go | 177 ++ vendor/olympos.io/encoding/edn/fold.go | 143 ++ vendor/olympos.io/encoding/edn/lexer.go | 603 ++++++ vendor/olympos.io/encoding/edn/pprint.go | 245 +++ vendor/olympos.io/encoding/edn/tags.go | 44 + vendor/olympos.io/encoding/edn/types.go | 149 ++ 39 files changed, 10808 insertions(+) create mode 100644 vendor/github.com/BurntSushi/toml/COMPATIBLE create mode 100644 vendor/github.com/BurntSushi/toml/COPYING create mode 100644 vendor/github.com/BurntSushi/toml/README.md create mode 100644 vendor/github.com/BurntSushi/toml/decode.go create mode 100644 vendor/github.com/BurntSushi/toml/decode_go116.go create mode 100644 vendor/github.com/BurntSushi/toml/deprecated.go create mode 100644 vendor/github.com/BurntSushi/toml/doc.go create mode 100644 vendor/github.com/BurntSushi/toml/encode.go create mode 100644 vendor/github.com/BurntSushi/toml/error.go create mode 100644 vendor/github.com/BurntSushi/toml/internal/tz.go create mode 100644 vendor/github.com/BurntSushi/toml/lex.go create mode 100644 vendor/github.com/BurntSushi/toml/meta.go create mode 100644 vendor/github.com/BurntSushi/toml/parse.go create mode 100644 vendor/github.com/BurntSushi/toml/type_fields.go create mode 100644 vendor/github.com/BurntSushi/toml/type_toml.go create mode 100644 vendor/github.com/ilyakaznacheev/cleanenv/.gitignore create mode 100644
vendor/github.com/ilyakaznacheev/cleanenv/.travis.yml create mode 100644 vendor/github.com/ilyakaznacheev/cleanenv/LICENSE create mode 100644 vendor/github.com/ilyakaznacheev/cleanenv/README.md create mode 100644 vendor/github.com/ilyakaznacheev/cleanenv/cleanenv.go create mode 100644 vendor/github.com/ilyakaznacheev/cleanenv/docs.go create mode 100644 vendor/github.com/ilyakaznacheev/cleanenv/logo.svg create mode 100644 vendor/github.com/joho/godotenv/LICENCE create mode 100644 vendor/github.com/joho/godotenv/README.md create mode 100644 vendor/github.com/joho/godotenv/godotenv.go create mode 100644 vendor/github.com/joho/godotenv/renovate.json create mode 100644 vendor/olympos.io/encoding/edn/.travis.yml create mode 100644 vendor/olympos.io/encoding/edn/LICENSE create mode 100644 vendor/olympos.io/encoding/edn/README.md create mode 100644 vendor/olympos.io/encoding/edn/compact.go create mode 100644 vendor/olympos.io/encoding/edn/decode.go create mode 100644 vendor/olympos.io/encoding/edn/edn_tags.go create mode 100644 vendor/olympos.io/encoding/edn/encode.go create mode 100644 vendor/olympos.io/encoding/edn/extras.go create mode 100644 vendor/olympos.io/encoding/edn/fold.go create mode 100644 vendor/olympos.io/encoding/edn/lexer.go create mode 100644 vendor/olympos.io/encoding/edn/pprint.go create mode 100644 vendor/olympos.io/encoding/edn/tags.go create mode 100644 vendor/olympos.io/encoding/edn/types.go diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE new file mode 100644 index 00000000..f621b011 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE @@ -0,0 +1 @@ +Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 00000000..01b57432 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 00000000..cc13f866 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,211 @@ +TOML stands for Tom's Obvious, Minimal Language. 
This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` +packages. + +Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). + +Documentation: https://godocs.io/github.com/BurntSushi/toml + +See the [releases page](https://github.com/BurntSushi/toml/releases) for a +changelog; this information is also in the git tag annotations (e.g. `git show +v0.4.0`). + +This library requires Go 1.13 or newer; install it with: + + % go get github.com/BurntSushi/toml@latest + +It also comes with a TOML validator CLI tool: + + % go install github.com/BurntSushi/toml/cmd/tomlv@latest + % tomlv some-toml-file.toml + +### Testing +This package passes all tests in [toml-test] for both the decoder and the +encoder. + +[toml-test]: https://github.com/BurntSushi/toml-test + +### Examples +This package works similarly to how the Go standard library handles XML and JSON. +Namely, data is loaded into Go values via reflection. + +For the simplest example, consider some TOML file as just a list of keys and +values: + +```toml +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z +``` + +Which could be defined in Go as: + +```go +type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time // requires `import time` +} +``` + +And then decoded with: + +```go +var conf Config +_, err := toml.Decode(tomlData, &conf) +// handle error +``` + +You can also use struct tags if your struct field name doesn't map to a TOML +key value directly: + +```toml +some_key_NAME = "wat" +``` + +```go +type TOML struct { + ObscureKey string `toml:"some_key_NAME"` +} +``` + +Beware that, like most other decoders, **only exported fields** are +considered when encoding and decoding; private fields are silently ignored. + +### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces +Here's an example that automatically parses duration strings into +`time.Duration` values: + +```toml +[[song]] +name = "Thunder Road" +duration = "4m49s" + +[[song]] +name = "Stairway to Heaven" +duration = "8m03s" +``` + +Which can be decoded with: + +```go +type song struct { + Name string + Duration duration +} +type songs struct { + Song []song +} +var favorites songs +if _, err := toml.Decode(blob, &favorites); err != nil { + log.Fatal(err) +} + +for _, s := range favorites.Song { + fmt.Printf("%s (%s)\n", s.Name, s.Duration) +} +``` + +And you'll also need a `duration` type that satisfies the +`encoding.TextUnmarshaler` interface: + +```go +type duration struct { + time.Duration +} + +func (d *duration) UnmarshalText(text []byte) error { + var err error + d.Duration, err = time.ParseDuration(string(text)) + return err +} +``` + +To target TOML specifically you can implement the `UnmarshalTOML` interface in +a similar way. + +### More complex usage +Here's an example of how to load the example from the official spec page: + +```toml +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care.
+ [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it + +# Line breaks are OK when inside arrays +hosts = [ + "alpha", + "omega" +] +``` + +And the corresponding Go types are: + +```go +type tomlConfig struct { + Title string + Owner ownerInfo + DB database `toml:"database"` + Servers map[string]server + Clients clients +} + +type ownerInfo struct { + Name string + Org string `toml:"organization"` + Bio string + DOB time.Time +} + +type database struct { + Server string + Ports []int + ConnMax int `toml:"connection_max"` + Enabled bool +} + +type server struct { + IP string + DC string +} + +type clients struct { + Data [][]interface{} + Hosts []string +} +``` + +Note that a case insensitive match will be tried if an exact match can't be +found. + +A working example of the above can be found in `_example/example.{go,toml}`. diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 00000000..e24f0c5d --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,560 @@ +package toml + +import ( + "encoding" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "reflect" + "strings" +) + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. +func Unmarshal(p []byte, v interface{}) error { + _, err := Decode(string(p), v) + return err +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// +// This type can be used for any value, which will cause decoding to be delayed. +// You can use the PrimitiveDecode() function to "manually" decode these values. +// +// NOTE: The underlying representation of a `Primitive` value is subject to +// change. Do not rely on it. +// +// NOTE: Primitive values are still parsed, so using them will only avoid the +// overhead of reflection. They can be useful when you don't know the exact type +// of TOML data until runtime. +type Primitive struct { + undecoded interface{} + context Key +} + +// The significand precision for float32 and float64 is 24 and 53 bits; this is +// the range a natural number can be stored in a float without loss of data. +const ( + maxSafeFloat32Int = 16777215 // 2^24-1 + maxSafeFloat64Int = 9007199254740991 // 2^53-1 +) + +// PrimitiveDecode is just like the other `Decode*` functions, except it +// decodes a TOML value that has already been parsed. Valid primitive values +// can *only* be obtained from values filled by the decoder functions, +// including this method. (i.e., `v` may contain more `Primitive` +// values.) +// +// Meta data for primitive values is included in the meta data returned by +// the `Decode*` functions with one exception: keys returned by the Undecoded +// method will only reflect keys that were decoded. Namely, any keys hidden +// behind a Primitive will be considered undecoded. Executing this method will +// update the undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Decoder decodes TOML data. 
+// +// TOML tables correspond to Go structs or maps (dealer's choice – they can be +// used interchangeably). + +// TOML table arrays correspond to either a slice of structs or a slice of maps. +// +// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed +// in the local timezone. +// +// All other TOML types (float, string, int, bool and array) correspond to the +// obvious Go types. +// +// An exception to the above rules is if a type implements the TextUnmarshaler +// interface, in which case any primitive TOML value (floats, strings, integers, +// booleans, datetimes) will be converted to a []byte and given to the value's +// UnmarshalText method. See the Unmarshaler example for a demonstration with +// time duration strings. +// +// Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go struct. +// The special `toml` struct tag can be used to map TOML keys to struct fields +// that don't match the key name exactly (see the example). A case insensitive +// match to struct names will be tried if an exact match can't be found. +// +// The mapping between TOML values and Go values is loose. That is, there may +// exist TOML values that cannot be placed into your representation, and there +// may be parts of your representation that do not correspond to TOML values. +// This loose mapping can be made stricter by using the IsDefined and/or +// Undecoded methods on the MetaData returned. +// +// This decoder does not handle cyclic types. Decode will not terminate if a +// cyclic type is passed. +type Decoder struct { + r io.Reader +} + +// NewDecoder creates a new Decoder. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +var ( + unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// Decode TOML data into the pointer `v`. +func (dec *Decoder) Decode(v interface{}) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + s := "%q" + if reflect.TypeOf(v) == nil { + s = "%v" + } + + return MetaData{}, e("cannot decode to non-pointer "+s, reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, e("cannot decode to nil value of %q", reflect.TypeOf(v)) + } + + // Check if this is a supported type: struct, map, interface{}, or something + // that implements UnmarshalTOML or UnmarshalText. + rv = indirect(rv) + rt := rv.Type() + if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && + !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) && + !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) { + return MetaData{}, e("cannot decode to type %s", rt) + } + + // TODO: parser should read from io.Reader? Or at the very least, make it + // read from []byte rather than string + data, err := ioutil.ReadAll(dec.r) + if err != nil { + return MetaData{}, err + } + + p, err := parse(string(data)) + if err != nil { + return MetaData{}, err + } + + md := MetaData{ + mapping: p.mapping, + types: p.types, + keys: p.ordered, + decoded: make(map[string]struct{}, len(p.ordered)), + context: nil, + } + return md, md.unify(p.mapping, rv) +} + +// Decode the TOML data into the pointer v. +// +// See the documentation on Decoder for a description of the decoding process.
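+// +// A minimal usage sketch (the anonymous struct and the TOML literal below are illustrative only, not part of this package's API): +// +// var conf struct{ Age int } +// md, err := toml.Decode("Age = 25", &conf) +// if err != nil { /* handle error */ } +// fmt.Println(md.Undecoded()) // any keys that were not mapped to conf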
+func Decode(data string, v interface{}) (MetaData, error) { + return NewDecoder(strings.NewReader(data)).Decode(v) +} + +// DecodeFile is just like Decode, except it will automatically read the +// contents of the file at path and decode it for you. +func DecodeFile(path string, v interface{}) (MetaData, error) { + fp, err := os.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + // Special case. Look for a `Primitive` value. + // TODO: #76 would make this superfluous after implemented. + if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + // Save the undecoded data and the key context into the primitive + // value. + context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + // Special case. Unmarshaler Interface support. + if rv.CanAddr() { + if v, ok := rv.Addr().Interface().(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + } + + // Special case. Look for a value satisfying the TextUnmarshaler interface. + if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + return md.unifyText(data, v) + } + // TODO: + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or + // array. In particular, the unmarshaler should only be applied to primitive + // TOML values. But at this point, it will be applied to all kinds of values + // and produce an incorrect error whenever those values are hashes or arrays + // (including arrays of tables). + + k := rv.Kind() + + // laziness + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + // we only support empty interfaces. 
+ if rv.NumMethod() > 0 { + return e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32, reflect.Float64: + return md.unifyFloat64(data, rv) + } + return e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = struct{}{} + md.context = append(md.context, key) + err := md.unify(datum, subv) + if err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + return e("cannot write unexported field %s.%s", rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + if k := rv.Type().Key().Kind(); k != reflect.String { + return fmt.Errorf( + "toml: cannot decode to a map with non-string key type (%s in %q)", + k, rv.Type()) + } + + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return md.badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = struct{}{} + md.context = append(md.context, k) + + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + if err := md.unify(v, rvval); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey := indirect(reflect.New(rv.Type().Key())) + rvkey.SetString(k) + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return md.badtype("slice", data) + } + if l := datav.Len(); l != rv.Len() { + return e("expected array length %d; got TOML array of length %d", rv.Len(), l) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return md.badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + l := data.Len() + for i := 0; i < l; i++ { + err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i))) + if err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return md.badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + if num, ok := data.(float64); ok { + switch rv.Kind() { + case reflect.Float32: + if num < -math.MaxFloat32 || num > math.MaxFloat32 { + return e("value %f is out of range for 
float32", num) + } + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + + if num, ok := data.(int64); ok { + switch rv.Kind() { + case reflect.Float32: + if num < -maxSafeFloat32Int || num > maxSafeFloat32Int { + return e("value %d is out of range for float32", num) + } + fallthrough + case reflect.Float64: + if num < -maxSafeFloat64Int || num > maxSafeFloat64Int { + return e("value %d is out of range for float64", num) + } + rv.SetFloat(float64(num)) + default: + panic("bug") + } + return nil + } + + return md.badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + if num, ok := data.(int64); ok { + if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { + switch rv.Kind() { + case reflect.Int, reflect.Int64: + // No bounds checking necessary. + case reflect.Int8: + if num < math.MinInt8 || num > math.MaxInt8 { + return e("value %d is out of range for int8", num) + } + case reflect.Int16: + if num < math.MinInt16 || num > math.MaxInt16 { + return e("value %d is out of range for int16", num) + } + case reflect.Int32: + if num < math.MinInt32 || num > math.MaxInt32 { + return e("value %d is out of range for int32", num) + } + } + rv.SetInt(num) + } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { + unum := uint64(num) + switch rv.Kind() { + case reflect.Uint, reflect.Uint64: + // No bounds checking necessary. + case reflect.Uint8: + if num < 0 || unum > math.MaxUint8 { + return e("value %d is out of range for uint8", num) + } + case reflect.Uint16: + if num < 0 || unum > math.MaxUint16 { + return e("value %d is out of range for uint16", num) + } + case reflect.Uint32: + if num < 0 || unum > math.MaxUint32 { + return e("value %d is out of range for uint32", num) + } + } + rv.SetUint(unum) + } else { + panic("unreachable") + } + return nil + } + return md.badtype("integer", data) +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return md.badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case Marshaler: + text, err := sdata.MarshalTOML() + if err != nil { + return err + } + s = string(text) + case TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return md.badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +func (md *MetaData) badtype(dst string, data interface{}) error { + return e("incompatible types: TOML key %q has type %T; destination has type %s", md.context, data, dst) +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// +// Pointers are followed until the value is not a pointer. New values are +// allocated for each nil pointer. 
+// +// An exception to this rule is if the value satisfies an interface of interest +// to us (like encoding.TextUnmarshaler). +func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + return true + } + return false +} + +func e(format string, args ...interface{}) error { + return fmt.Errorf("toml: "+format, args...) +} diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go new file mode 100644 index 00000000..eddfb641 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_go116.go @@ -0,0 +1,19 @@ +//go:build go1.16 +// +build go1.16 + +package toml + +import ( + "io/fs" +) + +// DecodeFS is just like Decode, except it will automatically read the contents +// of the file at `path` from a fs.FS instance. +func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) { + fp, err := fsys.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go new file mode 100644 index 00000000..c6af3f23 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/deprecated.go @@ -0,0 +1,21 @@ +package toml + +import ( + "encoding" + "io" +) + +// Deprecated: use encoding.TextMarshaler +type TextMarshaler encoding.TextMarshaler + +// Deprecated: use encoding.TextUnmarshaler +type TextUnmarshaler encoding.TextUnmarshaler + +// Deprecated: use MetaData.PrimitiveDecode. +func PrimitiveDecode(primValue Primitive, v interface{}) error { + md := MetaData{decoded: make(map[string]struct{})} + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Deprecated: use NewDecoder(reader).Decode(&value). +func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) } diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go new file mode 100644 index 00000000..099c4a77 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -0,0 +1,13 @@ +/* +Package toml implements decoding and encoding of TOML files. + +This package supports TOML v1.0.0, as listed on https://toml.io + +There is also support for delaying decoding with the Primitive type, and +querying the set of keys in a TOML document with the MetaData type. + +The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator, +and can be used to verify if a TOML document is valid. It can also be used to +print the type of each key.
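+ +As a rough sketch of delayed decoding with Primitive (the map layout and the "title" key are illustrative only): + + var m map[string]toml.Primitive + md, err := toml.Decode(data, &m) + // handle err, then decode individual values on demand: + var title string + err = md.PrimitiveDecode(m["title"], &title)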
+*/ +package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go new file mode 100644 index 00000000..dee4e6d3 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -0,0 +1,694 @@ +package toml + +import ( + "bufio" + "encoding" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/BurntSushi/toml/internal" +) + +type tomlEncodeError struct{ error } + +var ( + errArrayNilElement = errors.New("toml: cannot encode array with nil element") + errNonString = errors.New("toml: cannot encode a map with non-string key type") + errNoKey = errors.New("toml: top-level values must be Go maps or structs") + errAnything = errors.New("") // used in testing +) + +var dblQuotedReplacer = strings.NewReplacer( + "\"", "\\\"", + "\\", "\\\\", + "\x00", `\u0000`, + "\x01", `\u0001`, + "\x02", `\u0002`, + "\x03", `\u0003`, + "\x04", `\u0004`, + "\x05", `\u0005`, + "\x06", `\u0006`, + "\x07", `\u0007`, + "\b", `\b`, + "\t", `\t`, + "\n", `\n`, + "\x0b", `\u000b`, + "\f", `\f`, + "\r", `\r`, + "\x0e", `\u000e`, + "\x0f", `\u000f`, + "\x10", `\u0010`, + "\x11", `\u0011`, + "\x12", `\u0012`, + "\x13", `\u0013`, + "\x14", `\u0014`, + "\x15", `\u0015`, + "\x16", `\u0016`, + "\x17", `\u0017`, + "\x18", `\u0018`, + "\x19", `\u0019`, + "\x1a", `\u001a`, + "\x1b", `\u001b`, + "\x1c", `\u001c`, + "\x1d", `\u001d`, + "\x1e", `\u001e`, + "\x1f", `\u001f`, + "\x7f", `\u007f`, +) + +// Marshaler is the interface implemented by types that can marshal themselves +// into valid TOML. +type Marshaler interface { + MarshalTOML() ([]byte, error) +} + +// Encoder encodes a Go value to a TOML document. +// +// The mapping between Go values and TOML values should be precisely the same as +// for the Decode* functions. +// +// The toml.Marshaler and encoding.TextMarshaler interfaces are supported to +// encode the value as custom TOML. +// +// If you want to write arbitrary binary data then you will need to use +// something like base64 since TOML does not have any binary types. +// +// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes +// are encoded first. +// +// Go maps will be sorted alphabetically by key for deterministic output. +// +// Encoding Go values without a corresponding TOML representation will return an +// error. Examples of this include maps with non-string keys, slices with nil +// elements, embedded non-struct types, and nested slices containing maps or +// structs. (e.g. [][]map[string]string is not allowed but []map[string]string +// is okay, as is []map[string][]string). +// +// NOTE: only exported keys are encoded due to the use of reflection. Unexported +// keys are silently discarded. +type Encoder struct { + // String to use for a single indentation level; default is two spaces. + Indent string + + w *bufio.Writer + hasWritten bool // written any output to w yet? +} + +// NewEncoder creates a new Encoder. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: bufio.NewWriter(w), + Indent: " ", + } +} + +// Encode writes a TOML representation of the Go value to the Encoder's writer. +// +// An error is returned if the value given cannot be encoded to a valid TOML +// document.
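+// +// A minimal usage sketch (the bytes.Buffer and the map literal are illustrative only): +// +// buf := new(bytes.Buffer) +// err := toml.NewEncoder(buf).Encode(map[string]int{"answer": 42}) +// // on success, buf now contains the line `answer = 42`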
+func (enc *Encoder) Encode(v interface{}) error { + rv := eindirect(reflect.ValueOf(v)) + if err := enc.safeEncode(Key([]string{}), rv); err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // Special case: time needs to be in ISO8601 format. + // + // Special case: if we can marshal the type to text, then we use that. This + // prevents the encoder from handling these types as generic structs (or + // whatever the underlying type of a TextMarshaler is). + switch t := rv.Interface().(type) { + case time.Time, encoding.TextMarshaler, Marshaler: + enc.writeKeyValue(key, rv, false) + return + // TODO: #76 would make this superfluous after implemented. + case Primitive: + enc.encode(key, reflect.ValueOf(t.undecoded)) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.writeKeyValue(key, rv, false) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.writeKeyValue(key, rv, false) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element. +func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: // Using TextMarshaler adds extra quotes, which we don't want.
+ format := time.RFC3339Nano + switch v.Location() { + case internal.LocalDatetime: + format = "2006-01-02T15:04:05.999999999" + case internal.LocalDate: + format = "2006-01-02" + case internal.LocalTime: + format = "15:04:05.999999999" + } + switch v.Location() { + default: + enc.wf(v.Format(format)) + case internal.LocalDatetime, internal.LocalDate, internal.LocalTime: + enc.wf(v.In(time.UTC).Format(format)) + } + return + case Marshaler: + s, err := v.MarshalTOML() + if err != nil { + encPanic(err) + } + enc.writeQuoted(string(s)) + return + case encoding.TextMarshaler: + s, err := v.MarshalText() + if err != nil { + encPanic(err) + } + enc.writeQuoted(string(s)) + return + } + + switch rv.Kind() { + case reflect.String: + enc.writeQuoted(rv.String()) + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) + } + case reflect.Float64: + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) + } + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Struct: + enc.eStruct(nil, rv, true) + case reflect.Map: + enc.eMap(nil, rv, true) + case reflect.Interface: + enc.eElement(rv.Elem()) + default: + encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one number on +// either side. +func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", dblQuotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := rv.Index(i) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := rv.Index(i) + if isNil(trv) { + continue + } + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key) + enc.newline() + enc.eMapOrStruct(key, trv, false) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key) + enc.newline() + } + enc.eMapOrStruct(key, rv, false) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { + switch rv := eindirect(rv); rv.Kind() { + case reflect.Map: + enc.eMap(key, rv, inline) + case reflect.Struct: + enc.eStruct(key, rv, inline) + default: + // Should never happen? 
+ panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. + var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsTable(tomlTypeOfGo(rv.MapIndex(mapKey))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string, trailC bool) { + sort.Strings(mapKeys) + for i, mapKey := range mapKeys { + val := rv.MapIndex(reflect.ValueOf(mapKey)) + if isNil(val) { + continue + } + + if inline { + enc.writeKeyValue(Key{mapKey}, val, true) + if trailC || i != len(mapKeys)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(mapKey), val) + } + } + } + + if inline { + enc.wf("{") + } + writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0) + writeMapKeys(mapKeysSub, false) + if inline { + enc.wf("}") + } +} + +const is32Bit = (32 << (^uint(0) >> 63)) == 32 + +func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table then all keys under it will be in that + // table (not the one we're writing here). + // + // Fields is a [][]int: for fieldsDirect this always has one entry (the + // struct index). For fieldsSub it contains two entries: the parent field + // index from tv, and the field indexes for the fields of the sub. + var ( + rt = rv.Type() + fieldsDirect, fieldsSub [][]int + addFields func(rt reflect.Type, rv reflect.Value, start []int) + ) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields. + continue + } + + frv := rv.Field(i) + + // Treat anonymous struct fields with tag names as though they are + // not anonymous, like encoding/json does. + // + // Non-struct anonymous fields use the normal encoding logic. + if f.Anonymous { + t := f.Type + switch t.Kind() { + case reflect.Struct: + if getOptions(f.Tag).name == "" { + addFields(t, frv, append(start, f.Index...)) + continue + } + case reflect.Ptr: + if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" { + if !frv.IsNil() { + addFields(t.Elem(), frv.Elem(), append(start, f.Index...)) + } + continue + } + } + } + + if typeIsTable(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + // Copy so it works correctly on 32bit archs; not clear why this + // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 + // This also works fine on 64bit, but 32bit archs are somewhat + // rare and this is a wee bit faster. + if is32Bit { + copyStart := make([]int, len(start)) + copy(copyStart, start) + fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + } + addFields(rt, rv, nil) + + writeFields := func(fields [][]int) { + for _, fieldIndex := range fields { + fieldType := rt.FieldByIndex(fieldIndex) + fieldVal := rv.FieldByIndex(fieldIndex) + + if isNil(fieldVal) { /// Don't write anything for nil fields.
+ continue + } + + opts := getOptions(fieldType.Tag) + if opts.skip { + continue + } + keyName := fieldType.Name + if opts.name != "" { + keyName = opts.name + } + if opts.omitempty && isEmpty(fieldVal) { + continue + } + if opts.omitzero && isZero(fieldVal) { + continue + } + + if inline { + enc.writeKeyValue(Key{keyName}, fieldVal, true) + if fieldIndex[0] != len(fields)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(keyName), fieldVal) + } + } + } + + if inline { + enc.wf("{") + } + writeFields(fieldsDirect) + writeFields(fieldsSub) + if inline { + enc.wf("}") + } +} + +// tomlTypeOfGo returns the TOML type name of the Go value's type. +// +// It is used to determine whether the types of array elements are mixed (which +// is forbidden). If the Go value is nil, then it is illegal for it to be an +// array element, and valueIsNil is returned as true. +// +// The type may be `nil`, which means no concrete TOML type could be found. +func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if typeEqual(tomlHash, tomlArrayType(rv)) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + case reflect.Struct: + if _, ok := rv.Interface().(time.Time); ok { + return tomlDatetime + } + if isMarshaler(rv) { + return tomlString + } + return tomlHash + default: + if isMarshaler(rv) { + return tomlString + } + + encPanic(errors.New("unsupported type: " + rv.Kind().String())) + panic("unreachable") + } +} + +func isMarshaler(rv reflect.Value) bool { + switch rv.Interface().(type) { + case encoding.TextMarshaler: + return true + case Marshaler: + return true + } + + // Someone used a pointer receiver: we can make it work for pointer values. + if rv.CanAddr() { + if _, ok := rv.Addr().Interface().(encoding.TextMarshaler); ok { + return true + } + if _, ok := rv.Addr().Interface().(Marshaler); ok { + return true + } + } + return false +} + +// tomlArrayType returns the element type of a TOML array. The type returned +// may be nil if it cannot be determined (e.g., a nil slice or a zero length +// slice). This function may also panic if it finds a type that cannot be +// expressed in TOML (such as nil elements, heterogeneous arrays or directly +// nested arrays of tables). +func tomlArrayType(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { + return nil + } + + /// Don't allow nil.
+ rvlen := rv.Len() + for i := 1; i < rvlen; i++ { + if tomlTypeOfGo(rv.Index(i)) == nil { + encPanic(errArrayNilElement) + } + } + + firstType := tomlTypeOfGo(rv.Index(0)) + if firstType == nil { + encPanic(errArrayNilElement) + } + return firstType +} + +type tagOptions struct { + skip bool // "-" + name string + omitempty bool + omitzero bool +} + +func getOptions(tag reflect.StructTag) tagOptions { + t := tag.Get("toml") + if t == "-" { + return tagOptions{skip: true} + } + var opts tagOptions + parts := strings.Split(t, ",") + opts.name = parts[0] + for _, s := range parts[1:] { + switch s { + case "omitempty": + opts.omitempty = true + case "omitzero": + opts.omitzero = true + } + } + return opts +} + +func isZero(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return rv.Float() == 0.0 + } + return false +} + +func isEmpty(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return rv.Len() == 0 + case reflect.Bool: + return !rv.Bool() + } + return false +} + +func (enc *Encoder) newline() { + if enc.hasWritten { + enc.wf("\n") + } +} + +// Write a key/value pair: +// +// key = <value> +// +// This is also used for "k = v" in inline tables; so something like this will +// be written in three calls: +// +// ┌────────────────────┐ +// │ ┌───┐ ┌─────┐│ +// v v v v vv +// key = {k = v, k2 = v2} +// +func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { + if len(key) == 0 { + encPanic(errNoKey) + } + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + if !inline { + enc.newline() + } +} + +func (enc *Encoder) wf(format string, v ...interface{}) { + _, err := fmt.Fprintf(enc.w, format, v...) + if err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +func eindirect(v reflect.Value) reflect.Value { + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + return eindirect(v.Elem()) + default: + return v + } +} + +func isNil(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go new file mode 100644 index 00000000..36edc465 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -0,0 +1,229 @@ +package toml + +import ( + "fmt" + "strings" +) + +// ParseError is returned when there is an error parsing the TOML syntax. +// +// For example invalid syntax, duplicate keys, etc. +// +// In addition to the error message itself, you can also print detailed location +// information with context by using ErrorWithPosition(): +// +// toml: error: Key 'fruit' was already created and cannot be used as an array.
+// +// At line 4, column 2-7: +// +// 2 | fruit = [] +// 3 | +// 4 | [[fruit]] # Not allowed +// ^^^^^ +// +// Furthermore, ErrorWithUsage() can be used to print the above with some +// more detailed usage guidance: +// +// toml: error: newlines not allowed within inline tables +// +// At line 1, column 18: +// +// 1 | x = [{ key = 42 # +// ^ +// +// Error help: +// +// Inline tables must always be on a single line: +// +// table = {key = 42, second = 43} +// +// It is invalid to split them over multiple lines like so: +// +// # INVALID +// table = { +// key = 42, +// second = 43 +// } +// +// Use regular tables for this: +// +// [table] +// key = 42 +// second = 43 +type ParseError struct { + Message string // Short technical message. + Usage string // Longer message with usage guidance; may be blank. + Position Position // Position of the error + LastKey string // Last parsed key, may be blank. + Line int // Line the error occurred on. Deprecated: use Position. + + err error + input string +} + +// Position of an error. +type Position struct { + Line int // Line number, starting at 1. + Start int // Start of error, as byte offset starting at 0. + Len int // Length in bytes. +} + +func (pe ParseError) Error() string { + msg := pe.Message + if msg == "" { // Error from errorf() + msg = pe.err.Error() + } + + if pe.LastKey == "" { + return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg) + } + return fmt.Sprintf("toml: line %d (last key %q): %s", + pe.Position.Line, pe.LastKey, msg) +} + +// ErrorWithPosition() returns the error with detailed location context. +// +// See the documentation on ParseError. +func (pe ParseError) ErrorWithPosition() string { + if pe.input == "" { // Should never happen, but just in case. + return pe.Error() + } + + var ( + lines = strings.Split(pe.input, "\n") + col = pe.column(lines) + b = new(strings.Builder) + ) + + msg := pe.Message + if msg == "" { + msg = pe.err.Error() + } + + // TODO: don't show control characters as literals? This may not show up + // well everywhere. + + if pe.Position.Len == 1 { + fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n", + msg, pe.Position.Line, col+1) + } else { + fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n", + msg, pe.Position.Line, col, col+pe.Position.Len) + } + if pe.Position.Line > 2 { + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3]) + } + if pe.Position.Line > 1 { + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2]) + } + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1]) + fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len)) + return b.String() +} + +// ErrorWithUsage() returns the error with detailed location context and usage +// guidance. +// +// See the documentation on ParseError. +func (pe ParseError) ErrorWithUsage() string { + m := pe.ErrorWithPosition() + if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" { + return m + "Error help:\n\n " + + strings.ReplaceAll(strings.TrimSpace(u.Usage()), "\n", "\n ") + + "\n" + } + return m +} + +func (pe ParseError) column(lines []string) int { + var pos, col int + for i := range lines { + ll := len(lines[i]) + 1 // +1 for the removed newline + if pos+ll >= pe.Position.Start { + col = pe.Position.Start - pos + if col < 0 { // Should never happen, but just in case.
+ col = 0 + } + break + } + pos += ll + } + + return col +} + +type ( + errLexControl struct{ r rune } + errLexEscape struct{ r rune } + errLexUTF8 struct{ b byte } + errLexInvalidNum struct{ v string } + errLexInvalidDate struct{ v string } + errLexInlineTableNL struct{} + errLexStringNL struct{} +) + +func (e errLexControl) Error() string { + return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r) +} +func (e errLexControl) Usage() string { return "" } + +func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) } +func (e errLexEscape) Usage() string { return usageEscape } +func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) } +func (e errLexUTF8) Usage() string { return "" } +func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) } +func (e errLexInvalidNum) Usage() string { return "" } +func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) } +func (e errLexInvalidDate) Usage() string { return "" } +func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" } +func (e errLexInlineTableNL) Usage() string { return usageInlineNewline } +func (e errLexStringNL) Error() string { return "strings cannot contain newlines" } +func (e errLexStringNL) Usage() string { return usageStringNewline } + +const usageEscape = ` +A '\' inside a "-delimited string is interpreted as an escape character. + +The following escape sequences are supported: +\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX + +To prevent a '\' from being recognized as an escape character, use either: + +- a ' or '''-delimited string; escape characters aren't processed in them; or +- write two backslashes to get a single backslash: '\\'. + +If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/' +instead of '\' will usually also work: "C:/Users/martin". +` + +const usageInlineNewline = ` +Inline tables must always be on a single line: + + table = {key = 42, second = 43} + +It is invalid to split them over multiple lines like so: + + # INVALID + table = { + key = 42, + second = 43 + } + +Use regular tables for this: + + [table] + key = 42 + second = 43 +` + +const usageStringNewline = ` +Strings must always be on a single line, and cannot span more than one line: + + # INVALID + string = "Hello, + world!" + +Instead use """ or ''' to split strings over multiple lines: + + string = """Hello, + world!""" +` diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go new file mode 100644 index 00000000..022f15bc --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/internal/tz.go @@ -0,0 +1,36 @@ +package internal + +import "time" + +// Timezones used for local datetime, date, and time TOML types. +// +// The exact way times and dates without a timezone should be interpreted is not +// well-defined in the TOML specification and is left to the implementation. These +// default to the current local timezone offset of the computer, but this can be +// changed by changing these variables before decoding. +// +// TODO: +// Ideally we'd like to offer people the ability to configure the used timezone +// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit +// tricky: the reason we use three different variables for this is to support +// round-tripping – without these specific TZ names we wouldn't know which +// format to use.
+// +// There isn't a good way to encode this right now though, and passing this sort +// of information also ties in to various related issues such as string format +// encoding, encoding of comments, etc. +// +// So, for the time being, just put this in internal until we can write a good +// comprehensive API for doing all of this. +// +// The reason they're exported is because they're referred to from e.g. +// internal/tag. +// +// Note that this behaviour is valid according to the TOML spec as the exact +// behaviour is left up to implementations. +var ( + localOffset = func() int { _, o := time.Now().Zone(); return o }() + LocalDatetime = time.FixedZone("datetime-local", localOffset) + LocalDate = time.FixedZone("date-local", localOffset) + LocalTime = time.FixedZone("time-local", localOffset) +) diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 00000000..63ef20f4 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,1219 @@ +package toml + +import ( + "fmt" + "reflect" + "runtime" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemKeyEnd + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const eof = 0 + +type stateFn func(lx *lexer) stateFn + +func (p Position) String() string { + return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len) +} + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + + // Allow for backing up up to 4 runes. This is necessary because TOML + // contains 3-rune tokens (""" and '''). + prevWidths [4]int + nprev int // how many of prevWidths are in use + atEOF bool // If we emit an eof, we can still back up, but it is not OK to call next again. + + // A stack of state functions used to maintain context. + // + // The idea is to reuse parts of the state machine in various places. For + // example, values can appear at the top level or within arbitrarily nested + // arrays. The last state on the stack is used after a value has been lexed. + // Similarly for comments.
+ stack []stateFn +} + +type item struct { + typ itemType + val string + err error + pos Position +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + //fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack) + } + } +} + +func lex(input string) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + line: 1, + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx lexer) getPos() Position { + p := Position{ + Line: lx.line, + Start: lx.start, + Len: lx.pos - lx.start, + } + if p.Len <= 0 { + p.Len = 1 + } + return p +} + +func (lx *lexer) emit(typ itemType) { + lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("BUG in lexer: next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[3] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 4 { + lx.nprev++ + } + + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + if r == utf8.RuneError { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return utf8.RuneError + } + + // Note: don't use peek() here, as this calls next(). + if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) { + lx.errorControlChar(r) + return utf8.RuneError + } + + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called 4 times between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("BUG in lexer: backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[3] + lx.nprev-- + + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// error stops all lexing by emitting an error and returning `nil`. +// +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). 
+func (lx *lexer) error(err error) stateFn {
+	if lx.atEOF {
+		return lx.errorPrevLine(err)
+	}
+	lx.items <- item{typ: itemError, pos: lx.getPos(), err: err}
+	return nil
+}
+
+// errorPrevLine is like error(), but sets the position to the last column of
+// the previous line.
+//
+// This is so that unexpected EOF or NL errors don't show on a new blank line.
+func (lx *lexer) errorPrevLine(err error) stateFn {
+	pos := lx.getPos()
+	pos.Line--
+	pos.Len = 1
+	pos.Start = lx.pos - 1
+	lx.items <- item{typ: itemError, pos: pos, err: err}
+	return nil
+}
+
+// errorPos is like error(), but allows explicitly setting the position.
+func (lx *lexer) errorPos(start, length int, err error) stateFn {
+	pos := lx.getPos()
+	pos.Start = start
+	pos.Len = length
+	lx.items <- item{typ: itemError, pos: pos, err: err}
+	return nil
+}
+
+// errorf is like error, and creates a new error.
+func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
+	if lx.atEOF {
+		pos := lx.getPos()
+		pos.Line--
+		pos.Len = 1
+		pos.Start = lx.pos - 1
+		lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
+		return nil
+	}
+	lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)}
+	return nil
+}
+
+func (lx *lexer) errorControlChar(cc rune) stateFn {
+	return lx.errorPos(lx.pos-1, 1, errLexControl{cc})
+}
+
+// lexTop consumes elements at the top level of TOML data.
+func lexTop(lx *lexer) stateFn {
+	r := lx.next()
+	if isWhitespace(r) || isNL(r) {
+		return lexSkip(lx, lexTop)
+	}
+	switch r {
+	case '#':
+		lx.push(lexTop)
+		return lexCommentStart
+	case '[':
+		return lexTableStart
+	case eof:
+		if lx.pos > lx.start {
+			return lx.errorf("unexpected EOF")
+		}
+		lx.emit(itemEOF)
+		return nil
+	}
+
+	// At this point, the only valid item can be a key, so we back up
+	// and let the key lexer do the rest.
+	lx.backup()
+	lx.push(lexTopEnd)
+	return lexKeyStart
+}
+
+// lexTopEnd is entered whenever a top-level item has been consumed. (A value
+// or a table.) It must see only whitespace, and will turn back to lexTop
+// upon a newline. If it sees EOF, it will quit the lexer successfully.
+func lexTopEnd(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == '#':
+		// a comment will read to a newline for us.
+		lx.push(lexTop)
+		return lexCommentStart
+	case isWhitespace(r):
+		return lexTopEnd
+	case isNL(r):
+		lx.ignore()
+		return lexTop
+	case r == eof:
+		lx.emit(itemEOF)
+		return nil
+	}
+	return lx.errorf(
+		"expected a top-level item to end with a newline, comment, or EOF, but got %q instead",
+		r)
+}
+
+// lexTableStart lexes the beginning of a table. Namely, it makes sure that
+// it starts with a character other than '.' and ']'.
+// It assumes that '[' has already been consumed.
+// It also handles the case that this is an item in an array of tables.
+// e.g., '[[name]]'.
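// Illustrative sketch, not part of the vendored sources: a minimal,
// self-contained example of the stateFn pattern the lexer above is built on.
// Each state function does a little work and returns the next state; the real
// lexer additionally keeps a stack (push/pop) so shared states such as
// "lex a value" can return to whatever context invoked them. All names here
// are hypothetical.
package main

import "fmt"

type miniLexer struct {
	input string
	pos   int
	out   []string
}

// miniState mirrors the shape of stateFn: a function from lexer to next state.
type miniState func(*miniLexer) miniState

// lexWord consumes one comma-separated word and re-enters itself, or ends the
// machine by returning nil at end of input.
func lexWord(lx *miniLexer) miniState {
	start := lx.pos
	for lx.pos < len(lx.input) && lx.input[lx.pos] != ',' {
		lx.pos++
	}
	lx.out = append(lx.out, lx.input[start:lx.pos])
	if lx.pos == len(lx.input) {
		return nil
	}
	lx.pos++ // step over ','
	return lexWord
}

func main() {
	lx := &miniLexer{input: "a,b,c"}
	for st := miniState(lexWord); st != nil; st = st(lx) {
	}
	fmt.Println(lx.out) // [a b c]
}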
+func lexTableStart(lx *lexer) stateFn {
+	if lx.peek() == '[' {
+		lx.next()
+		lx.emit(itemArrayTableStart)
+		lx.push(lexArrayTableEnd)
+	} else {
+		lx.emit(itemTableStart)
+		lx.push(lexTableEnd)
+	}
+	return lexTableNameStart
+}
+
+func lexTableEnd(lx *lexer) stateFn {
+	lx.emit(itemTableEnd)
+	return lexTopEnd
+}
+
+func lexArrayTableEnd(lx *lexer) stateFn {
+	if r := lx.next(); r != ']' {
+		return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r)
+	}
+	lx.emit(itemArrayTableEnd)
+	return lexTopEnd
+}
+
+func lexTableNameStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == ']' || r == eof:
+		return lx.errorf("unexpected end of table name (table names cannot be empty)")
+	case r == '.':
+		return lx.errorf("unexpected table separator (table names cannot be empty)")
+	case r == '"' || r == '\'':
+		lx.ignore()
+		lx.push(lexTableNameEnd)
+		return lexQuotedName
+	default:
+		lx.push(lexTableNameEnd)
+		return lexBareName
+	}
+}
+
+// lexTableNameEnd reads the end of a piece of a table name, optionally
+// consuming whitespace.
+func lexTableNameEnd(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.next(); {
+	case isWhitespace(r):
+		return lexTableNameEnd
+	case r == '.':
+		lx.ignore()
+		return lexTableNameStart
+	case r == ']':
+		return lx.pop()
+	default:
+		return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r)
+	}
+}
+
+// lexBareName lexes one part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only 'a' inside 'a.b'.
+func lexBareName(lx *lexer) stateFn {
+	r := lx.next()
+	if isBareKeyChar(r) {
+		return lexBareName
+	}
+	lx.backup()
+	lx.emit(itemText)
+	return lx.pop()
+}
+
+// lexQuotedName lexes one part of a quoted key or table name.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only '"a"' inside '"a".b'.
+func lexQuotedName(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r):
+		return lexSkip(lx, lexValue)
+	case r == '"':
+		lx.ignore() // ignore the '"'
+		return lexString
+	case r == '\'':
+		lx.ignore() // ignore the "'"
+		return lexRawString
+	case r == eof:
+		return lx.errorf("unexpected EOF; expected value")
+	default:
+		return lx.errorf("expected value but found %q instead", r)
+	}
+}
+
+// lexKeyStart consumes all key parts until a '='.
+func lexKeyStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == '=' || r == eof:
+		return lx.errorf("unexpected '=': key name appears blank")
+	case r == '.':
+		return lx.errorf("unexpected '.': keys cannot start with a '.'")
+	case r == '"' || r == '\'':
+		lx.ignore()
+		fallthrough
+	default: // Bare key
+		lx.emit(itemKeyStart)
+		return lexKeyNameStart
+	}
+}
+
+func lexKeyNameStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == '=' || r == eof:
+		return lx.errorf("unexpected '='")
+	case r == '.':
+		return lx.errorf("unexpected '.'")
+	case r == '"' || r == '\'':
+		lx.ignore()
+		lx.push(lexKeyEnd)
+		return lexQuotedName
+	default:
+		lx.push(lexKeyEnd)
+		return lexBareName
+	}
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
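// Illustrative sketch, not part of the vendored sources: the bare, quoted,
// and dotted key forms the name-lexing states above accept, decoded through
// the package's public API (the upstream import path is assumed).
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	const doc = `
bare-key     = 1
"quoted key" = 2
'literal'    = 3
dotted.key   = 4 # table "dotted" containing key "key"
`
	var v map[string]interface{}
	if _, err := toml.Decode(doc, &v); err != nil {
		panic(err)
	}
	dotted := v["dotted"].(map[string]interface{})
	fmt.Println(v["quoted key"], dotted["key"]) // 2 4
}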
+func lexKeyEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + case r == eof: + return lx.errorf("unexpected EOF; expected key separator '='") + case r == '.': + lx.ignore() + return lexKeyNameStart + case r == '=': + lx.emit(itemKeyEnd) + return lexSkip(lx, lexValue) + default: + return lx.errorf("expected '.' or '=', but got %q instead", r) + } +} + +// lexValue starts the consumption of a value anywhere a value is expected. +// lexValue will ignore whitespace. +// After a value is lexed, the last state on the next is popped and returned. +func lexValue(lx *lexer) stateFn { + // We allow whitespace to precede a value, but NOT newlines. + // In array syntax, the array states are responsible for ignoring newlines. + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case '[': + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case '{': + lx.ignore() + lx.emit(itemInlineTableStart) + return lexInlineTableValue + case '"': + if lx.accept('"') { + if lx.accept('"') { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case '\'': + if lx.accept('\'') { + if lx.accept('\'') { + lx.ignore() // Ignore """ + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '.': // special error case, be kind to users + return lx.errorf("floats must start with a digit, not '.'") + case 'i', 'n': + if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) { + lx.emit(itemFloat) + return lx.pop() + } + case '-', '+': + return lexDecimalNumberStart + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) + lx.backup() + return lexBool + } + if r == eof { + return lx.errorf("unexpected EOF; expected value") + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == '#': + lx.push(lexArrayValue) + return lexCommentStart + case r == ',': + return lx.errorf("unexpected comma") + case r == ']': + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'. +func lexArrayValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == '#': + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == ',': + lx.ignore() + return lexArrayValue // move on to the next value + case r == ']': + return lexArrayEnd + default: + return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r)) + } +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. 
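// Illustrative sketch, not part of the vendored sources: the value forms that
// lexValue above dispatches on, decoded through the public API. Assumes the
// upstream import path.
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	const doc = `
array  = [1, 2, 3]
inline = {key = 42, second = 43} # must stay on one line
basic  = "tab here:\t"
raw    = 'no \escapes here'
big    = +inf
ok     = true
`
	var v map[string]interface{}
	if _, err := toml.Decode(doc, &v); err != nil {
		panic(err)
	}
	fmt.Println(v["array"], v["inline"], v["big"], v["ok"])
}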
+func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. +func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': + lx.push(lexInlineTableValue) + return lexCommentStart + case r == ',': + return lx.errorf("unexpected comma") + case r == '}': + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == ',': + lx.ignore() + lx.skip(isWhitespace) + if lx.peek() == '}' { + return lx.errorf("trailing comma not allowed in inline tables") + } + return lexInlineTableValue + case r == '}': + return lexInlineTableEnd + default: + return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r)) + } +} + +func runeOrEOF(r rune) string { + if r == eof { + return "end of file" + } + return "'" + string(r) + "'" +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. +func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf(`unexpected EOF; expected '"'`) + case isNL(r): + return lx.errorPrevLine(errLexStringNL{}) + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == '"': + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + r := lx.next() + switch r { + default: + return lexMultilineString + case eof: + return lx.errorf(`unexpected EOF; expected '"""'`) + case '\\': + return lexMultilineStringEscape + case '"': + /// Found " → try to read two more "". + if lx.accept('"') { + if lx.accept('"') { + /// Peek ahead: the string can contain " and "", including at the + /// end: """str""""" + /// 6 or more at the end, however, is an error. + if lx.peek() == '"' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), `"""""`) { + return lx.errorf(`unexpected '""""""'`) + } + lx.backup() + lx.backup() + return lexMultilineString + } + + lx.backup() /// backup: don't include the """ in the item. + lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() /// Read over ''' again and discard it. 
+ lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + return lexMultilineString + } +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. +func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + default: + return lexRawString + case r == eof: + return lx.errorf(`unexpected EOF; expected "'"`) + case isNL(r): + return lx.errorPrevLine(errLexStringNL{}) + case r == '\'': + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such +// a string. It assumes that the beginning "'''" has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + r := lx.next() + switch r { + default: + return lexMultilineRawString + case eof: + return lx.errorf(`unexpected EOF; expected "'''"`) + case '\'': + /// Found ' → try to read two more ''. + if lx.accept('\'') { + if lx.accept('\'') { + /// Peek ahead: the string can contain ' and '', including at the + /// end: '''str''''' + /// 6 or more at the end, however, is an error. + if lx.peek() == '\'' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), "'''''") { + return lx.errorf(`unexpected "''''''"`) + } + lx.backup() + lx.backup() + return lexMultilineRawString + } + + lx.backup() /// backup: don't include the ''' in the item. + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() /// Read over ''' again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + return lexMultilineRawString + } +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + // Handle the special case first: + if isNL(lx.next()) { + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case ' ', '\t': + // Inside """ .. """ strings you can use \ to escape newlines, and any + // amount of whitespace can be between the \ and \n. + fallthrough + case '\\': + return lx.pop() + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.error(errLexEscape{r}) +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected four hexadecimal digits after '\u', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected eight hexadecimal digits after '\U', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart processes the first character of a value which begins +// with a digit. It exists to catch values starting with '0', so that +// lexBaseNumberOrDate can differentiate base prefixed integers from other +// types. 
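// Illustrative sketch, not part of the vendored sources: the string kinds
// lexed by the states above, including the multiline rule that up to two
// adjacent quotes may appear inside a """ string. Assumes the upstream
// import path.
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	const doc = `
basic     = "tab:\t unicode:\u00E9"
literal   = 'C:\Users\martin'
multiline = """
first line
second line"""
quotes    = """can contain "" two quotes"""
`
	var v map[string]string
	if _, err := toml.Decode(doc, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", v["multiline"]) // newline right after """ is stripped
}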
+func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + switch r { + case '0': + return lexBaseNumberOrDate + } + + if !isDigit(r) { + // The only way to reach this state is if the value starts + // with a digit, so specifically treat anything else as an + // error. + return lx.errorf("expected a digit but got %q", r) + } + + return lexNumberOrDate +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-', ':': + return lexDatetime + case '_': + return lexDecimalNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+': + return lexDatetime + } + + lx.backup() + lx.emitTrim(itemDatetime) + return lx.pop() +} + +// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix. +func lexHexInteger(lx *lexer) stateFn { + r := lx.next() + if isHexadecimal(r) { + return lexHexInteger + } + switch r { + case '_': + return lexHexInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexOctalInteger consumes an octal integer after seeing the '0o' prefix. +func lexOctalInteger(lx *lexer) stateFn { + r := lx.next() + if isOctal(r) { + return lexOctalInteger + } + switch r { + case '_': + return lexOctalInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix. +func lexBinaryInteger(lx *lexer) stateFn { + r := lx.next() + if isBinary(r) { + return lexBinaryInteger + } + switch r { + case '_': + return lexBinaryInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes a decimal float or integer. +func lexDecimalNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDecimalNumber + } + switch r { + case '.', 'e', 'E': + return lexFloat + case '_': + return lexDecimalNumber + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes the first digit of a number beginning with a sign. +// It assumes the sign has already been consumed. Values which start with a sign +// are only allowed to be decimal integers or floats. +// +// The special "nan" and "inf" values are also recognized. +func lexDecimalNumberStart(lx *lexer) stateFn { + r := lx.next() + + // Special error cases to give users better error messages + switch r { + case 'i': + if !lx.accept('n') || !lx.accept('f') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case 'n': + if !lx.accept('a') || !lx.accept('n') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case '0': + p := lx.peek() + switch p { + case 'b', 'o', 'x': + return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p) + } + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + + if isDigit(r) { + return lexDecimalNumber + } + + return lx.errorf("expected a digit but got %q", r) +} + +// lexBaseNumberOrDate differentiates between the possible values which +// start with '0'. 
It assumes that before reaching this state, the initial '0'
+// has been consumed.
+func lexBaseNumberOrDate(lx *lexer) stateFn {
+	r := lx.next()
+	// Note: All datetimes start with at least two digits, so we don't
+	// handle date characters (':', '-', etc.) here.
+	if isDigit(r) {
+		return lexNumberOrDate
+	}
+	switch r {
+	case '_':
+		// Can only be decimal, because there can't be an underscore
+		// between the '0' and the base designator, and dates can't
+		// contain underscores.
+		return lexDecimalNumber
+	case '.', 'e', 'E':
+		return lexFloat
+	case 'b':
+		r = lx.peek()
+		if !isBinary(r) {
+			return lx.errorf("not a binary number: '%s%c'", lx.current(), r)
+		}
+		return lexBinaryInteger
+	case 'o':
+		r = lx.peek()
+		if !isOctal(r) {
+			return lx.errorf("not an octal number: '%s%c'", lx.current(), r)
+		}
+		return lexOctalInteger
+	case 'x':
+		r = lx.peek()
+		if !isHexadecimal(r) {
+			return lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
+		}
+		return lexHexInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexFloat consumes the elements of a float. It allows any sequence of
+// float-like characters, so floats emitted by the lexer are only a first
+// approximation and must be validated by the parser.
+func lexFloat(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexFloat
+	}
+	switch r {
+	case '_', '.', '-', '+', 'e', 'E':
+		return lexFloat
+	}
+
+	lx.backup()
+	lx.emit(itemFloat)
+	return lx.pop()
+}
+
+// lexBool consumes a bool string: 'true' or 'false'.
+func lexBool(lx *lexer) stateFn {
+	var rs []rune
+	for {
+		r := lx.next()
+		if !unicode.IsLetter(r) {
+			lx.backup()
+			break
+		}
+		rs = append(rs, r)
+	}
+	s := string(rs)
+	switch s {
+	case "true", "false":
+		lx.emit(itemBool)
+		return lx.pop()
+	}
+	return lx.errorf("expected value but found %q instead", s)
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemCommentStart)
+	return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first newline character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isNL(r) || r == eof:
+		lx.backup()
+		lx.emit(itemText)
+		return lx.pop()
+	default:
+		return lexComment
+	}
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
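// Illustrative sketch, not part of the vendored sources: the number forms the
// states above accept, with underscores, base prefixes, exponents, and the
// special inf/nan values. Assumes the upstream import path.
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	const doc = `
million = 1_000_000
bin     = 0b1101
oct     = 0o755
hex     = 0xDEAD_BEEF
planck  = 6.626e-34
nothing = nan
`
	var v map[string]interface{}
	if _, err := toml.Decode(doc, &v); err != nil {
		panic(err)
	}
	fmt.Println(v["million"], v["oct"], v["planck"]) // 1000000 493 6.626e-34
}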
+func lexSkip(lx *lexer, nextState stateFn) stateFn { + lx.ignore() + return nextState +} + +func (s stateFn) String() string { + name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name() + if i := strings.LastIndexByte(name, '.'); i > -1 { + name = name[i+1:] + } + if s == nil { + name = "" + } + return name + "()" +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemKeyEnd: + return "KeyEnd" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + case itemInlineTableStart: + return "InlineTableStart" + case itemInlineTableEnd: + return "InlineTableEnd" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) +} + +func isWhitespace(r rune) bool { return r == '\t' || r == ' ' } +func isNL(r rune) bool { return r == '\n' || r == '\r' } +func isControl(r rune) bool { // Control characters except \t, \r, \n + switch r { + case '\t', '\r', '\n': + return false + default: + return (r >= 0x00 && r <= 0x1f) || r == 0x7f + } +} +func isDigit(r rune) bool { return r >= '0' && r <= '9' } +func isBinary(r rune) bool { return r == '0' || r == '1' } +func isOctal(r rune) bool { return r >= '0' && r <= '7' } +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') +} +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || r == '-' +} diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go new file mode 100644 index 00000000..868619fb --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -0,0 +1,120 @@ +package toml + +import ( + "strings" +) + +// MetaData allows access to meta information about TOML data that's not +// accessible otherwise. +// +// It allows checking if a key is defined in the TOML data, whether any keys +// were undecoded, and the TOML type of a key. +type MetaData struct { + context Key // Used only during decoding. + + mapping map[string]interface{} + types map[string]tomlType + keys []Key + decoded map[string]struct{} +} + +// IsDefined reports if the key exists in the TOML data. +// +// The key should be specified hierarchically, for example to access the TOML +// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive. +// +// Returns false for an empty key. +func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var ( + hash map[string]interface{} + ok bool + hashOrVal interface{} = md.mapping + ) + for _, k := range key { + if hash, ok = hashOrVal.(map[string]interface{}); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. 
+// +// Type will return the empty string if given an empty key or a key that does +// not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + if typ, ok := md.types[Key(key).String()]; ok { + return typ.typeString() + } + return "" +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. The list will have the same +// order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a Primitive value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. +func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if _, ok := md.decoded[key.String()]; !ok { + undecoded = append(undecoded, key) + } + } + return undecoded +} + +// Key represents any TOML key, including key groups. Use (MetaData).Keys to get +// values of this type. +type Key []string + +func (k Key) String() string { + ss := make([]string, len(k)) + for i := range k { + ss[i] = k.maybeQuoted(i) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + if k[i] == "" { + return `""` + } + for _, c := range k[i] { + if !isBareKeyChar(c) { + return `"` + dblQuotedReplacer.Replace(k[i]) + `"` + } + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 00000000..8269cca1 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,763 @@ +package toml + +import ( + "fmt" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/BurntSushi/toml/internal" +) + +type parser struct { + lx *lexer + context Key // Full key for the current hash in scope. + currentKey string // Base key name for everything except hashes. + pos Position // Current position in the TOML file. + + ordered []Key // List of keys in the order that they appear in the TOML data. + mapping map[string]interface{} // Map keyname → key value. + types map[string]tomlType // Map keyname → TOML type. + implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). +} + +func parse(data string) (p *parser, err error) { + defer func() { + if r := recover(); r != nil { + if pErr, ok := r.(ParseError); ok { + pErr.input = data + err = pErr + return + } + panic(r) + } + }() + + // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() + // which mangles stuff. + if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { + data = data[2:] + } + + // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 + // file (second byte in surrogate pair being NULL). 
Again, do this here to + // avoid having to deal with UTF-8/16 stuff in the lexer. + ex := 6 + if len(data) < 6 { + ex = len(data) + } + if i := strings.IndexRune(data[:ex], 0); i > -1 { + return nil, ParseError{ + Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", + Position: Position{Line: 1, Start: i, Len: 1}, + Line: 1, + input: data, + } + } + + p = &parser{ + mapping: make(map[string]interface{}), + types: make(map[string]tomlType), + lx: lex(data), + ordered: make([]Key, 0), + implicits: make(map[string]struct{}), + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicItemf(it item, format string, v ...interface{}) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: it.pos, + Line: it.pos.Len, + LastKey: p.current(), + }) +} + +func (p *parser) panicf(format string, v ...interface{}) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: p.pos, + Line: p.pos.Line, + LastKey: p.current(), + }) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val) + if it.typ == itemError { + if it.err != nil { + panic(ParseError{ + Position: it.pos, + Line: it.pos.Line, + LastKey: p.current(), + err: it.err, + }) + } + + p.panicItemf(it, "%s", it.val) + } + return it +} + +func (p *parser) nextPos() item { + it := p.next() + p.pos = it.pos + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: // # .. + p.expect(itemText) + case itemTableStart: // [ .. ] + name := p.nextPos() + + var key Key + for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemTableEnd, name.typ) + + p.addContext(key, false) + p.setType("", tomlHash) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: // [[ .. ]] + name := p.nextPos() + + var key Key + for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemArrayTableEnd, name.typ) + + p.addContext(key, true) + p.setType("", tomlArrayHash) + p.ordered = append(p.ordered, key) + case itemKeyStart: // key = .. + outerContext := p.context + /// Read all the key parts (e.g. 'a' and 'b' in 'a.b') + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + + /// Set value. + val, typ := p.value(p.next(), false) + p.set(p.currentKey, val, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Remove the context we added (preserving any context from [tbl] lines). 
+ p.context = outerContext + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it, false) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + } + panic("unreachable") +} + +var datetimeRepl = strings.NewReplacer( + "z", "Z", + "t", "T", + " ", "T") + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. +func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) + case itemMultilineString: + return p.replaceEscapes(it, stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemInteger: + return p.valueInteger(it) + case itemFloat: + return p.valueFloat(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + default: + p.bug("Expected boolean value, but got '%s'.", it.val) + } + case itemDatetime: + return p.valueDatetime(it) + case itemArray: + return p.valueArray(it) + case itemInlineTableStart: + return p.valueInlineTable(it, parentIsArray) + default: + p.bug("Unexpected value type: %s", it.typ) + } + panic("unreachable") +} + +func (p *parser) valueInteger(it item) (interface{}, tomlType) { + if !numUnderscoresOK(it.val) { + p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val) + } + if numHasLeadingZero(it.val) { + p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val) + } + + num, err := strconv.ParseInt(it.val, 0, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicItemf(it, "Integer '%s' is out of the range of 64-bit signed integers.", it.val) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +func (p *parser) valueFloat(it item) (interface{}, tomlType) { + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val) + } + } + if len(parts) > 0 && numHasLeadingZero(parts[0]) { + p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val) + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicItemf(it, "Invalid float %q: '.' 
must be followed by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does. + val = "nan" + } + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicItemf(it, "Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val) + } else { + p.panicItemf(it, "Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +var dtTypes = []struct { + fmt string + zone *time.Location +}{ + {time.RFC3339Nano, time.Local}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime}, + {"2006-01-02", internal.LocalDate}, + {"15:04:05.999999999", internal.LocalTime}, +} + +func (p *parser) valueDatetime(it item) (interface{}, tomlType) { + it.val = datetimeRepl.Replace(it.val) + var ( + t time.Time + ok bool + err error + ) + for _, dt := range dtTypes { + t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) +} + +func (p *parser) valueArray(it item) (interface{}, tomlType) { + p.setType(p.currentKey, tomlArray) + + // p.setType(p.currentKey, typ) + var ( + types []tomlType + + // Initialize to a non-nil empty slice. This makes it consistent with + // how S = [] decodes into a non-nil slice inside something like struct + // { S []string }. See #338 + array = []interface{}{} + ) + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it, true) + array = append(array, val) + types = append(types, typ) + + // XXX: types isn't used here, we need it to record the accurate type + // information. + // + // Not entirely sure how to best store this; could use "key[0]", + // "key[1]" notation, or maybe store it on the Array type? + } + return array, tomlArray +} + +func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) { + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + prevContext := p.context + p.currentKey = "" + + p.addImplicit(p.context) + p.addContext(p.context, parentIsArray) + + /// Loop over all table key/value pairs. + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + /// Read all key parts. + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + + /// Set the value. + val, typ := p.value(p.next(), false) + p.set(p.currentKey, val, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + hash[p.currentKey] = val + + /// Restore context. + p.context = prevContext + } + p.context = outerContext + p.currentKey = outerKey + return hash, tomlHash +} + +// numHasLeadingZero checks if this number has leading zeroes, allowing for '0', +// +/- signs, and base prefixes. 
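// Illustrative sketch, not part of the vendored sources: the four datetime
// forms matched against dtTypes above. The zone-less forms come back in the
// sentinel internal.Local* zones so they can round-trip. Assumes the upstream
// import path.
package main

import (
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	const doc = `
offset = 1979-05-27T07:32:00Z
local  = 1979-05-27T07:32:00
date   = 1979-05-27
clock  = 07:32:00
`
	var v map[string]time.Time
	if _, err := toml.Decode(doc, &v); err != nil {
		panic(err)
	}
	fmt.Println(v["local"].Location()) // datetime-local
}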
+func numHasLeadingZero(s string) bool {
+	if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x
+		return true
+	}
+	if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' {
+		return true
+	}
+	return false
+}
+
+// numUnderscoresOK checks whether each underscore in s is surrounded by
+// characters that are not underscores.
+func numUnderscoresOK(s string) bool {
+	switch s {
+	case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
+		return true
+	}
+	accept := false
+	for _, r := range s {
+		if r == '_' {
+			if !accept {
+				return false
+			}
+		}
+
+		// isHexadecimal is a superset of all the permissible characters
+		// surrounding an underscore.
+		accept = isHexadecimal(r)
+	}
+	return accept
+}
+
+// numPeriodsOK checks whether every period in s is followed by a digit.
+func numPeriodsOK(s string) bool {
+	period := false
+	for _, r := range s {
+		if period && !isDigit(r) {
+			return false
+		}
+		period = r == '.'
+	}
+	return !period
+}
+
+// Set the current context of the parser, where the context is either a hash or
+// an array of hashes, depending on the value of the `array` parameter.
+//
+// Establishing the context also makes sure that the key isn't a duplicate, and
+// will create implicit hashes automatically.
+func (p *parser) addContext(key Key, array bool) {
+	var ok bool
+
+	// Always start at the top level and drill down for our context.
+	hashContext := p.mapping
+	keyContext := make(Key, 0)
+
+	// We only need implicit hashes for key[0:-1]
+	for _, k := range key[0 : len(key)-1] {
+		_, ok = hashContext[k]
+		keyContext = append(keyContext, k)
+
+		// No key? Make an implicit hash and move on.
+		if !ok {
+			p.addImplicit(keyContext)
+			hashContext[k] = make(map[string]interface{})
+		}
+
+		// If the hash context is actually an array of tables, then set
+		// the hash context to the last element in that array.
+		//
+		// Otherwise, it better be a table, since this MUST be a key group (by
+		// virtue of it not being the last element in a key).
+		switch t := hashContext[k].(type) {
+		case []map[string]interface{}:
+			hashContext = t[len(t)-1]
+		case map[string]interface{}:
+			hashContext = t
+		default:
+			p.panicf("Key '%s' was already created as a hash.", keyContext)
+		}
+	}
+
+	p.context = keyContext
+	if array {
+		// If this is the first element for this array, then allocate a new
+		// list of tables for it.
+		k := key[len(key)-1]
+		if _, ok := hashContext[k]; !ok {
+			hashContext[k] = make([]map[string]interface{}, 0, 4)
+		}
+
+		// Add a new table. But make sure the key hasn't already been used
+		// for something else.
+		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
+			hashContext[k] = append(hash, make(map[string]interface{}))
+		} else {
+			p.panicf("Key '%s' was already created and cannot be used as an array.", key)
+		}
+	} else {
+		p.setValue(key[len(key)-1], make(map[string]interface{}))
+	}
+	p.context = append(p.context, key[len(key)-1])
+}
+
+// set calls setValue and setType.
+func (p *parser) set(key string, val interface{}, typ tomlType) {
+	p.setValue(key, val)
+	p.setType(key, typ)
+}
+
+// setValue sets the given key to the given value in the current context.
+// It will make sure that the key hasn't already been defined, accounting for
+// implicit key groups.
+func (p *parser) setValue(key string, value interface{}) { + var ( + tmpHash interface{} + ok bool + hash = p.mapping + keyContext Key + ) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]interface{}: + // The context is a table of hashes. Pick the most recent table + // defined as the current hash. + hash = t[len(t)-1] + case map[string]interface{}: + hash = t + default: + p.panicf("Key '%s' has already been defined.", keyContext) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Normally redefining keys isn't allowed, but the key could have been + // defined implicitly and it's allowed to be redefined concretely. (See + // the `valid/implicit-and-explicit-after.toml` in toml-test) + // + // But we have to make sure to stop marking it as an implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isArray(keyContext) { + p.removeImplicit(keyContext) + hash[key] = value + return + } + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + + // Otherwise, we have a concrete key trying to override a previous + // key, which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + + hash[key] = value +} + +// setType sets the type of a particular value at a given key. It should be +// called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables). +func (p *parser) setType(key string, typ tomlType) { + keyContext := make(Key, 0, len(p.context)+1) + keyContext = append(keyContext, p.context...) + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + // Special case to make empty keys ("" = 1) work. + // Without it it will set "" rather than `""`. + // TODO: why is this needed? And why is this only needed here? + if len(keyContext) == 0 { + keyContext = Key{""} + } + p.types[keyContext.String()] = typ +} + +// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and +// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly). +func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } +func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } +func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } +func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray } +func (p *parser) addImplicitContext(key Key) { + p.addImplicit(key) + p.addContext(key, false) +} + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) > 0 && s[0] == '\n' { + return s[1:] + } + if len(s) > 1 && s[0] == '\r' && s[1] == '\n' { + return s[2:] + } + return s +} + +// Remove newlines inside triple-quoted strings if a line ends with "\". 
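// Illustrative sketch, not part of the vendored sources: the line-ending
// backslash handled by stripEscapedNewlines below, which removes the newline
// and any following whitespace so the pieces join into one line. Assumes the
// upstream import path.
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	const doc = `
joined = """\
    The quick brown \
    fox jumps over \
    the lazy dog."""
`
	var v map[string]string
	if _, err := toml.Decode(doc, &v); err != nil {
		panic(err)
	}
	fmt.Println(v["joined"]) // The quick brown fox jumps over the lazy dog.
}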
+func stripEscapedNewlines(s string) string {
+	split := strings.Split(s, "\n")
+	if len(split) < 1 {
+		return s
+	}
+
+	escNL := false // Keep track of whether the last non-blank line was escaped.
+	for i, line := range split {
+		line = strings.TrimRight(line, " \t\r")
+
+		if len(line) == 0 || line[len(line)-1] != '\\' {
+			split[i] = strings.TrimRight(split[i], "\r")
+			if !escNL && i != len(split)-1 {
+				split[i] += "\n"
+			}
+			continue
+		}
+
+		escBS := true
+		for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
+			escBS = !escBS
+		}
+		if escNL {
+			line = strings.TrimLeft(line, " \t\r")
+		}
+		escNL = !escBS
+
+		if escBS {
+			split[i] += "\n"
+			continue
+		}
+
+		split[i] = line[:len(line)-1] // Remove \
+		if len(split)-1 > i {
+			split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
+		}
+	}
+	return strings.Join(split, "")
+}
+
+func (p *parser) replaceEscapes(it item, str string) string {
+	replaced := make([]rune, 0, len(str))
+	s := []byte(str)
+	r := 0
+	for r < len(s) {
+		if s[r] != '\\' {
+			c, size := utf8.DecodeRune(s[r:])
+			r += size
+			replaced = append(replaced, c)
+			continue
+		}
+		r += 1
+		if r >= len(s) {
+			p.bug("Escape sequence at end of string.")
+			return ""
+		}
+		switch s[r] {
+		default:
+			p.bug("Expected valid escape code after \\, but got %q.", s[r])
+			return ""
+		case ' ', '\t':
+			p.panicItemf(it, "invalid escape: '\\%c'", s[r])
+			return ""
+		case 'b':
+			replaced = append(replaced, rune(0x0008))
+			r += 1
+		case 't':
+			replaced = append(replaced, rune(0x0009))
+			r += 1
+		case 'n':
+			replaced = append(replaced, rune(0x000A))
+			r += 1
+		case 'f':
+			replaced = append(replaced, rune(0x000C))
+			r += 1
+		case 'r':
+			replaced = append(replaced, rune(0x000D))
+			r += 1
+		case '"':
+			replaced = append(replaced, rune(0x0022))
+			r += 1
+		case '\\':
+			replaced = append(replaced, rune(0x005C))
+			r += 1
+		case 'u':
+			// At this point, we know we have a Unicode escape of the form
+			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
+			// for us.)
+			escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
+			replaced = append(replaced, escaped)
+			r += 5
+		case 'U':
+			// At this point, we know we have a Unicode escape of the form
+			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
+			// for us.)
+			escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9])
+			replaced = append(replaced, escaped)
+			r += 9
+		}
+	}
+	return string(replaced)
+}
+
+func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune {
+	s := string(bs)
+	hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
+	if err != nil {
+		p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
+	}
+	if !utf8.ValidRune(rune(hex)) {
+		p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s)
+	}
+	return rune(hex)
+}
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
new file mode 100644
index 00000000..254ca82e
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_fields.go
@@ -0,0 +1,242 @@
+package toml
+
+// Struct field handling is adapted from code in encoding/json:
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the Go distribution.
+
+import (
+	"reflect"
+	"sort"
+	"sync"
+)
+
+// A field represents a single field found in a struct.
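// Illustrative sketch, not part of the vendored sources: the `toml` struct
// tags that the field machinery below resolves, renaming with `toml:"name"`
// and skipping with `toml:"-"`. Assumes the upstream import path.
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type server struct {
	Host   string `toml:"host"`
	Port   int    `toml:"port"`
	Secret string `toml:"-"` // never decoded, even if present in the input
}

func main() {
	var s server
	if _, err := toml.Decode("host = \"localhost\"\nport = 8080", &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Host, s.Port) // localhost 8080
}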
+type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + var count map[reflect.Type]int + var nextCount map[reflect.Type]int + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. 
Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/github.com/BurntSushi/toml/type_toml.go b/vendor/github.com/BurntSushi/toml/type_toml.go new file mode 100644 index 00000000..4e90d773 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_toml.go @@ -0,0 +1,70 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. 
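+// Note that, as implemented below, two nil tomlType values are never
+// reported as equal: the nil checks short-circuit to false.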
+func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsTable(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} diff --git a/vendor/github.com/ilyakaznacheev/cleanenv/.gitignore b/vendor/github.com/ilyakaznacheev/cleanenv/.gitignore new file mode 100644 index 00000000..a2718668 --- /dev/null +++ b/vendor/github.com/ilyakaznacheev/cleanenv/.gitignore @@ -0,0 +1,4 @@ +.* +!.travis.yml +!.github +!.gitignore diff --git a/vendor/github.com/ilyakaznacheev/cleanenv/.travis.yml b/vendor/github.com/ilyakaznacheev/cleanenv/.travis.yml new file mode 100644 index 00000000..ab1cef15 --- /dev/null +++ b/vendor/github.com/ilyakaznacheev/cleanenv/.travis.yml @@ -0,0 +1,36 @@ +language: go + +branches: + only: + - master + - develop + - /^v\d+\.\d+(\.\d+)?(-\S*)?$/ + +go: + - 1.11.x + - 1.12.x + - 1.13.x + - 1.14.x + - 1.15.x + - 1.16.x + - tip + +os: + - linux + - osx + +matrix: + allow_failures: + - go: tip + +env: + - GO111MODULE=on + +before_install: + - go get + +script: + - go test -race -coverprofile=coverage.txt -covermode=atomic + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/ilyakaznacheev/cleanenv/LICENSE b/vendor/github.com/ilyakaznacheev/cleanenv/LICENSE new file mode 100644 index 00000000..95453753 --- /dev/null +++ b/vendor/github.com/ilyakaznacheev/cleanenv/LICENSE @@ -0,0 +1,9 @@ +MIT License + +Copyright (c) 2019 Ilya Kaznacheev + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/ilyakaznacheev/cleanenv/README.md b/vendor/github.com/ilyakaznacheev/cleanenv/README.md
new file mode 100644
index 00000000..fd0b90ee
--- /dev/null
+++ b/vendor/github.com/ilyakaznacheev/cleanenv/README.md
@@ -0,0 +1,309 @@
+![Clean Env](logo.svg)
+
+# Clean Env
+
+Minimalistic configuration reader
+
+[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go)
+[![GoDoc](https://godoc.org/github.com/ilyakaznacheev/cleanenv?status.svg)](https://godoc.org/github.com/ilyakaznacheev/cleanenv)
+[![Go Report Card](https://goreportcard.com/badge/github.com/ilyakaznacheev/cleanenv)](https://goreportcard.com/report/github.com/ilyakaznacheev/cleanenv)
+[![Coverage Status](https://codecov.io/github/ilyakaznacheev/cleanenv/coverage.svg?branch=master)](https://codecov.io/gh/ilyakaznacheev/cleanenv)
+[![Build Status](https://travis-ci.org/ilyakaznacheev/cleanenv.svg?branch=master)](https://travis-ci.org/ilyakaznacheev/cleanenv)
+[![Release](https://img.shields.io/github/release/ilyakaznacheev/cleanenv.svg)](https://github.com/ilyakaznacheev/cleanenv/releases/)
+[![License](https://img.shields.io/github/license/ilyakaznacheev/cleanenv.svg)](https://github.com/ilyakaznacheev/cleanenv/blob/master/LICENSE)
+
+## Overview
+
+This is a simple configuration reading tool. It just does the following:
+
+- reads and parses a configuration structure from a file
+- reads and overwrites the configuration structure from environment variables
+- writes a detailed variable list to help output
+
+## Content
+
+- [Installation](#installation)
+- [Usage](#usage)
+    - [Read Configuration](#read-configuration)
+    - [Read Environment Variables Only](#read-environment-variables-only)
+    - [Update Environment Variables](#update-environment-variables)
+    - [Description](#description)
+- [Model Format](#model-format)
+- [Supported types](#supported-types)
+- [Custom Functions](#custom-functions)
+    - [Custom Value Setter](#custom-value-setter)
+    - [Custom Value Update](#custom-value-update)
+- [Supported File Formats](#supported-file-formats)
+- [Integration](#integration)
+    - [Flag](#flag)
+- [Examples](#examples)
+- [Contribution](#contribution)
+- [Thanks](#thanks)
+
+## Installation
+
+To install the package, run:
+
+```bash
+go get -u github.com/ilyakaznacheev/cleanenv
+```
+
+## Usage
+
+The package is designed to be simple and explicit in use.
+
+The main idea is to use a structured configuration variable instead of a dynamic set of configuration fields, as some libraries do, to avoid unnecessary type conversions and to move the configuration through the program as a simple structure rather than as an object with complex behavior.
+
+There are just a few actions you can do with this tool, and they are probably the only things you need to do with your config if your application is not too complicated.
+
+- read a configuration file
+- read environment variables
+- read some environment variables again
+
+### Read Configuration
+
+You can read a configuration file and environment variables in a single function call.
+
+```go
+import "github.com/ilyakaznacheev/cleanenv"
+
+type ConfigDatabase struct {
+	Port     string `yaml:"port" env:"PORT" env-default:"5432"`
+	Host     string `yaml:"host" env:"HOST" env-default:"localhost"`
+	Name     string `yaml:"name" env:"NAME" env-default:"postgres"`
+	User     string `yaml:"user" env:"USER" env-default:"user"`
+	Password string `yaml:"password" env:"PASSWORD"`
+}
+
+var cfg ConfigDatabase
+
+err := cleanenv.ReadConfig("config.yml", &cfg)
+if err != nil {
+    ...
+}
+```
+
+This will do the following:
+
+1. parses the configuration file according to the YAML format (`yaml` tag in this case);
+1. reads environment variables and overwrites values from the file with the values found in the environment (`env` tag);
+1. if no value was found in the first two steps, fills the field with the default value (`env-default` tag), if one is set.
+
+### Read Environment Variables Only
+
+Sometimes you don't want to use configuration files at all, or you may want to use the `.env` file format instead. Thus, you can limit yourself to reading environment variables only:
+
+```go
+import "github.com/ilyakaznacheev/cleanenv"
+
+type ConfigDatabase struct {
+	Port     string `env:"PORT" env-default:"5432"`
+	Host     string `env:"HOST" env-default:"localhost"`
+	Name     string `env:"NAME" env-default:"postgres"`
+	User     string `env:"USER" env-default:"user"`
+	Password string `env:"PASSWORD"`
+}
+
+var cfg ConfigDatabase
+
+err := cleanenv.ReadEnv(&cfg)
+if err != nil {
+    ...
+}
+```
+
+### Update Environment Variables
+
+Some environment variables may change while the application is running. To get the new values you need to mark these variables as updatable with the tag `env-upd` and then run the update function:
+
+```go
+import "github.com/ilyakaznacheev/cleanenv"
+
+type ConfigRemote struct {
+	Port     string `env:"PORT" env-upd:""`
+	Host     string `env:"HOST" env-upd:""`
+	UserName string `env:"USERNAME"`
+}
+
+var cfg ConfigRemote
+
+cleanenv.ReadEnv(&cfg)
+
+// ... some actions in-between
+
+err := cleanenv.UpdateEnv(&cfg)
+if err != nil {
+    ...
+}
+```
+
+Here the remote host and port may change in a distributed system architecture. Fields `cfg.Port` and `cfg.Host` can be updated at runtime from the corresponding environment variables. You can update them before the remote service call. Field `cfg.UserName` will not be changed after the initial read, though.
+
+### Description
+
+You can get descriptions of all environment variables to use them in the help documentation.
+
+```go
+import "github.com/ilyakaznacheev/cleanenv"
+
+type ConfigServer struct {
+	Port string `env:"PORT" env-description:"server port"`
+	Host string `env:"HOST" env-description:"server host"`
+}
+
+var cfg ConfigServer
+
+help, err := cleanenv.GetDescription(&cfg, nil)
+if err != nil {
+    ...
+}
+```
+
+You will get the following:
+
+```
+Environment variables:
+  PORT server port
+  HOST server host
+```
+
+## Model Format
+
+The library uses tags to configure the model of the configuration structure. The following tags are supported:
+
+- `env=""` - environment variable name (e.g. `env="PORT"`);
+- `env-upd` - flag to mark a field as updatable. Run `UpdateEnv(&cfg)` to refresh updatable variables from the environment;
+- `env-required` - flag to mark a field as required.
+  If set, an error will be returned during environment parsing when the field marked as required is empty (i.e. holds Go's default value for its type). The `env-default` tag is ignored in this case;
+- `env-default=""` - default value. If the field wasn't filled from the environment variable, the default value will be used instead;
+- `env-separator=""` - custom list and map separator. If not set, the default separator `,` will be used;
+- `env-description=""` - environment variable description;
+- `env-layout=""` - parsing layout (for types like `time.Time`);
+- `env-prefix=""` - prefix for all fields of a nested structure (only for nested structures);
+
+## Supported types
+
+The following types are supported:
+
+- `int` (any kind);
+- `float` (any kind);
+- `string`;
+- `boolean`;
+- slices (of any other supported type);
+- maps (of any other supported type);
+- `time.Duration`;
+- `time.Time` (the default layout is RFC3339; it may be overridden with `env-layout`);
+- any type implementing the `cleanenv.Setter` interface.
+
+
+## Custom Functions
+
+To extend the package's abilities you can use some custom functions.
+
+### Custom Value Setter
+
+To allow a custom type to set its value from an environment variable, you need to implement the `Setter` interface on the field level:
+
+```go
+type MyField string
+
+func (f *MyField) SetValue(s string) error {
+	if s == "" {
+		return fmt.Errorf("field value can't be empty")
+	}
+	*f = MyField("my field is: " + s)
+	return nil
+}
+
+type Config struct {
+	Field MyField `env:"MY_VALUE"`
+}
+```
+
+The `SetValue` method should implement the conversion logic from a string to the custom type.
+
+### Custom Value Update
+
+You may need to execute some custom field update logic, e.g. for a remote config load.
+
+To do so, implement the `Updater` interface on the structure level:
+
+```go
+type Config struct {
+	Field string
+}
+
+func (c *Config) Update() error {
+	newField, err := SomeCustomUpdate()
+	c.Field = newField
+	return err
+}
+```
+
+## Supported File Formats
+
+Several of the most popular config file formats are supported:
+
+- YAML
+- JSON
+- TOML
+- ENV
+- EDN
+
+## Integration
+
+The package can be used with many other solutions. To make it more useful, we provide some helpers.
+
+### Flag
+
+You can use the cleanenv help together with the Go `flag` package.
+
+```go
+// create some config structure
+var cfg config
+
+// create flag set using `flag` package
+fset := flag.NewFlagSet("Example", flag.ContinueOnError)
+
+// get config usage with wrapped flag usage
+fset.Usage = cleanenv.FUsage(fset.Output(), &cfg, nil, fset.Usage)
+
+fset.Parse(os.Args[1:])
+```
+
+## Examples
+
+```go
+type Config struct {
+	Port string `yaml:"port" env:"PORT" env-default:"8080"`
+	Host string `yaml:"host" env:"HOST" env-default:"localhost"`
+}
+
+var cfg Config
+
+err := cleanenv.ReadConfig("config.yml", &cfg)
+if err != nil {
+    ...
+}
+```
+
+This code will try to read and parse the configuration file `config.yml` into the structure described by `Config`. Then it will overwrite the fields from the available environment variables (`PORT`, `HOST`).
+
+For more details check the [example](/example) directory.
+
+## Contribution
+
+The tool is open-sourced under the [MIT](LICENSE) license.
+
+If you find an error, want to add something, or have a question, feel free to create an issue and/or make a pull request.
+
+Any contribution is welcome.
+
+## Thanks
+
+Big thanks to the project [kelseyhightower/envconfig](https://github.com/kelseyhightower/envconfig) for the inspiration.
+
+The logo was made by [alexchoffy](https://www.instagram.com/alexchoffy/).
+
+## Blog Posts
+
+[Clean Configuration Management in Golang](https://dev.to/ilyakaznacheev/clean-configuration-management-in-golang-1c89).
diff --git a/vendor/github.com/ilyakaznacheev/cleanenv/cleanenv.go b/vendor/github.com/ilyakaznacheev/cleanenv/cleanenv.go
new file mode 100644
index 00000000..3b03d744
--- /dev/null
+++ b/vendor/github.com/ilyakaznacheev/cleanenv/cleanenv.go
@@ -0,0 +1,606 @@
+package cleanenv
+
+import (
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io"
+	"math"
+	"os"
+	"path/filepath"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/BurntSushi/toml"
+	"github.com/joho/godotenv"
+	"gopkg.in/yaml.v3"
+	"olympos.io/encoding/edn"
+)
+
+const (
+	// DefaultSeparator is the default list and map separator character
+	DefaultSeparator = ","
+)
+
+// Supported tags
+const (
+	// Name of the environment variable or a list of names
+	TagEnv = "env"
+	// Value parsing layout (for types like time.Time)
+	TagEnvLayout = "env-layout"
+	// Default value
+	TagEnvDefault = "env-default"
+	// Custom list and map separator
+	TagEnvSeparator = "env-separator"
+	// Environment variable description
+	TagEnvDescription = "env-description"
+	// Flag to mark a field as updatable
+	TagEnvUpd = "env-upd"
+	// Flag to mark a field as required
+	TagEnvRequired = "env-required"
+	// Flag to specify prefix for structure fields
+	TagEnvPrefix = "env-prefix"
+)
+
+// Setter is an interface for a custom value setter.
+//
+// To implement a custom value setter you need to add a SetValue function to your type that will receive a string raw value:
+//
+//	type MyField string
+//
+//	func (f *MyField) SetValue(s string) error {
+//		if s == "" {
+//			return fmt.Errorf("field value can't be empty")
+//		}
+//		*f = MyField("my field is: " + s)
+//		return nil
+//	}
+type Setter interface {
+	SetValue(string) error
+}
+
+// Updater gives the ability to implement a custom update function for a field or a whole structure
+type Updater interface {
+	Update() error
+}
+
+// ReadConfig reads a configuration file and parses it depending on the tags in the provided structure.
+// Then it reads and overwrites the parsed values from environment variables.
+//
+// Example:
+//
+//	type ConfigDatabase struct {
+//		Port     string `yaml:"port" env:"PORT" env-default:"5432"`
+//		Host     string `yaml:"host" env:"HOST" env-default:"localhost"`
+//		Name     string `yaml:"name" env:"NAME" env-default:"postgres"`
+//		User     string `yaml:"user" env:"USER" env-default:"user"`
+//		Password string `yaml:"password" env:"PASSWORD"`
+//	}
+//
+//	var cfg ConfigDatabase
+//
+//	err := cleanenv.ReadConfig("config.yml", &cfg)
+//	if err != nil {
+//	    ...
+//	}
+func ReadConfig(path string, cfg interface{}) error {
+	err := parseFile(path, cfg)
+	if err != nil {
+		return err
+	}
+
+	return readEnvVars(cfg, false)
+}
+
+// ReadEnv reads environment variables into the structure.
+func ReadEnv(cfg interface{}) error {
+	return readEnvVars(cfg, false)
+}
+
+// UpdateEnv rereads (updates) environment variables in the structure.
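+//
+// A minimal usage sketch (hypothetical config type):
+//
+//	var cfg ConfigRemote
+//	cleanenv.ReadEnv(&cfg)
+//	// ... later, after the environment may have changed:
+//	if err := cleanenv.UpdateEnv(&cfg); err != nil {
+//		// handle error
+//	}
+//
+// Only fields tagged with env-upd are refreshed.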
+func UpdateEnv(cfg interface{}) error { + return readEnvVars(cfg, true) +} + +// parseFile parses configuration file according to it's extension +// +// Currently following file extensions are supported: +// +// - yaml +// +// - json +// +// - toml +// +// - env +// +// - edn +func parseFile(path string, cfg interface{}) error { + // open the configuration file + f, err := os.OpenFile(path, os.O_RDONLY|os.O_SYNC, 0) + if err != nil { + return err + } + defer f.Close() + + // parse the file depending on the file type + switch ext := strings.ToLower(filepath.Ext(path)); ext { + case ".yaml", ".yml": + err = parseYAML(f, cfg) + case ".json": + err = parseJSON(f, cfg) + case ".toml": + err = parseTOML(f, cfg) + case ".edn": + err = parseEDN(f, cfg) + case ".env": + err = parseENV(f, cfg) + default: + return fmt.Errorf("file format '%s' doesn't supported by the parser", ext) + } + if err != nil { + return fmt.Errorf("config file parsing error: %s", err.Error()) + } + return nil +} + +// parseYAML parses YAML from reader to data structure +func parseYAML(r io.Reader, str interface{}) error { + return yaml.NewDecoder(r).Decode(str) +} + +// parseJSON parses JSON from reader to data structure +func parseJSON(r io.Reader, str interface{}) error { + return json.NewDecoder(r).Decode(str) +} + +// parseTOML parses TOML from reader to data structure +func parseTOML(r io.Reader, str interface{}) error { + _, err := toml.DecodeReader(r, str) + return err +} + +// parseEDN parses EDN from reader to data structure +func parseEDN(r io.Reader, str interface{}) error { + return edn.NewDecoder(r).Decode(str) +} + +// parseENV, in fact, doesn't fill the structure with environment variable values. +// It just parses ENV file and sets all variables to the environment. +// Thus, the structure should be filled at the next steps. +func parseENV(r io.Reader, _ interface{}) error { + vars, err := godotenv.Parse(r) + if err != nil { + return err + } + + for env, val := range vars { + os.Setenv(env, val) + } + return nil +} + +// structMeta is a structure metadata entity +type structMeta struct { + envList []string + fieldName string + fieldValue reflect.Value + defValue *string + layout *string + separator string + description string + updatable bool + required bool +} + +// isFieldValueZero determines if fieldValue empty or not +func (sm *structMeta) isFieldValueZero() bool { + return isZero(sm.fieldValue) +} + +// readStructMetadata reads structure metadata (types, tags, etc.) 
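+// As a rough sketch, a field declared as
+//
+//	Port string `env:"PORT" env-default:"8080" env-description:"server port"`
+//
+// yields a structMeta with envList []string{"PORT"}, defValue pointing at
+// "8080", the default separator ",", and description "server port".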
+func readStructMetadata(cfgRoot interface{}) ([]structMeta, error) { + type cfgNode struct { + Val interface{} + Prefix string + } + + cfgStack := []cfgNode{{cfgRoot, ""}} + metas := make([]structMeta, 0) + + for i := 0; i < len(cfgStack); i++ { + + s := reflect.ValueOf(cfgStack[i].Val) + sPrefix := cfgStack[i].Prefix + + // unwrap pointer + if s.Kind() == reflect.Ptr { + s = s.Elem() + } + + // process only structures + if s.Kind() != reflect.Struct { + return nil, fmt.Errorf("wrong type %v", s.Kind()) + } + typeInfo := s.Type() + + // read tags + for idx := 0; idx < s.NumField(); idx++ { + fType := typeInfo.Field(idx) + + var ( + defValue *string + layout *string + separator string + ) + + // process nested structure (except of time.Time) + if fld := s.Field(idx); fld.Kind() == reflect.Struct { + // add structure to parsing stack + if fld.Type() != reflect.TypeOf(time.Time{}) { + prefix, _ := fType.Tag.Lookup(TagEnvPrefix) + cfgStack = append(cfgStack, cfgNode{fld.Addr().Interface(), sPrefix + prefix}) + continue + } + // process time.Time + if l, ok := fType.Tag.Lookup(TagEnvLayout); ok { + layout = &l + } + } + + // check is the field value can be changed + if !s.Field(idx).CanSet() { + continue + } + + if def, ok := fType.Tag.Lookup(TagEnvDefault); ok { + defValue = &def + } + + if sep, ok := fType.Tag.Lookup(TagEnvSeparator); ok { + separator = sep + } else { + separator = DefaultSeparator + } + + _, upd := fType.Tag.Lookup(TagEnvUpd) + + _, required := fType.Tag.Lookup(TagEnvRequired) + + envList := make([]string, 0) + + if envs, ok := fType.Tag.Lookup(TagEnv); ok && len(envs) != 0 { + envList = strings.Split(envs, DefaultSeparator) + if sPrefix != "" { + for i := range envList { + envList[i] = sPrefix + envList[i] + } + } + } + + metas = append(metas, structMeta{ + envList: envList, + fieldName: s.Type().Field(idx).Name, + fieldValue: s.Field(idx), + defValue: defValue, + layout: layout, + separator: separator, + description: fType.Tag.Get(TagEnvDescription), + updatable: upd, + required: required, + }) + } + + } + + return metas, nil +} + +// readEnvVars reads environment variables to the provided configuration structure +func readEnvVars(cfg interface{}, update bool) error { + metaInfo, err := readStructMetadata(cfg) + if err != nil { + return err + } + + if updater, ok := cfg.(Updater); ok { + if err := updater.Update(); err != nil { + return err + } + } + + for _, meta := range metaInfo { + // update only updatable fields + if update && !meta.updatable { + continue + } + + var rawValue *string + + for _, env := range meta.envList { + if value, ok := os.LookupEnv(env); ok { + rawValue = &value + break + } + } + + if rawValue == nil && meta.required && meta.isFieldValueZero() { + err := fmt.Errorf("field %q is required but the value is not provided", + meta.fieldName) + return err + } + + if rawValue == nil && meta.isFieldValueZero() { + rawValue = meta.defValue + } + + if rawValue == nil { + continue + } + + if err := parseValue(meta.fieldValue, *rawValue, meta.separator, meta.layout); err != nil { + return err + } + } + + return nil +} + +// parseValue parses value into the corresponding field. 
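+// For example (a sketch), an env-default of "8080" for an int field arrives
+// here as the raw string "8080" and is converted with strconv.ParseInt.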
+// In case of maps and slices it uses provided separator to split raw value string +func parseValue(field reflect.Value, value, sep string, layout *string) error { + // TODO: simplify recursion + + if field.CanInterface() { + if cs, ok := field.Interface().(Setter); ok { + return cs.SetValue(value) + } else if csp, ok := field.Addr().Interface().(Setter); ok { + return csp.SetValue(value) + } + } + + valueType := field.Type() + + switch valueType.Kind() { + // parse string value + case reflect.String: + field.SetString(value) + + // parse boolean value + case reflect.Bool: + b, err := strconv.ParseBool(value) + if err != nil { + return err + } + field.SetBool(b) + + // parse integer (or time) value + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if field.Kind() == reflect.Int64 && valueType.PkgPath() == "time" && valueType.Name() == "Duration" { + // try to parse time + d, err := time.ParseDuration(value) + if err != nil { + return err + } + field.SetInt(int64(d)) + + } else { + // parse regular integer + number, err := strconv.ParseInt(value, 0, valueType.Bits()) + if err != nil { + return err + } + field.SetInt(number) + } + + // parse unsigned integer value + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + number, err := strconv.ParseUint(value, 0, valueType.Bits()) + if err != nil { + return err + } + field.SetUint(number) + + // parse floating point value + case reflect.Float32, reflect.Float64: + number, err := strconv.ParseFloat(value, valueType.Bits()) + if err != nil { + return err + } + field.SetFloat(number) + + // parse sliced value + case reflect.Slice: + sliceValue, err := parseSlice(valueType, value, sep, layout) + if err != nil { + return err + } + + field.Set(*sliceValue) + + // parse mapped value + case reflect.Map: + mapValue, err := parseMap(valueType, value, sep, layout) + if err != nil { + return err + } + + field.Set(*mapValue) + + case reflect.Struct: + // process time.Time only + if valueType.PkgPath() == "time" && valueType.Name() == "Time" { + + var l string + if layout != nil { + l = *layout + } else { + l = time.RFC3339 + } + val, err := time.Parse(l, value) + if err != nil { + return err + } + field.Set(reflect.ValueOf(val)) + } + + default: + return fmt.Errorf("unsupported type %s.%s", valueType.PkgPath(), valueType.Name()) + } + + return nil +} + +// parseSlice parses value into a slice of given type +func parseSlice(valueType reflect.Type, value string, sep string, layout *string) (*reflect.Value, error) { + sliceValue := reflect.MakeSlice(valueType, 0, 0) + if valueType.Elem().Kind() == reflect.Uint8 { + sliceValue = reflect.ValueOf([]byte(value)) + } else if len(strings.TrimSpace(value)) != 0 { + values := strings.Split(value, sep) + sliceValue = reflect.MakeSlice(valueType, len(values), len(values)) + + for i, val := range values { + if err := parseValue(sliceValue.Index(i), val, sep, layout); err != nil { + return nil, err + } + } + } + return &sliceValue, nil +} + +// parseMap parses value into a map of given type +func parseMap(valueType reflect.Type, value string, sep string, layout *string) (*reflect.Value, error) { + mapValue := reflect.MakeMap(valueType) + if len(strings.TrimSpace(value)) != 0 { + pairs := strings.Split(value, sep) + for _, pair := range pairs { + kvPair := strings.SplitN(pair, ":", 2) + if len(kvPair) != 2 { + return nil, fmt.Errorf("invalid map item: %q", pair) + } + k := reflect.New(valueType.Key()).Elem() + err := parseValue(k, kvPair[0], sep, layout) + if 
err != nil { + return nil, err + } + v := reflect.New(valueType.Elem()).Elem() + err = parseValue(v, kvPair[1], sep, layout) + if err != nil { + return nil, err + } + mapValue.SetMapIndex(k, v) + } + } + return &mapValue, nil +} + +// GetDescription returns a description of environment variables. +// You can provide a custom header text. +func GetDescription(cfg interface{}, headerText *string) (string, error) { + meta, err := readStructMetadata(cfg) + if err != nil { + return "", err + } + + var header, description string + + if headerText != nil { + header = *headerText + } else { + header = "Environment variables:" + } + + for _, m := range meta { + if len(m.envList) == 0 { + continue + } + + for idx, env := range m.envList { + + elemDescription := fmt.Sprintf("\n %s %s", env, m.fieldValue.Kind()) + if idx > 0 { + elemDescription += fmt.Sprintf(" (alternative to %s)", m.envList[0]) + } + elemDescription += fmt.Sprintf("\n \t%s", m.description) + if m.defValue != nil { + elemDescription += fmt.Sprintf(" (default %q)", *m.defValue) + } + description += elemDescription + } + } + + if description != "" { + return header + description, nil + } + return "", nil +} + +// Usage returns a configuration usage help. +// Other usage instructions can be wrapped in and executed before this usage function. +// The default output is STDERR. +func Usage(cfg interface{}, headerText *string, usageFuncs ...func()) func() { + return FUsage(os.Stderr, cfg, headerText, usageFuncs...) +} + +// FUsage prints configuration help into the custom output. +// Other usage instructions can be wrapped in and executed before this usage function +func FUsage(w io.Writer, cfg interface{}, headerText *string, usageFuncs ...func()) func() { + return func() { + for _, fn := range usageFuncs { + fn() + } + + _ = flag.Usage + + text, err := GetDescription(cfg, headerText) + if err != nil { + return + } + if len(usageFuncs) > 0 { + fmt.Fprintln(w) + } + fmt.Fprintln(w, text) + } +} + +// isZero is a backport of reflect.Value.IsZero() +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + c := v.Complex() + return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !isZero(v.Index(i)) { + return false + } + } + return true + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + return v.IsNil() + case reflect.String: + return v.Len() == 0 + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !isZero(v.Field(i)) { + return false + } + } + return true + default: + // This should never happens, but will act as a safeguard for + // later, as a default value doesn't makes sense here. 
+ panic(fmt.Sprintf("Value.IsZero: %v", v.Kind())) + } +} diff --git a/vendor/github.com/ilyakaznacheev/cleanenv/docs.go b/vendor/github.com/ilyakaznacheev/cleanenv/docs.go new file mode 100644 index 00000000..53f9fa1e --- /dev/null +++ b/vendor/github.com/ilyakaznacheev/cleanenv/docs.go @@ -0,0 +1,63 @@ +/* +Package cleanenv gives you a single tool to read application configuration from several sources with ease. + +Features + +- read from several file formats (YAML, JSON, TOML, ENV, EDN) and parse into the internal structure; + +- read environment variables into the internal structure; + +- output environment variable list with descriptions into help output; + +- custom variable readers (e.g. if you want to read from remote config server, etc). + +Usage + +You can just prepare the config structure and fill it from the config file and environment variables. + + type Config struct { + Port string `yaml:"port" env:"PORT" env-default:"8080"` + Host string `yaml:"host" env:"HOST" env-default:"localhost"` + } + + var cfg Config + + ReadConfig("config.yml", &cfg) + +Help output + +You can list all of your environment variables by means of help output: + + type ConfigServer struct { + Port string `env:"PORT" env-description:"server port"` + Host string `env:"HOST" env-description:"server host"` + } + + var cfg ConfigRemote + + help, err := cleanenv.GetDescription(&cfg, nil) + if err != nil { + ... + } + + // setup help output + f := flag.NewFlagSet("Example app", 1) + fu := f.Usage + f.Usage = func() { + fu() + envHelp, _ := cleanenv.GetDescription(&cfg, nil) + fmt.Fprintln(f.Output()) + fmt.Fprintln(f.Output(), envHelp) + } + + f.Parse(os.Args[1:]) + +Then run go run main.go -h and the output will include: + + Environment variables: + PORT server port + HOST server host + +For more detailed information check examples and example tests. +*/ +package cleanenv diff --git a/vendor/github.com/ilyakaznacheev/cleanenv/logo.svg b/vendor/github.com/ilyakaznacheev/cleanenv/logo.svg new file mode 100644 index 00000000..2df5d333 --- /dev/null +++ b/vendor/github.com/ilyakaznacheev/cleanenv/logo.svg @@ -0,0 +1,137 @@ + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/joho/godotenv/LICENCE b/vendor/github.com/joho/godotenv/LICENCE new file mode 100644 index 00000000..e7ddd51b --- /dev/null +++ b/vendor/github.com/joho/godotenv/LICENCE @@ -0,0 +1,23 @@ +Copyright (c) 2013 John Barton + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/joho/godotenv/README.md b/vendor/github.com/joho/godotenv/README.md new file mode 100644 index 00000000..1ec45b28 --- /dev/null +++ b/vendor/github.com/joho/godotenv/README.md @@ -0,0 +1,188 @@ +# GoDotEnv ![CI](https://github.com/joho/godotenv/workflows/CI/badge.svg) [![Go Report Card](https://goreportcard.com/badge/github.com/joho/godotenv)](https://goreportcard.com/report/github.com/joho/godotenv) + +A Go (golang) port of the Ruby dotenv project (which loads env vars from a .env file) + +From the original Library: + +> Storing configuration in the environment is one of the tenets of a twelve-factor app. Anything that is likely to change between deployment environments–such as resource handles for databases or credentials for external services–should be extracted from the code into environment variables. +> +> But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. Dotenv load variables from a .env file into ENV when the environment is bootstrapped. + +It can be used as a library (for loading in env for your own daemons etc) or as a bin command. + +There is test coverage and CI for both linuxish and windows environments, but I make no guarantees about the bin version working on windows. + +## Installation + +As a library + +```shell +go get github.com/joho/godotenv +``` + +or if you want to use it as a bin command +```shell +go get github.com/joho/godotenv/cmd/godotenv +``` + +## Usage + +Add your application configuration to your `.env` file in the root of your project: + +```shell +S3_BUCKET=YOURS3BUCKET +SECRET_KEY=YOURSECRETKEYGOESHERE +``` + +Then in your Go app you can do something like + +```go +package main + +import ( + "github.com/joho/godotenv" + "log" + "os" +) + +func main() { + err := godotenv.Load() + if err != nil { + log.Fatal("Error loading .env file") + } + + s3Bucket := os.Getenv("S3_BUCKET") + secretKey := os.Getenv("SECRET_KEY") + + // now do something with s3 or whatever +} +``` + +If you're even lazier than that, you can just take advantage of the autoload package which will read in `.env` on import + +```go +import _ "github.com/joho/godotenv/autoload" +``` + +While `.env` in the project root is the default, you don't have to be constrained, both examples below are 100% legit + +```go +_ = godotenv.Load("somerandomfile") +_ = godotenv.Load("filenumberone.env", "filenumbertwo.env") +``` + +If you want to be really fancy with your env file you can do comments and exports (below is a valid env file) + +```shell +# I am a comment and that is OK +SOME_VAR=someval +FOO=BAR # comments at line end are OK too +export BAR=BAZ +``` + +Or finally you can do YAML(ish) style + +```yaml +FOO: bar +BAR: baz +``` + +as a final aside, if you don't want godotenv munging your env you can just get a map back instead + +```go +var myEnv map[string]string +myEnv, err := godotenv.Read() + +s3Bucket := myEnv["S3_BUCKET"] +``` + +... or from an `io.Reader` instead of a local file + +```go +reader := getRemoteFile() +myEnv, err := godotenv.Parse(reader) +``` + +... 
or from a `string` if you so desire + +```go +content := getRemoteFileContent() +myEnv, err := godotenv.Unmarshal(content) +``` + +### Precedence & Conventions + +Existing envs take precedence of envs that are loaded later. + +The [convention](https://github.com/bkeepers/dotenv#what-other-env-files-can-i-use) +for managing multiple environments (i.e. development, test, production) +is to create an env named `{YOURAPP}_ENV` and load envs in this order: + +```go +env := os.Getenv("FOO_ENV") +if "" == env { + env = "development" +} + +godotenv.Load(".env." + env + ".local") +if "test" != env { + godotenv.Load(".env.local") +} +godotenv.Load(".env." + env) +godotenv.Load() // The Original .env +``` + +If you need to, you can also use `godotenv.Overload()` to defy this convention +and overwrite existing envs instead of only supplanting them. Use with caution. + +### Command Mode + +Assuming you've installed the command as above and you've got `$GOPATH/bin` in your `$PATH` + +``` +godotenv -f /some/path/to/.env some_command with some args +``` + +If you don't specify `-f` it will fall back on the default of loading `.env` in `PWD` + +### Writing Env Files + +Godotenv can also write a map representing the environment to a correctly-formatted and escaped file + +```go +env, err := godotenv.Unmarshal("KEY=value") +err := godotenv.Write(env, "./.env") +``` + +... or to a string + +```go +env, err := godotenv.Unmarshal("KEY=value") +content, err := godotenv.Marshal(env) +``` + +## Contributing + +Contributions are most welcome! The parser itself is pretty stupidly naive and I wouldn't be surprised if it breaks with edge cases. + +*code changes without tests will not be accepted* + +1. Fork it +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Commit your changes (`git commit -am 'Added some feature'`) +4. Push to the branch (`git push origin my-new-feature`) +5. Create new Pull Request + +## Releases + +Releases should follow [Semver](http://semver.org/) though the first couple of releases are `v1` and `v1.1`. + +Use [annotated tags for all releases](https://github.com/joho/godotenv/issues/30). Example `git tag -a v1.2.1` + +## CI + +Linux: [![Build Status](https://travis-ci.org/joho/godotenv.svg?branch=master)](https://travis-ci.org/joho/godotenv) Windows: [![Build status](https://ci.appveyor.com/api/projects/status/9v40vnfvvgde64u4)](https://ci.appveyor.com/project/joho/godotenv) + +## Who? + +The original library [dotenv](https://github.com/bkeepers/dotenv) was written by [Brandon Keepers](http://opensoul.org/), and this port was done by [John Barton](https://johnbarton.co/) based off the tests/fixtures in the original library. 
diff --git a/vendor/github.com/joho/godotenv/godotenv.go b/vendor/github.com/joho/godotenv/godotenv.go new file mode 100644 index 00000000..466f2eb4 --- /dev/null +++ b/vendor/github.com/joho/godotenv/godotenv.go @@ -0,0 +1,363 @@ +// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv) +// +// Examples/readme can be found on the github page at https://github.com/joho/godotenv +// +// The TL;DR is that you make a .env file that looks something like +// +// SOME_ENV_VAR=somevalue +// +// and then in your go code you can call +// +// godotenv.Load() +// +// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR") +package godotenv + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "sort" + "strconv" + "strings" +) + +const doubleQuoteSpecialChars = "\\\n\r\"!$`" + +// Load will read your env file(s) and load them into ENV for this process. +// +// Call this function as close as possible to the start of your program (ideally in main) +// +// If you call Load without any args it will default to loading .env in the current path +// +// You can otherwise tell it which files to load (there can be more than one) like +// +// godotenv.Load("fileone", "filetwo") +// +// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults +func Load(filenames ...string) (err error) { + filenames = filenamesOrDefault(filenames) + + for _, filename := range filenames { + err = loadFile(filename, false) + if err != nil { + return // return early on a spazout + } + } + return +} + +// Overload will read your env file(s) and load them into ENV for this process. +// +// Call this function as close as possible to the start of your program (ideally in main) +// +// If you call Overload without any args it will default to loading .env in the current path +// +// You can otherwise tell it which files to load (there can be more than one) like +// +// godotenv.Overload("fileone", "filetwo") +// +// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefilly set all vars. +func Overload(filenames ...string) (err error) { + filenames = filenamesOrDefault(filenames) + + for _, filename := range filenames { + err = loadFile(filename, true) + if err != nil { + return // return early on a spazout + } + } + return +} + +// Read all env (with same file loading semantics as Load) but return values as +// a map rather than automatically writing values into env +func Read(filenames ...string) (envMap map[string]string, err error) { + filenames = filenamesOrDefault(filenames) + envMap = make(map[string]string) + + for _, filename := range filenames { + individualEnvMap, individualErr := readFile(filename) + + if individualErr != nil { + err = individualErr + return // return early on a spazout + } + + for key, value := range individualEnvMap { + envMap[key] = value + } + } + + return +} + +// Parse reads an env file from io.Reader, returning a map of keys and values. 
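+//
+// A minimal sketch of its use:
+//
+//	m, err := godotenv.Parse(strings.NewReader("FOO=bar"))
+//	// on success, m["FOO"] == "bar"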
+func Parse(r io.Reader) (envMap map[string]string, err error) { + envMap = make(map[string]string) + + var lines []string + scanner := bufio.NewScanner(r) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + + if err = scanner.Err(); err != nil { + return + } + + for _, fullLine := range lines { + if !isIgnoredLine(fullLine) { + var key, value string + key, value, err = parseLine(fullLine, envMap) + + if err != nil { + return + } + envMap[key] = value + } + } + return +} + +//Unmarshal reads an env file from a string, returning a map of keys and values. +func Unmarshal(str string) (envMap map[string]string, err error) { + return Parse(strings.NewReader(str)) +} + +// Exec loads env vars from the specified filenames (empty map falls back to default) +// then executes the cmd specified. +// +// Simply hooks up os.Stdin/err/out to the command and calls Run() +// +// If you want more fine grained control over your command it's recommended +// that you use `Load()` or `Read()` and the `os/exec` package yourself. +func Exec(filenames []string, cmd string, cmdArgs []string) error { + Load(filenames...) + + command := exec.Command(cmd, cmdArgs...) + command.Stdin = os.Stdin + command.Stdout = os.Stdout + command.Stderr = os.Stderr + return command.Run() +} + +// Write serializes the given environment and writes it to a file +func Write(envMap map[string]string, filename string) error { + content, err := Marshal(envMap) + if err != nil { + return err + } + file, err := os.Create(filename) + if err != nil { + return err + } + defer file.Close() + _, err = file.WriteString(content + "\n") + if err != nil { + return err + } + file.Sync() + return err +} + +// Marshal outputs the given environment as a dotenv-formatted environment file. +// Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped. 
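+//
+// A small sketch (note that purely numeric values are written unquoted):
+//
+//	s, _ := godotenv.Marshal(map[string]string{"A": "1", "B": "two"})
+//	// s == "A=1\nB=\"two\""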
+func Marshal(envMap map[string]string) (string, error) { + lines := make([]string, 0, len(envMap)) + for k, v := range envMap { + if d, err := strconv.Atoi(v); err == nil { + lines = append(lines, fmt.Sprintf(`%s=%d`, k, d)) + } else { + lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v))) + } + } + sort.Strings(lines) + return strings.Join(lines, "\n"), nil +} + +func filenamesOrDefault(filenames []string) []string { + if len(filenames) == 0 { + return []string{".env"} + } + return filenames +} + +func loadFile(filename string, overload bool) error { + envMap, err := readFile(filename) + if err != nil { + return err + } + + currentEnv := map[string]bool{} + rawEnv := os.Environ() + for _, rawEnvLine := range rawEnv { + key := strings.Split(rawEnvLine, "=")[0] + currentEnv[key] = true + } + + for key, value := range envMap { + if !currentEnv[key] || overload { + os.Setenv(key, value) + } + } + + return nil +} + +func readFile(filename string) (envMap map[string]string, err error) { + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + + return Parse(file) +} + +var exportRegex = regexp.MustCompile(`^\s*(?:export\s+)?(.*?)\s*$`) + +func parseLine(line string, envMap map[string]string) (key string, value string, err error) { + if len(line) == 0 { + err = errors.New("zero length string") + return + } + + // ditch the comments (but keep quoted hashes) + if strings.Contains(line, "#") { + segmentsBetweenHashes := strings.Split(line, "#") + quotesAreOpen := false + var segmentsToKeep []string + for _, segment := range segmentsBetweenHashes { + if strings.Count(segment, "\"") == 1 || strings.Count(segment, "'") == 1 { + if quotesAreOpen { + quotesAreOpen = false + segmentsToKeep = append(segmentsToKeep, segment) + } else { + quotesAreOpen = true + } + } + + if len(segmentsToKeep) == 0 || quotesAreOpen { + segmentsToKeep = append(segmentsToKeep, segment) + } + } + + line = strings.Join(segmentsToKeep, "#") + } + + firstEquals := strings.Index(line, "=") + firstColon := strings.Index(line, ":") + splitString := strings.SplitN(line, "=", 2) + if firstColon != -1 && (firstColon < firstEquals || firstEquals == -1) { + //this is a yaml-style line + splitString = strings.SplitN(line, ":", 2) + } + + if len(splitString) != 2 { + err = errors.New("Can't separate key from value") + return + } + + // Parse the key + key = splitString[0] + if strings.HasPrefix(key, "export") { + key = strings.TrimPrefix(key, "export") + } + key = strings.TrimSpace(key) + + key = exportRegex.ReplaceAllString(splitString[0], "$1") + + // Parse the value + value = parseValue(splitString[1], envMap) + return +} + +var ( + singleQuotesRegex = regexp.MustCompile(`\A'(.*)'\z`) + doubleQuotesRegex = regexp.MustCompile(`\A"(.*)"\z`) + escapeRegex = regexp.MustCompile(`\\.`) + unescapeCharsRegex = regexp.MustCompile(`\\([^$])`) +) + +func parseValue(value string, envMap map[string]string) string { + + // trim + value = strings.Trim(value, " ") + + // check if we've got quoted values or possible escapes + if len(value) > 1 { + singleQuotes := singleQuotesRegex.FindStringSubmatch(value) + + doubleQuotes := doubleQuotesRegex.FindStringSubmatch(value) + + if singleQuotes != nil || doubleQuotes != nil { + // pull the quotes off the edges + value = value[1 : len(value)-1] + } + + if doubleQuotes != nil { + // expand newlines + value = escapeRegex.ReplaceAllStringFunc(value, func(match string) string { + c := strings.TrimPrefix(match, `\`) + switch c { + case "n": + return "\n" + case "r": 
+ return "\r" + default: + return match + } + }) + // unescape characters + value = unescapeCharsRegex.ReplaceAllString(value, "$1") + } + + if singleQuotes == nil { + value = expandVariables(value, envMap) + } + } + + return value +} + +var expandVarRegex = regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`) + +func expandVariables(v string, m map[string]string) string { + return expandVarRegex.ReplaceAllStringFunc(v, func(s string) string { + submatch := expandVarRegex.FindStringSubmatch(s) + + if submatch == nil { + return s + } + if submatch[1] == "\\" || submatch[2] == "(" { + return submatch[0][1:] + } else if submatch[4] != "" { + return m[submatch[4]] + } + return s + }) +} + +func isIgnoredLine(line string) bool { + trimmedLine := strings.TrimSpace(line) + return len(trimmedLine) == 0 || strings.HasPrefix(trimmedLine, "#") +} + +func doubleQuoteEscape(line string) string { + for _, c := range doubleQuoteSpecialChars { + toReplace := "\\" + string(c) + if c == '\n' { + toReplace = `\n` + } + if c == '\r' { + toReplace = `\r` + } + line = strings.Replace(line, string(c), toReplace, -1) + } + return line +} diff --git a/vendor/github.com/joho/godotenv/renovate.json b/vendor/github.com/joho/godotenv/renovate.json new file mode 100644 index 00000000..f45d8f11 --- /dev/null +++ b/vendor/github.com/joho/godotenv/renovate.json @@ -0,0 +1,5 @@ +{ + "extends": [ + "config:base" + ] +} diff --git a/vendor/olympos.io/encoding/edn/.travis.yml b/vendor/olympos.io/encoding/edn/.travis.yml new file mode 100644 index 00000000..dba7b336 --- /dev/null +++ b/vendor/olympos.io/encoding/edn/.travis.yml @@ -0,0 +1,4 @@ +language: go +sudo: false +go: + - 1.5 diff --git a/vendor/olympos.io/encoding/edn/LICENSE b/vendor/olympos.io/encoding/edn/LICENSE new file mode 100644 index 00000000..9efc5f36 --- /dev/null +++ b/vendor/olympos.io/encoding/edn/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2015, The Go Authors, Jean Niklas L'orange +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + * Neither the name of Google Inc., the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from this +software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/olympos.io/encoding/edn/README.md b/vendor/olympos.io/encoding/edn/README.md
new file mode 100644
index 00000000..3d394443
--- /dev/null
+++ b/vendor/olympos.io/encoding/edn/README.md
@@ -0,0 +1,116 @@
+# Go implementation of EDN, extensible data notation
+
+[![GoDoc](https://godoc.org/olympos.io/encoding/edn?status.svg)](https://godoc.org/olympos.io/encoding/edn)
+
+go-edn is a Golang library to read and write
+[EDN](https://github.com/edn-format/edn) (extensible data notation), a subset of
+Clojure used for transferring data between applications, much like JSON or XML.
+EDN is also a very good language for configuration files, much like a JSON-like
+version of YAML.
+
+This library is heavily influenced by the JSON library that ships with Go, and
+people familiar with that package should know the basics of how this library
+works. In fact, this should be close to a drop-in replacement for the
+`encoding/json` package if you only use basic functionality.
+
+This implementation is complete, stable, and presumably also bug free. This
+is why you don't see any changes in the repository.
+
+If you wonder why you should (or should not) use EDN, you can have a look at the
+[why](docs/why.md) document.
+
+## Installation and Usage
+
+The import path for the package is `olympos.io/encoding/edn`.
+
+To install it, run:
+
+```shell
+go get olympos.io/encoding/edn
+```
+
+To use it in your project, you import `olympos.io/encoding/edn` and refer to it as `edn`
+like this:
+
+```go
+import "olympos.io/encoding/edn"
+
+//...
+
+edn.DoStuff()
+```
+
+The previous import path of this library was `gopkg.in/edn.v1`, which is still
+permanently supported.
+
+## Quickstart
+
+You can follow http://blog.golang.org/json-and-go and replace every occurrence of
+JSON with EDN (and the JSON data with EDN data), and the text makes almost
+perfect sense. The only caveat is that, since EDN is more general than JSON,
+go-edn stores arbitrary maps of the form `map[interface{}]interface{}`.
+
+go-edn also ships with keywords, symbols and tags as types.
+
+For a longer introduction on how to use the library, see
+[introduction.md](docs/introduction.md). If you're familiar with the JSON
+package, then the [API Documentation](https://godoc.org/olympos.io/encoding/edn) might
+be the only thing you need.
+
+## Example Usage
+
+Say you want to describe your pet forum's users as EDN. They have the following
+types:
+
+```go
+type Animal struct {
+	Name string
+	Type string `edn:"kind"`
+}
+
+type Person struct {
+	Name      string
+	Birthyear int `edn:"born"`
+	Pets      []Animal
+}
+```
+
+With go-edn, we can do as follows to read and write these types:
+
+```go
+import "olympos.io/encoding/edn"
+
+//...
+
+
+func ReturnData() (Person, error) {
+	data := `{:name "Hans",
+	          :born 1970,
+	          :pets [{:name "Cap'n Jack" :kind "Sparrow"}
+	                 {:name "Freddy" :kind "Cockatiel"}]}`
+	var user Person
+	err := edn.Unmarshal([]byte(data), &user)
+	// user '==' Person{"Hans", 1970,
+	//            []Animal{{"Cap'n Jack", "Sparrow"}, {"Freddy", "Cockatiel"}}}
+	return user, err
+}
+```
+
+If you want to write that user again, just `Marshal` it:
+
+```go
+bs, err := edn.Marshal(user)
+```
+
+## Dependencies
+
+go-edn has no external dependencies, except the standard Go library. However, as
+it depends on `math/big.Float`, go-edn requires Go 1.5 or higher.
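+
+As a quick sketch of the keyword type mentioned above, unmarshalling an EDN
+map into an empty interface yields `edn.Keyword` keys:
+
+```go
+var v interface{}
+err := edn.Unmarshal([]byte(`{:name "Hans"}`), &v)
+// on success, v is a map[interface{}]interface{} whose single key is
+// edn.Keyword("name"), mapped to the string "Hans"
+```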
+ + +## License + +Copyright © 2015-2019 Jean Niklas L'orange and [contributors](https://github.com/go-edn/edn/graphs/contributors) + +Distributed under the BSD 3-clause license, which is available in the file +LICENSE. diff --git a/vendor/olympos.io/encoding/edn/compact.go b/vendor/olympos.io/encoding/edn/compact.go new file mode 100644 index 00000000..474c770b --- /dev/null +++ b/vendor/olympos.io/encoding/edn/compact.go @@ -0,0 +1,98 @@ +// Copyright 2015 Jean Niklas L'orange. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edn + +import ( + "bytes" + "io" +) + +func tokNeedsDelim(t tokenType) bool { + switch t { + case tokenString, tokenListStart, tokenListEnd, tokenVectorStart, + tokenVectorEnd, tokenMapEnd, tokenMapStart, tokenSetStart, tokenDiscard, tokenError: + return false + } + return true +} + +func delimits(r rune) bool { + switch r { + case '{', '}', '[', ']', '(', ')', '\\', '"': + return true + } + return isWhitespace(r) +} + +// Compact appends to dst a compacted form of the EDN-encoded src. It does not +// remove discard values. +func Compact(dst *bytes.Buffer, src []byte) error { + origLen := dst.Len() + var lex lexer + lex.reset() + buf := bytes.NewBuffer(src) + start, pos := 0, 0 + needsDelim := false + prevIgnore := '\uFFFD' + r, size, err := buf.ReadRune() + for ; err == nil; r, size, err = buf.ReadRune() { + ls := lex.state(r) + ppos := pos + pos += size + switch ls { + case lexCont: + if ppos == start && needsDelim && !delimits(r) { + dst.WriteRune(prevIgnore) + } + continue + case lexIgnore: + prevIgnore = r + start = pos + case lexError: + dst.Truncate(origLen) + return lex.err + case lexEnd: + // here we might want to discard #_ and the like. Currently we don't. + dst.Write(src[start:pos]) + needsDelim = tokNeedsDelim(lex.token) + lex.reset() + start = pos + case lexEndPrev: + dst.Write(src[start:ppos]) + lex.reset() + lss := lex.state(r) + needsDelim = tokNeedsDelim(lex.token) + switch lss { + case lexIgnore: + prevIgnore = r + start = pos + case lexCont: + start = ppos + case lexEnd: + dst.WriteRune(r) + lex.reset() + start = pos + case lexEndPrev: + dst.Truncate(origLen) + return errInternal + case lexError: + dst.Truncate(origLen) + return lex.err + } + } + } + if err != io.EOF { + return err + } + ls := lex.eof() + switch ls { + case lexEnd: + dst.Write(src[start:pos]) + case lexError: + dst.Truncate(origLen) + return lex.err + } + return nil +} diff --git a/vendor/olympos.io/encoding/edn/decode.go b/vendor/olympos.io/encoding/edn/decode.go new file mode 100644 index 00000000..fb07613a --- /dev/null +++ b/vendor/olympos.io/encoding/edn/decode.go @@ -0,0 +1,1678 @@ +// Copyright 2015 Jean Niklas L'orange. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package edn
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+var (
+ errInternal = errors.New("Illegal internal parse state")
+ errNoneLeft = errors.New("No more tokens to read")
+ errUnexpected = errors.New("Unexpected token")
+ errIllegalRune = errors.New("Illegal rune form")
+)
+
+type UnknownTagError struct {
+ tag []byte
+ value []byte
+ inType reflect.Type
+}
+
+func (ute UnknownTagError) Error() string {
+ return fmt.Sprintf("Unable to decode %s%s into %s", string(ute.tag),
+ string(ute.value), ute.inType)
+}
+
+// Unmarshal parses the EDN-encoded data and stores the result in the value
+// pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that Marshal uses, allocating
+// maps, slices, and pointers as necessary, with the following additional rules:
+//
+// First, if the value to store the result into implements edn.Unmarshaler, it
+// is called.
+//
+// If the value is tagged and the tag is known, the EDN value is translated into
+// the input of the tag convert function. If no error happens during converting,
+// the result of the conversion is then coerced into v if possible.
+//
+// To unmarshal EDN into a pointer, Unmarshal first handles the case of the EDN
+// being the EDN literal nil. In that case, Unmarshal sets the pointer to nil.
+// Otherwise, Unmarshal unmarshals the EDN into the value pointed at by the
+// pointer. If the pointer is nil, Unmarshal allocates a new value for it to
+// point to.
+//
+// To unmarshal EDN into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// preferring an exact match but also accepting a case-insensitive match.
+//
+// To unmarshal EDN into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+// bool, for EDN booleans
+// float64, for EDN floats
+// int64, for EDN integers
+// int32, for EDN characters
+// string, for EDN strings
+// []interface{}, for EDN vectors and lists
+// map[interface{}]interface{}, for EDN maps
+// map[interface{}]bool, for EDN sets
+// nil for EDN nil
+// edn.Tag for unknown EDN tagged elements
+// T for known EDN tagged elements, where T is the result of the converter function
+//
+// To unmarshal an EDN vector/list into a slice, Unmarshal resets the slice to
+// nil and then appends each element to the slice.
+//
+// To unmarshal an EDN map into a Go map, Unmarshal replaces the map
+// with an empty map and then adds key-value pairs from the object to
+// the map.
+//
+// If an EDN value is not appropriate for a given target type, or if an EDN number
+// overflows the target type, Unmarshal skips that field and completes the
+// unmarshalling as best it can. If no more serious errors are encountered,
+// Unmarshal returns an UnmarshalTypeError describing the earliest such error.
+//
+// The EDN nil value unmarshals into an interface, map, pointer, or slice by
+// setting that Go value to nil.
+//
+// When unmarshaling strings, invalid UTF-8 or invalid UTF-16 surrogate pairs
+// are not treated as an error. Instead, they are replaced by the Unicode
+// replacement character U+FFFD.
+//
+func Unmarshal(data []byte, v interface{}) error {
+ return newDecoder(bufio.NewReader(bytes.NewBuffer(data))).Decode(v)
+}
+
+// UnmarshalString works like Unmarshal, but accepts a string as input instead
+// of a byte slice.
+func UnmarshalString(data string, v interface{}) error { + return newDecoder(bufio.NewReader(bytes.NewBufferString(data))).Decode(v) +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read data from r beyond the +// EDN values requested. +func NewDecoder(r io.Reader) *Decoder { + return newDecoder(bufio.NewReader(r)) +} + +// Buffered returns a reader of the data remaining in the Decoder's buffer. The +// reader is valid until the next call to Decode. +func (d *Decoder) Buffered() *bufio.Reader { + return d.rd +} + +// AddTagFn adds a tag function to the decoder's TagMap. Note that TagMaps are +// mutable: If Decoder A and B share TagMap, then adding a tag function to one +// may modify both. +func (d *Decoder) AddTagFn(tagname string, fn interface{}) error { + return d.tagmap.AddTagFn(tagname, fn) +} + +// MustAddTagFn adds a tag function to the decoder's TagMap like AddTagFn, +// except this function also panics if the tag could not be added. +func (d *Decoder) MustAddTagFn(tagname string, fn interface{}) { + d.tagmap.MustAddTagFn(tagname, fn) +} + +// AddTagStruct adds a tag struct to the decoder's TagMap. Note that TagMaps are +// mutable: If Decoder A and B share TagMap, then adding a tag struct to one +// may modify both. +func (d *Decoder) AddTagStruct(tagname string, example interface{}) error { + return d.tagmap.AddTagStruct(tagname, example) +} + +// UseTagMap sets the TagMap provided as the TagMap for this decoder. +func (d *Decoder) UseTagMap(tm *TagMap) { + d.tagmap = tm +} + +// UseMathContext sets the given math context as default math context for this +// decoder. +func (d *Decoder) UseMathContext(mc MathContext) { + d.mc = &mc +} + +func (d *Decoder) mathContext() *MathContext { + if d.mc != nil { + return d.mc + } + return &GlobalMathContext +} + +// DisallowUnknownFields causes the Decoder to return an error when the +// destination is a struct and the input contains keys which do not match any +// non-ignored, exported fields in the destination. +func (d *Decoder) DisallowUnknownFields() { + d.disallowUnknownFields = true +} + +// Unmarshaler is the interface implemented by objects that can unmarshal an EDN +// description of themselves. The input can be assumed to be a valid encoding of +// an EDN value. UnmarshalEDN must copy the EDN data if it wishes to retain the +// data after returning. +type Unmarshaler interface { + UnmarshalEDN([]byte) error +} + +type parseState int + +const ( + parseToplevel = iota + parseList + parseVector + parseMap + parseSet + parseTagged + parseDiscard +) + +// A Decoder reads and decodes EDN objects from an input stream. +type Decoder struct { + disallowUnknownFields bool + + lex *lexer + savedError error + rd *bufio.Reader + tagmap *TagMap + mc *MathContext + // parser-specific + prevSlice []byte + prevTtype tokenType + undo bool + // if nextToken returned lexEndPrev, we must write the leftover value at + // next call to nextToken + hasLeftover bool + leftover rune +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) 
+type InvalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ if e.Type == nil {
+ return "edn: Unmarshal(nil)"
+ }
+
+ if e.Type.Kind() != reflect.Ptr {
+ return "edn: Unmarshal(non-pointer " + e.Type.String() + ")"
+ }
+ return "edn: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+// An UnmarshalTypeError describes an EDN value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+ Value string // description of EDN value - "bool", "array", "number -5"
+ Type reflect.Type // type of Go value it could not be assigned to
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ return "edn: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// UnhashableError is an error which occurs when the decoder attempted to assign
+// an unhashable key to a map or set. The position close to where the value was
+// found is provided to help debugging.
+type UnhashableError struct {
+ Position int64
+}
+
+func (e *UnhashableError) Error() string {
+ return "edn: unhashable type at position " + strconv.FormatInt(e.Position, 10) + " in input"
+}
+
+type UnknownFieldError struct {
+ Field string // the field name
+ Type reflect.Type // type of Go struct with a missing field
+}
+
+func (e *UnknownFieldError) Error() string {
+ return "edn: cannot find a field '" + e.Field + "' in a struct " + e.Type.String() + " to unmarshal into"
+}
+
+// Decode reads the next EDN-encoded value from its input and stores it in the
+// value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the conversion of EDN
+// into a Go value.
+func (d *Decoder) Decode(val interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ // if unhashable, return an UnhashableError. Else panic unless it's an error
+ // from the decoder itself.
+ if rerr, ok := r.(runtime.Error); ok {
+ if strings.Contains(rerr.Error(), "unhashable") {
+ err = &UnhashableError{Position: d.lex.position}
+ } else {
+ panic(r)
+ }
+ } else {
+ err = r.(error)
+ }
+ }
+ }()
+
+ err = d.more()
+ if err != nil {
+ return err
+ }
+
+ rv := reflect.ValueOf(val)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return &InvalidUnmarshalError{reflect.TypeOf(val)}
+ }
+
+ d.value(rv)
+
+ return nil
+}
+
+func newDecoder(buf *bufio.Reader) *Decoder {
+ lex := lexer{}
+ lex.reset()
+ return &Decoder{
+ lex: &lex,
+ rd: buf,
+ hasLeftover: false,
+ leftover: '\uFFFD',
+ tagmap: new(TagMap),
+ }
+}
+
+func (d *Decoder) getTagFn(tagname string) *reflect.Value {
+ d.tagmap.RLock()
+ f, ok := d.tagmap.m[tagname]
+ d.tagmap.RUnlock()
+ if ok {
+ return &f
+ }
+ globalTags.RLock()
+ f, ok = globalTags.m[tagname]
+ globalTags.RUnlock()
+ if ok {
+ return &f
+ }
+ return nil
+}
+
+func (d *Decoder) error(err error) {
+ panic(err)
+}
+
+func (d *Decoder) doUndo(bs []byte, ttype tokenType) {
+ if d.undo {
+ d.error(errInternal) // this is LL(1), so this shouldn't happen
+ }
+ d.undo = true
+ d.prevSlice = bs
+ d.prevTtype = ttype
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
+// The first byte of the array ('[') has been read already.
+func (d *Decoder) array(v reflect.Value, endType tokenType) {
+ // Check for unmarshaler.
+ u, pv := d.indirect(v, false) + if u != nil { + switch endType { + case tokenVectorEnd: + d.doUndo([]byte{'['}, tokenVectorStart) + case tokenListEnd: + d.doUndo([]byte{'('}, tokenListStart) + case tokenSetEnd: + d.doUndo([]byte{'#', '{'}, tokenSetStart) + } + bs, err := d.nextValueBytes() + if err == nil { + err = u.UnmarshalEDN(bs) + } + if err != nil { + d.error(err) + } + return + } + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface(endType))) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.error(&UnmarshalTypeError{"array", v.Type()}) + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + bs, ttype, err := d.nextToken() + if err != nil { + d.error(err) + } + if ttype == endType { + break + } + d.doUndo(bs, ttype) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +func (d *Decoder) arrayInterface(endType tokenType) interface{} { + var v = make([]interface{}, 0) + for { + // look out for endType + bs, tt, err := d.nextToken() + if err != nil { + d.error(err) + break + } + if tt == endType { + break + } + d.doUndo(bs, tt) + v = append(v, d.valueInterface()) + } + return v +} + +func (d *Decoder) value(v reflect.Value) { + if !v.IsValid() { + // read value and ignore it + d.valueInterface() + return + } + + bs, ttype, err := d.nextToken() + // check error first + if err != nil { + d.error(err) + return + } + switch ttype { + default: + d.error(errUnexpected) + case tokenSymbol, tokenKeyword, tokenString, tokenInt, tokenFloat, tokenChar: + d.literal(bs, ttype, v) + case tokenTag: + d.tag(bs, v) + case tokenListStart: + d.array(v, tokenListEnd) + case tokenVectorStart: + d.array(v, tokenVectorEnd) + case tokenSetStart: + d.set(v) + case tokenMapStart: + d.ednmap(v) + } +} + +func (d *Decoder) tag(tag []byte, v reflect.Value) { + // Check for unmarshaler. + u, pv := d.indirect(v, false) + if u != nil { + bs, err := d.nextValueBytes() + if err == nil { + err = u.UnmarshalEDN(append(append(tag, ' '), bs...)) + } + if err != nil { + d.error(err) + } + return + } + v = pv + + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.tagInterface(tag))) + return + } + + fn := d.getTagFn(string(tag[1:])) + if fn == nil { + // So in theory we'd have to match against any interface that could be + // assignable to the Tag type, to ensure we would decode whenever possible. + // That is any interface that specifies any combination of the methods + // MarshalEDN, UnmarshalEDN and String. I'm not sure if that makes sense + // though, so I've punted this for now. 
+ bs, err := d.nextValueBytes() + if err != nil { + d.error(err) + } + d.error(UnknownTagError{tag, bs, v.Type()}) + } else { + tfn := fn.Type() + var result reflect.Value + // if not func, just match on struct shape + if tfn.Kind() != reflect.Func { + result = reflect.New(tfn).Elem() + d.value(result) + } else { // otherwise match on input value and call the function + inVal := reflect.New(tfn.In(0)) + d.value(inVal) + res := fn.Call([]reflect.Value{inVal.Elem()}) + if err, ok := res[1].Interface().(error); ok && err != nil { + d.error(err) + } + result = res[0] + } + // result is not necessarily direct, so we have to make it direct, but + // *only* if it's NOT null at every step. Which leads to the question: How + // do we unify these values? This is particularly hairy if these are double + // pointers or bigger. + + // Currently we only attempt to solve this for results by checking if the + // result can be dereferenced into a value. The value will always be a + // non-pointer, so presumably we can assign it in this fashion as a + // temporary resolution. + if result.Type().AssignableTo(v.Type()) { + v.Set(result) + return + } + if result.Kind() == reflect.Ptr && !result.IsNil() && + result.Elem().Type().AssignableTo(v.Type()) { + // is res a non-nil pointer to a value we can assign to? If yes, then + // let's just do that. + v.Set(result.Elem()) + return + } + d.error(fmt.Errorf("Cannot assign %s to %s (tag issue?)", result.Type(), v.Type())) + } +} + +func (d *Decoder) tagInterface(tag []byte) interface{} { + fn := d.getTagFn(string(tag[1:])) + if fn == nil { + var t Tag + t.Tagname = string(tag[1:]) + t.Value = d.valueInterface() + return t + } else if fn.Type().Kind() != reflect.Func { + res := reflect.New(fn.Type()).Elem() + d.value(res) + return res.Interface() + } else { + tfn := fn.Type() + val := reflect.New(tfn.In(0)) + d.value(val) + res := fn.Call([]reflect.Value{val.Elem()}) + if err, ok := res[1].Interface().(error); ok && err != nil { + d.error(err) + } + return res[0].Interface() + } +} + +func (d *Decoder) valueInterface() interface{} { + bs, ttype, err := d.nextToken() + // check error first + if err != nil { + d.error(err) + return nil /// won't get here + } + switch ttype { + default: + d.error(errUnexpected) + return nil + case tokenSymbol, tokenKeyword, tokenString, tokenInt, tokenFloat, tokenChar: + return d.literalInterface(bs, ttype) + case tokenTag: + return d.tagInterface(bs) + case tokenListStart: + return d.arrayInterface(tokenListEnd) + case tokenVectorStart: + return d.arrayInterface(tokenVectorEnd) + case tokenSetStart: + return d.setInterface() + case tokenMapStart: + return d.ednmapInterface() + } + return nil +} + +func (d *Decoder) ednmap(v reflect.Value) { + // Check for unmarshaler. 
+ u, pv := d.indirect(v, false) + if u != nil { + d.doUndo([]byte{'{'}, tokenMapStart) + bs, err := d.nextValueBytes() + if err == nil { + err = u.UnmarshalEDN(bs) + } + if err != nil { + d.error(err) + } + return + } + v = pv + + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.ednmapInterface())) + return + } + + var keyType reflect.Type + + // Check type of target: Struct or map[T]U + switch v.Kind() { + case reflect.Map: + t := v.Type() + keyType = t.Key() + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.error(&UnmarshalTypeError{"map", v.Type()}) + } + + // separate these to ease reading (theoretically fewer checks too) + if v.Kind() == reflect.Struct { + for { + bs, tt, err := d.nextToken() + if err != nil { + d.error(err) + } + if tt == tokenSetEnd { + break + } + skip := false + var key []byte + // The key can either be a symbol, a keyword or a string. We will skip + // anything that is not any of these values. + switch tt { + case tokenSymbol: + if bytes.Equal(bs, falseByte) || bytes.Equal(bs, trueByte) || bytes.Equal(bs, nilByte) { + skip = true + } + key = bs + case tokenKeyword: + key = bs[1:] + case tokenString: + k, ok := unquoteBytes(bs) + key = k + if !ok { + d.error(errInternal) + } + default: + skip = true + } + + if skip { // will panic if something bad happens, so this is fine + d.valueInterface() + continue + } + + var subv reflect.Value + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, key) { + f = ff + break + } + if f == nil && ff.equalFold(ff.nameBytes, key) { + f = ff + } + } + if f != nil { + subv = v + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } else if d.disallowUnknownFields { + d.error(&UnknownFieldError{string(key), v.Type()}) + } + // If subv not set, value() will just skip. + d.value(subv) + } + // if not struct, then it is a map + } else if keyType.Kind() == reflect.Interface && keyType.NumMethod() == 0 { + // special case for unhashable key types + var mapElem reflect.Value + for { + bs, tt, err := d.nextToken() + if err != nil { + d.error(err) + } + if tt == tokenSetEnd { + break + } + d.doUndo(bs, tt) + + key := d.valueInterface() + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv := mapElem + d.value(subv) + + if key == nil { + v.SetMapIndex(reflect.New(keyType).Elem(), subv) + } else { + switch reflect.TypeOf(key).Kind() { + case reflect.Slice, reflect.Map: // bypass issues with unhashable types + v.SetMapIndex(reflect.ValueOf(&key), subv) + default: + v.SetMapIndex(reflect.ValueOf(key), subv) + } + } + } + } else { // default map case + var mapElem reflect.Value + for { + bs, tt, err := d.nextToken() + if err != nil { + d.error(err) + } + if tt == tokenSetEnd { + break + } + d.doUndo(bs, tt) + + // should we do the same as with mapElem? 
+ key := reflect.New(keyType).Elem() + d.value(key) + + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv := mapElem + d.value(subv) + v.SetMapIndex(key, subv) + } + } +} + +func (d *Decoder) ednmapInterface() interface{} { + theMap := make(map[interface{}]interface{}, 0) + for { + bs, tt, err := d.nextToken() + if err != nil { + d.error(err) + } + if tt == tokenMapEnd { + break + } + d.doUndo(bs, tt) + key := d.valueInterface() + value := d.valueInterface() + // special case on nil here. nil is hashable, so use it as key. + if key == nil { + theMap[key] = value + } else { + switch reflect.TypeOf(key).Kind() { + case reflect.Slice, reflect.Map: // bypass issues with unhashable types + theMap[&key] = value + default: + theMap[key] = value + } + } + } + return theMap +} + +func (d *Decoder) set(v reflect.Value) { + // Check for unmarshaler. + u, pv := d.indirect(v, false) + if u != nil { + d.doUndo([]byte{'#', '{'}, tokenSetStart) + bs, err := d.nextValueBytes() + if err == nil { + err = u.UnmarshalEDN(bs) + } + if err != nil { + d.error(err) + } + return + } + v = pv + + var setValue reflect.Value + var keyType reflect.Type + + // Check type of target. + // TODO: accept option structs? -- i.e. structs where all fields are bools + // TODO: Also accept slices + switch v.Kind() { + case reflect.Map: + // map must have bool or struct{} value type + t := v.Type() + keyType = t.Key() + valKind := t.Elem().Kind() + switch valKind { + case reflect.Bool: + setValue = reflect.ValueOf(true) + case reflect.Struct: + // check if struct, and if so, ensure it has 0 fields + if t.Elem().NumField() != 0 { + d.error(&UnmarshalTypeError{"set", v.Type()}) + } + setValue = reflect.Zero(t.Elem()) + default: + d.error(&UnmarshalTypeError{"set", v.Type()}) + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Slice, reflect.Array: + // Some extent of rechecking going on when we pass it to array, but it + // should be a constant factor only. + d.array(v, tokenSetEnd) + return + case reflect.Interface: + if v.NumMethod() == 0 { + // break out and use setInterface + v.Set(reflect.ValueOf(d.setInterface())) + return + } else { + d.error(&UnmarshalTypeError{"set", v.Type()}) + } + + default: + d.error(&UnmarshalTypeError{"set", v.Type()}) + } + + // special case here, to avoid panics when we have slices and maps as keys. + // Split out from code below to improve perf + if keyType.Kind() == reflect.Interface && keyType.NumMethod() == 0 { + for { + bs, tt, err := d.nextToken() + if err != nil { + d.error(err) + } + if tt == tokenSetEnd { + break + } + d.doUndo(bs, tt) + key := d.valueInterface() + // special case on nil here: Need to create a zero type of the specific + // keyType. As this is an interface, this will itself be nil. 
+ if key == nil { + v.SetMapIndex(reflect.New(keyType).Elem(), setValue) + } else { + switch reflect.TypeOf(key).Kind() { + case reflect.Slice, reflect.Map: // bypass issues with unhashable types + v.SetMapIndex(reflect.ValueOf(&key), setValue) + default: + v.SetMapIndex(reflect.ValueOf(key), setValue) + } + } + } + } else { + for { + bs, tt, err := d.nextToken() + if err != nil { + d.error(err) + } + if tt == tokenSetEnd { + break + } + d.doUndo(bs, tt) + + key := reflect.New(keyType).Elem() + d.value(key) + v.SetMapIndex(key, setValue) + } + } + +} + +func (d *Decoder) setInterface() interface{} { + theSet := make(map[interface{}]bool, 0) + for { + bs, tt, err := d.nextToken() + if err != nil { + d.error(err) + } + if tt == tokenSetEnd { + break + } + d.doUndo(bs, tt) + key := d.valueInterface() + if key == nil { + theSet[key] = true + } else { + switch reflect.TypeOf(key).Kind() { + case reflect.Slice, reflect.Map: // bypass issues with unhashable types + theSet[&key] = true + default: + theSet[key] = true + } + } + } + return theSet +} + +var nilByte = []byte(`nil`) +var trueByte = []byte(`true`) +var falseByte = []byte(`false`) + +var symbolType = reflect.TypeOf(Symbol("")) +var keywordType = reflect.TypeOf(Keyword("")) +var byteSliceType = reflect.TypeOf([]byte(nil)) + +var bigFloatType = reflect.TypeOf((*big.Float)(nil)).Elem() +var bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem() + +func (d *Decoder) literal(bs []byte, ttype tokenType, v reflect.Value) { + wantptr := ttype == tokenSymbol && bytes.Equal(nilByte, bs) + u, pv := d.indirect(v, wantptr) + if u != nil { + err := u.UnmarshalEDN(bs) + if err != nil { + d.error(err) + } + return + } + v = pv + switch ttype { + case tokenSymbol: + if wantptr { // nil + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + default: + d.error(&UnmarshalTypeError{"nil", v.Type()}) + } + } else if bytes.Equal(trueByte, bs) || bytes.Equal(falseByte, bs) { // true|false + value := bs[0] == 't' + switch v.Kind() { + default: + d.error(&UnmarshalTypeError{"bool", v.Type()}) + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.error(&UnmarshalTypeError{"bool", v.Type()}) + } + } + } else if v.Kind() == reflect.String && v.Type() == symbolType { // "actual" symbols + v.SetString(string(bs)) + } else if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(Symbol(string(bs)))) + } else { + d.error(&UnmarshalTypeError{"symbol", v.Type()}) + } + case tokenKeyword: + if v.Kind() == reflect.String && v.Type() == keywordType { // "actual" keywords + v.SetString(string(bs[1:])) + } else if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(Keyword(string(bs[1:])))) + } else { + d.error(&UnmarshalTypeError{"keyword", v.Type()}) + } + case tokenInt: + var s string + isBig := false + if bs[len(bs)-1] == 'N' { // can end with N, which we promptly ignore + // TODO: If the user expects a float and receives what is perceived as an + // int (ends with N), what is the sensible thing to do? 
+ s = string(bs[:len(bs)-1]) + isBig = true + } else { + s = string(bs) + } + switch v.Kind() { + default: + switch v.Type() { + case bigIntType: + bi := v.Addr().Interface().(*big.Int) + _, ok := bi.SetString(s, 10) + if !ok { + d.error(errInternal) + } + case bigFloatType: + mc := d.mathContext() + bf := v.Addr().Interface().(*big.Float) + bf = bf.SetPrec(mc.Precision).SetMode(mc.Mode) + _, _, err := bf.Parse(s, 10) + if err != nil { // grumble grumble + d.error(errInternal) + } + default: + d.error(&UnmarshalTypeError{"int", v.Type()}) + } + case reflect.Interface: + if !isBig { + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + d.error(&UnmarshalTypeError{"int " + s, reflect.TypeOf(int64(0))}) + } + if v.NumMethod() != 0 { + d.error(&UnmarshalTypeError{"int", v.Type()}) + } + v.Set(reflect.ValueOf(n)) + } else { + bi := new(big.Int) + _, ok := bi.SetString(s, 10) + if !ok { + d.error(errInternal) + } + v.Set(reflect.ValueOf(bi)) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.error(&UnmarshalTypeError{"int " + s, v.Type()}) + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.error(&UnmarshalTypeError{"int " + s, v.Type()}) + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.error(&UnmarshalTypeError{"int " + s, v.Type()}) + } + v.SetFloat(n) + } + + case tokenFloat: + var s string + isBig := false + if bs[len(bs)-1] == 'M' { // can end with M, which we promptly ignore + s = string(bs[:len(bs)-1]) + isBig = true + } else { + s = string(bs) + } + switch v.Kind() { + default: + switch v.Type() { + case bigFloatType: + mc := d.mathContext() + bf := v.Addr().Interface().(*big.Float) + bf = bf.SetPrec(mc.Precision).SetMode(mc.Mode) + _, _, err := bf.Parse(s, 10) + if err != nil { // grumble grumble + d.error(errInternal) + } + default: + d.error(&UnmarshalTypeError{"float", v.Type()}) + } + case reflect.Interface: + if !isBig { + n, err := strconv.ParseFloat(s, 64) + if err != nil { + d.error(&UnmarshalTypeError{"float " + s, reflect.TypeOf(float64(0))}) + } + if v.NumMethod() != 0 { + d.error(&UnmarshalTypeError{"float", v.Type()}) + } + v.Set(reflect.ValueOf(n)) + } else { + mc := d.mathContext() + bf := new(big.Float).SetPrec(mc.Precision).SetMode(mc.Mode) + _, _, err := bf.Parse(s, 10) + if err != nil { // grumble grumble + d.error(errInternal) + } + v.Set(reflect.ValueOf(bf)) + } + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.error(&UnmarshalTypeError{"float " + s, v.Type()}) + } + v.SetFloat(n) + } + case tokenChar: + r, err := toRune(bs) + if err != nil { + d.error(err) + } + switch v.Kind() { + default: + d.error(&UnmarshalTypeError{"rune", v.Type()}) + case reflect.Interface: + if v.NumMethod() != 0 { + d.error(&UnmarshalTypeError{"rune", v.Type()}) + } + v.Set(reflect.ValueOf(r)) + case reflect.Int32: // rune is an alias for int32 + v.SetInt(int64(r)) + } + case tokenString: + s, ok := unquoteBytes(bs) + if !ok { + d.error(errInternal) + } + switch v.Kind() { + default: + d.error(&UnmarshalTypeError{"string", v.Type()}) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { 
+ v.Set(reflect.ValueOf(string(s))) + } else { + d.error(&UnmarshalTypeError{"string", v.Type()}) + } + } + default: + d.error(errInternal) + } +} + +func (d *Decoder) literalInterface(bs []byte, ttype tokenType) interface{} { + switch ttype { + case tokenSymbol: + if bytes.Equal(nilByte, bs) { + return nil + } + if bytes.Equal(trueByte, bs) { + return true + } + if bytes.Equal(falseByte, bs) { + return false + } + return Symbol(string(bs)) + case tokenKeyword: + return Keyword(string(bs[1:])) + case tokenInt: + if bs[len(bs)-1] == 'N' { // can end with N + var bi big.Int + s := string(bs[:len(bs)-1]) + _, ok := bi.SetString(s, 10) + if !ok { + d.error(errInternal) + } + return bi + } else { + s := string(bs) + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + d.error(err) + } + return n + } + case tokenFloat: + var s string + if bs[len(bs)-1] == 'M' { // can end with M, which we promptly ignore + s = string(bs[:len(bs)-1]) + } else { + s = string(bs) + } + n, err := strconv.ParseFloat(s, 64) + if err != nil { + d.error(err) + } + return n + case tokenChar: + r, err := toRune(bs) + if err != nil { + d.error(err) + } + return r + case tokenString: + t, ok := unquote(bs) + if !ok { + d.error(errInternal) + } + return t + default: + d.error(errInternal) + return nil + } +} + +var ( + newlineBytes = []byte(`\newline`) + returnBytes = []byte(`\return`) + spaceBytes = []byte(`\space`) + tabBytes = []byte(`\tab`) + formfeedBytes = []byte(`\formfeed`) +) + +func toRune(bs []byte) (rune, error) { + // handle special cases first: + switch { + case bytes.Equal(bs, newlineBytes): + return '\n', nil + case bytes.Equal(bs, returnBytes): + return '\r', nil + case bytes.Equal(bs, spaceBytes): + return ' ', nil + case bytes.Equal(bs, tabBytes): + return '\t', nil + case bytes.Equal(bs, formfeedBytes): + return '\f', nil + case len(bs) == 6 && bs[1] == 'u': // I don't think unicode chars could be 5 bytes long? + return getu4(bs), nil + default: + r, size := utf8.DecodeRune(bs[1:]) + if r == utf8.RuneError && size == 1 { + return r, errIllegalRune + } + return r, nil + } +} + +// nextToken handles #_ +func (d *Decoder) nextToken() ([]byte, tokenType, error) { + bs, tt, err := d.rawToken() + if err != nil { + return bs, tt, err + } + switch tt { + case tokenDiscard: + err := d.traverseValue() + if err != nil { + return nil, tokenError, err + } + return d.nextToken() // again for discards + default: + return bs, tt, err + } +} + +func (d *Decoder) rawToken() ([]byte, tokenType, error) { + if d.undo { + d.undo = false + b := d.prevSlice + tt := d.prevTtype + d.prevSlice = nil + d.prevTtype = tokenError + return b, tt, nil + } + var val bytes.Buffer + d.lex.reset() + doIgnore := true + if d.hasLeftover { + d.hasLeftover = false + d.lex.position++ + switch d.lex.state(d.leftover) { + case lexCont: + val.WriteRune(d.leftover) + doIgnore = false + case lexEnd: + val.WriteRune(d.leftover) + return val.Bytes(), d.lex.token, nil + case lexEndPrev: + return nil, tokenError, errInternal + case lexError: + return nil, tokenError, d.lex.err + case lexIgnore: + // just ignore + } + } + if doIgnore { // ignore whitespace + readWhitespace: + for { + r, _, err := d.rd.ReadRune() + if err == io.EOF { + return nil, tokenError, errNoneLeft + } + if err != nil { + return nil, tokenError, err + } + d.lex.position++ + switch d.lex.state(r) { + case lexCont: // got a value, so continue on past doIgnoring + // TODO: This returns an error. Will it happen in practice? Probably? 
+ val.WriteRune(r) + break readWhitespace + case lexError: + return nil, tokenError, d.lex.err + case lexEnd: + val.WriteRune(r) + return val.Bytes(), d.lex.token, nil + case lexEndPrev: + return nil, tokenError, errInternal + case lexIgnore: + // keep on reading + } + } + } + for { + r, _, err := d.rd.ReadRune() + var ls lexState + // this is not exactly perfect. + switch { + case err == io.EOF: + ls = d.lex.eof() + case err != nil: + return nil, tokenError, err + default: + d.lex.position++ + ls = d.lex.state(r) + } + switch ls { + case lexCont: + val.WriteRune(r) + case lexIgnore: + if err != io.EOF { + return nil, tokenError, errInternal + } else { + return nil, tokenError, errNoneLeft + } + case lexEnd: + if err != io.EOF { + val.WriteRune(r) + } + return val.Bytes(), d.lex.token, nil + case lexEndPrev: + d.hasLeftover = true + d.leftover = r + return val.Bytes(), d.lex.token, nil + case lexError: + return nil, tokenError, d.lex.err + } + } +} + +// traverseValue reads a single value and skips it -- whether it is a list, map +// or a literal. Doesn't validate its state. skips over discard tokens as well. +func (d *Decoder) traverseValue() error { + tstack := newTokenStack() + for { + _, tt, err := d.nextToken() + if err != nil { + return err + } + err = tstack.push(tt) + if err != nil || tstack.done() { + return err + } + } +} + +type tokenStackElem struct { + tt tokenType + count int +} + +type tokenStack struct { + toks []tokenStackElem + toplevel tokenType +} + +func newTokenStack() *tokenStack { + return &tokenStack{ + toks: nil, + toplevel: tokenError, + } +} + +func (t *tokenStack) done() bool { + return len(t.toks) == 0 && t.toplevel != tokenDiscard +} + +func (t *tokenStack) peek() tokenType { + return t.toks[len(t.toks)-1].tt +} + +func (t *tokenStack) peekCount() int { + return t.toks[len(t.toks)-1].count +} + +func (t *tokenStack) pop() { + t.toks = t.toks[:len(t.toks)-1] +} + +func (t *tokenStack) push(tt tokenType) error { + // retain toplevel value for done check + if len(t.toks) == 0 { + t.toplevel = tt + } + switch tt { + case tokenMapStart, tokenVectorStart, tokenListStart, tokenSetStart, tokenDiscard, tokenTag: + // append to toks, regardless + t.toks = append(t.toks, tokenStackElem{tt, 0}) + return nil + case tokenMapEnd: + if len(t.toks) == 0 || (t.peek() != tokenMapStart && t.peek() != tokenSetStart) { + return errUnexpected + } + t.pop() + case tokenListEnd: + if len(t.toks) == 0 || t.peek() != tokenListStart { + return errUnexpected + } + t.pop() + case tokenVectorEnd: + if len(t.toks) == 0 || t.peek() != tokenVectorStart { + return errUnexpected + } + t.pop() + default: + } + if len(t.toks) > 0 { + t.toks[len(t.toks)-1].count++ + } + // popping of discards and tags + for len(t.toks) > 0 && t.peek() == tokenTag { + t.pop() + if len(t.toks) > 0 { + t.toks[len(t.toks)-1].count++ + } + } + if len(t.toks) > 0 && t.peek() == tokenDiscard { + t.pop() + } + return nil +} + +// more removes whitespace and discards, and returns nil if there is more data. +// If the end of the stream is found, io.EOF is sent back. If an error happens +// while parsing a discard value, it is passed up. 
+func (d *Decoder) more() error { + if d.undo { + return nil + } + if d.hasLeftover && d.leftover == '#' { + // check if next rune is '_' + r, _, err := d.rd.ReadRune() + if err == io.EOF { + return errNoneLeft + } + if err != nil { + return err + } + if r != '_' { + // it's not discard, so let's just unread the rune + return d.rd.UnreadRune() + } + // need to consume a value + d.hasLeftover = false + d.leftover = '\uFFFD' + d.lex.position += 2 + err = d.traverseValue() + if err != nil { + return err + } + return d.more() + } + if d.hasLeftover && !isWhitespace(d.leftover) && d.leftover != ';' { + return nil + } + + // If we've come to this step, we need to read whitespace and -- if we find + // something suspicious, we need to check if it can be assumed to be + // whitespace. + d.lex.reset() + for { + var r rune + var err error + readWhitespace: + for { + r, _, err = d.rd.ReadRune() + if err != nil { + return err + // if we hit the end of the line, then we don't have more and we return + // io.EOF + } + d.lex.position++ + switch d.lex.state(r) { + case lexCont: // found something that looks like a value, so break out of whitespace loop + break readWhitespace + case lexError: + return d.lex.err + case lexEnd: // found a delimiter of some sort, so store it as leftover and return nil + d.hasLeftover = true + d.leftover = r + d.lex.position-- + return nil + case lexEndPrev: + return errInternal + case lexIgnore: + // keep on readin' + } + } + + if r == '#' { // the edge case again, so let's gobble + // check if next rune is '_' + r, _, err := d.rd.ReadRune() + if err == io.EOF { + return errNoneLeft + } + if err != nil { + return err + } + if r != '_' { + // it's not discard, so we unread the rune and put # as leftover + d.leftover = '#' + d.hasLeftover = true + d.lex.position-- + return d.rd.UnreadRune() + } + // need to consume a value + d.hasLeftover = false + d.leftover = '\uFFFD' + d.lex.position += 2 + err = d.traverseValue() + if err != nil { + return err + } + return d.more() + } else { // we could do unreadrune here too, would've been just as fine + d.hasLeftover = true + d.leftover = r + d.lex.position-- + return nil + } + } +} + +// Oh, asking about why this is so similar to the part above, eh? Yes, I would +// also consider this a crime. At least I use the same lexer. This is probably +// next on the list when I have people complaining about perf issues. +func (d *Decoder) nextValueBytes() ([]byte, error) { + // TODO: Ensure values inside maps come in pairs. + tstack := newTokenStack() + var val bytes.Buffer + if d.undo { + d.undo = false + b := d.prevSlice + tt := d.prevTtype + d.prevSlice = nil + d.prevTtype = tokenError + if tt == tokenDiscard { // should be impossible to get a tokenDiscard here? + return nil, errInternal + } + err := tstack.push(tt) + if err != nil || tstack.done() { + return val.Bytes(), err + } + val.Write(b) + } +readElems: + for { + d.lex.reset() + // Can't ignore whitespace in general. So I guess we just add it onto the buffer + readWs := true + if d.hasLeftover { + // we can have leftover from previous iteration. e.g. 
"foo[bar]" will have + // leftover "[" and "]" + d.hasLeftover = false + d.lex.position++ + val.WriteRune(d.leftover) + switch d.lex.state(d.leftover) { + case lexCont: + readWs = false + case lexEnd: + err := tstack.push(d.lex.token) + if err != nil || tstack.done() { + return val.Bytes(), err + } + d.lex.reset() + case lexEndPrev: + return nil, errInternal + case lexError: + return nil, d.lex.err + case lexIgnore: + // just keep going + } + } + if readWs { + readWhitespace: + // If we end up here, it means we expect at least one more token + for { + r, _, err := d.rd.ReadRune() + if err == io.EOF { + return nil, errNoneLeft + } + if err != nil { + return nil, err + } + d.lex.position++ + val.WriteRune(r) + switch d.lex.state(r) { + case lexCont: // found something that looks like a value, so break out of whitespace loop + break readWhitespace + case lexError: + return nil, d.lex.err + case lexEnd: + err := tstack.push(d.lex.token) + if err != nil || tstack.done() { + return val.Bytes(), err + } + // Here we'd usually continue on next iteration loop (which is safe + // and valid), but since we know we don't have any leftovers, we can + // just reset the lexer and keep attempting to read whitespace. + d.lex.reset() + case lexEndPrev: + return nil, errInternal + case lexIgnore: + // keep on readin' + } + } + } + // read element + for { + r, rlength, err := d.rd.ReadRune() + var ls lexState + // ugh, this is not exactly perfect. + switch { + case err == io.EOF: + ls = d.lex.eof() + case err != nil: + return nil, err + default: + d.lex.position++ + val.WriteRune(r) + ls = d.lex.state(r) + } + switch ls { + case lexCont: + // keep going + case lexIgnore: + if err != io.EOF { + return nil, errInternal + } else { + return nil, errNoneLeft + } + case lexEnd: + ioErr := err + err := tstack.push(d.lex.token) + if err != nil || tstack.done() { + return val.Bytes(), err + } + if ioErr == io.EOF /* && !tstack.done() */ { + return nil, errNoneLeft + } + continue readElems + case lexEndPrev: // if err == io.EOF then we cannot end up here. (Invariant forced by lexer) + val.Truncate(val.Len() - rlength) + d.hasLeftover = true + d.leftover = r + + err := tstack.push(d.lex.token) + if err != nil || tstack.done() { + return val.Bytes(), err + } + continue readElems + case lexError: + return nil, d.lex.err + } + } + } +} diff --git a/vendor/olympos.io/encoding/edn/edn_tags.go b/vendor/olympos.io/encoding/edn/edn_tags.go new file mode 100644 index 00000000..4097c061 --- /dev/null +++ b/vendor/olympos.io/encoding/edn/edn_tags.go @@ -0,0 +1,142 @@ +// Copyright 2015 Jean Niklas L'orange. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package edn implements encoding and decoding of EDN values as defined in +// https://github.com/edn-format/edn. For a full introduction on how to use +// go-edn, see https://github.com/go-edn/edn/blob/v1/docs/introduction.md. Fully +// self-contained examples of go-edn can be found at +// https://github.com/go-edn/edn/tree/v1/examples. +// +// Note that the small examples in this package is not checking errors as +// persively as you should do when you use this package. This is done because +// I'd like the examples to be easily readable and understandable. The bigger +// examples provide proper error handling. 
+package edn
+
+import (
+ "encoding/base64"
+ "errors"
+ "math/big"
+ "reflect"
+ "sync"
+ "time"
+)
+
+var (
+ ErrNotFunc = errors.New("Value is not a function")
+ ErrMismatchArities = errors.New("Function does not have single argument in, two argument out")
+ ErrNotConcrete = errors.New("Value is not a concrete non-function type")
+ ErrTagOverwritten = errors.New("Previous tag implementation was overwritten")
+)
+
+var globalTags TagMap
+
+// A TagMap contains mappings from tag literals to functions and structs that
+// are used when decoding.
+type TagMap struct {
+ sync.RWMutex
+ m map[string]reflect.Value
+}
+
+var errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+// AddTagFn adds fn as a converter function for tagname tags to this TagMap. fn
+// must have the signature func(T) (U, error), where T is the expected input
+// type and U is the output type. See Decoder.AddTagFn for examples.
+func (tm *TagMap) AddTagFn(tagname string, fn interface{}) error {
+ // TODO: check name
+ rfn := reflect.ValueOf(fn)
+ rtyp := rfn.Type()
+ if rtyp.Kind() != reflect.Func {
+ return ErrNotFunc
+ }
+ if rtyp.NumIn() != 1 || rtyp.NumOut() != 2 || !rtyp.Out(1).Implements(errorType) {
+ // ok to have variadic arity?
+ return ErrMismatchArities
+ }
+ return tm.addVal(tagname, rfn)
+}
+
+// MustAddTagFn adds fn as a converter function for tagname tags to this TagMap
+// like AddTagFn, except this function panics if the tag could not be added.
+func (tm *TagMap) MustAddTagFn(tagname string, fn interface{}) {
+ if err := tm.AddTagFn(tagname, fn); err != nil {
+ panic(err)
+ }
+}
+
+func (tm *TagMap) addVal(name string, val reflect.Value) error {
+ tm.Lock()
+ if tm.m == nil {
+ tm.m = map[string]reflect.Value{}
+ }
+ _, ok := tm.m[name]
+ tm.m[name] = val
+ tm.Unlock()
+ if ok {
+ return ErrTagOverwritten
+ } else {
+ return nil
+ }
+}
+
+// AddTagFn adds fn as a converter function for tagname tags to the global
+// TagMap. fn must have the signature func(T) (U, error), where T is the
+// expected input type and U is the output type. See Decoder.AddTagFn for
+// examples.
+func AddTagFn(tagname string, fn interface{}) error {
+ return globalTags.AddTagFn(tagname, fn)
+}
+
+// MustAddTagFn adds fn as a converter function for tagname tags to the global
+// TagMap like AddTagFn, except this function panics if the tag could not be added.
+func MustAddTagFn(tagname string, fn interface{}) {
+ globalTags.MustAddTagFn(tagname, fn)
+}
+
+// AddTagStruct adds the struct as a matching struct for tagname tags to this
+// TagMap. val cannot be a channel, function, interface or an unsafe pointer.
+// See Decoder.AddTagStruct for examples.
+func (tm *TagMap) AddTagStruct(tagname string, val interface{}) error {
+ rstruct := reflect.ValueOf(val)
+ switch rstruct.Type().Kind() {
+ case reflect.Invalid, reflect.Chan, reflect.Func, reflect.Interface, reflect.UnsafePointer:
+ return ErrNotConcrete
+ }
+ return tm.addVal(tagname, rstruct)
+}
+
+// AddTagStruct adds the struct as a matching struct for tagname tags to the
+// global TagMap. val cannot be a channel, function, interface or an unsafe
+// pointer. See Decoder.AddTagStruct for examples.
+func AddTagStruct(tagname string, val interface{}) error { + return globalTags.AddTagStruct(tagname, val) +} + +func init() { + err := AddTagFn("inst", func(s string) (time.Time, error) { + return time.Parse(time.RFC3339Nano, s) + }) + if err != nil { + panic(err) + } + err = AddTagFn("base64", base64.StdEncoding.DecodeString) + if err != nil { + panic(err) + } +} + +// A MathContext specifies the precision and rounding mode for +// `math/big.Float`s when decoding. +type MathContext struct { + Precision uint + Mode big.RoundingMode +} + +// The GlobalMathContext is the global MathContext. It is used if no other +// context is provided. See MathContext for example usage. +var GlobalMathContext = MathContext{ + Mode: big.ToNearestEven, + Precision: 192, +} diff --git a/vendor/olympos.io/encoding/edn/encode.go b/vendor/olympos.io/encoding/edn/encode.go new file mode 100644 index 00000000..5635dcd6 --- /dev/null +++ b/vendor/olympos.io/encoding/edn/encode.go @@ -0,0 +1,1422 @@ +// Copyright 2015 Jean Niklas L'orange. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edn + +import ( + "bytes" + "encoding/base64" + "io" + "math" + "math/big" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + "unicode" + "unicode/utf8" +) + +// Marshal returns the EDN encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalEDN method +// to produce EDN. The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalEDN. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as EDN booleans. +// +// Integers encode as EDN integers. +// +// Floating point values encode as EDN floats. +// +// String values encode as EDN strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting EDN output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// +// Array and slice values encode as EDN arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the nil EDN value. +// +// Struct values encode as EDN maps. Each exported struct field +// becomes a member of the map unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or string of +// length zero. The map's default key is the struct field name as a keyword, +// but can be specified in the struct field's tag value. The "edn" key in +// the struct field's tag value is the key name, followed by an optional comma +// and options. Examples: +// +// // Field is ignored by this package. +// Field int `edn:"-"` +// +// // Field appears in EDN as key :my-name. +// Field int `edn:"myName"` +// +// // Field appears in EDN as key :my-name and +// // the field is omitted from the object if its value is empty, +// // as defined above. 
+// Field int `edn:"my-name,omitempty"`
+//
+// // Field appears in EDN as key :field (the default), but
+// // the field is skipped if empty.
+// // Note the leading comma.
+// Field int `edn:",omitempty"`
+//
+// The "str", "key" and "sym" options signal that a field name should be
+// written as a string, keyword or symbol, respectively. If none are specified,
+// then the default behaviour is to emit them as keywords. Examples:
+//
+// // Default behaviour: field name will be encoded as :foo
+// Foo int
+//
+// // Encode Foo as string with name "string-foo"
+// Foo int `edn:"string-foo,str"`
+//
+// // Encode Foo as symbol with name sym-foo
+// Foo int `edn:"sym-foo,sym"`
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its EDN tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for EDN when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are EDN-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// To force ignoring of an anonymous struct field in both current and earlier
+// versions, give the field an EDN tag of "-".
+//
+// Map values usually encode as EDN maps. There are no limitations on the keys
+// or values -- as long as they can be encoded to EDN, anything goes. Map values
+// will be encoded as sets if their value type is either a bool or a struct with
+// no fields.
+//
+// If you want to ensure that a value is encoded as a map, you can specify that
+// as follows:
+//
+// // Encode Foo as a map, instead of the default set
+// Foo map[int]bool `edn:",map"`
+//
+// Arrays and slices are encoded as vectors by default. As with maps and sets,
+// you can specify that a field should be encoded as a list instead, by using
+// the option "list":
+//
+// // Encode Foo as a list, instead of the default vector
+// Foo []int `edn:",list"`
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the nil EDN object.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the nil EDN value.
+//
+// Channel, complex, and function values cannot be encoded in EDN.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// EDN cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+ e := &encodeState{}
+ err := e.marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return e.Bytes(), nil
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(v) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = Indent(&buf, b, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalPPrint is like Marshal but applies PPrint to format the output. +func MarshalPPrint(v interface{}, opts *PPrintOpts) ([]byte, error) { + b, err := Marshal(v) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = PPrint(&buf, b, opts) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// An Encoder writes EDN values to an output stream. +type Encoder struct { + writer io.Writer + ec encodeState +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + writer: w, + ec: encodeState{}, + } +} + +// Encode writes the EDN encoding of v to the stream, followed by a newline +// character. +// +// See the documentation for Marshal for details about the conversion of Go +// values to EDN. +func (e *Encoder) Encode(v interface{}) error { + e.ec.needsDelim = false + err := e.ec.marshal(v) + if err != nil { + e.ec.Reset() + return err + } + b := e.ec.Bytes() + e.ec.Reset() + _, err = e.writer.Write(b) + if err != nil { + return err + } + _, err = e.writer.Write([]byte{'\n'}) + return err +} + +// EncodeIndent writes the indented EDN encoding of v to the stream, followed by +// a newline character. +// +// See the documentation for MarshalIndent for details about the conversion of +// Go values to EDN. +func (e *Encoder) EncodeIndent(v interface{}, prefix, indent string) error { + e.ec.needsDelim = false + err := e.ec.marshal(v) + if err != nil { + e.ec.Reset() + return err + } + b := e.ec.Bytes() + var buf bytes.Buffer + err = Indent(&buf, b, prefix, indent) + e.ec.Reset() + if err != nil { + return err + } + _, err = e.writer.Write(buf.Bytes()) + if err != nil { + return err + } + _, err = e.writer.Write([]byte{'\n'}) + return err +} + +// EncodePPrint writes the pretty-printed EDN encoding of v to the stream, +// followed by a newline character. +// +// See the documentation for MarshalPPrint for details about the conversion of +// Go values to EDN. +func (e *Encoder) EncodePPrint(v interface{}, opts *PPrintOpts) error { + e.ec.needsDelim = false + err := e.ec.marshal(v) + if err != nil { + e.ec.Reset() + return err + } + b := e.ec.Bytes() + var buf bytes.Buffer + err = PPrint(&buf, b, opts) + e.ec.Reset() + if err != nil { + return err + } + _, err = e.writer.Write(buf.Bytes()) + if err != nil { + return err + } + _, err = e.writer.Write([]byte{'\n'}) + return err +} + +// Marshaler is the interface implemented by objects that +// can marshal themselves into valid EDN. +type Marshaler interface { + MarshalEDN() ([]byte, error) +} + +// An UnsupportedTypeError is returned by Marshal when attempting +// to encode an unsupported value type. +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return "edn: unsupported type: " + e.Type.String() +} + +// An UnsupportedValueError is returned by Marshal when attempting to encode an +// unsupported value. Examples include the float values NaN and Infinity. 
+type UnsupportedValueError struct { + Value reflect.Value + Str string +} + +func (e *UnsupportedValueError) Error() string { + return "edn: unsupported value: " + e.Str +} + +// A MarshalerError is returned by Marshal when encoding a type with a +// MarshalEDN function fails. +type MarshalerError struct { + Type reflect.Type + Err error +} + +func (e *MarshalerError) Error() string { + return "edn: error calling MarshalEDN for type " + e.Type.String() + ": " + e.Err.Error() +} + +var hex = "0123456789abcdef" + +// An encodeState encodes EDN into a bytes.Buffer. +type encodeState struct { + bytes.Buffer // accumulated output + scratch [64]byte + needsDelim bool + mc *MathContext +} + +// mathContext returns the math context to use. If not set in the encodeState, +// the global math context is used. +func (e *encodeState) mathContext() *MathContext { + if e.mc != nil { + return e.mc + } + return &GlobalMathContext +} + +var encodeStatePool sync.Pool + +func newEncodeState() *encodeState { + if v := encodeStatePool.Get(); v != nil { + e := v.(*encodeState) + e.Reset() + return e + } + return new(encodeState) +} + +func (e *encodeState) marshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + if s, ok := r.(string); ok { + panic(s) + } + err = r.(error) + } + }() + e.reflectValue(reflect.ValueOf(v)) + return nil +} + +func (e *encodeState) error(err error) { + panic(err) +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func (e *encodeState) reflectValue(v reflect.Value) { + valueEncoder(v)(e, v) +} + +type encoderFunc func(e *encodeState, v reflect.Value) + +type typeAndTag struct { + t reflect.Type + ctype tagType +} + +var encoderCache struct { + sync.RWMutex + m map[typeAndTag]encoderFunc +} + +func valueEncoder(v reflect.Value) encoderFunc { + if !v.IsValid() { + return invalidValueEncoder + } + return typeEncoder(v.Type(), tagUndefined) +} + +func typeEncoder(t reflect.Type, tagType tagType) encoderFunc { + tac := typeAndTag{t, tagType} + encoderCache.RLock() + f := encoderCache.m[tac] + encoderCache.RUnlock() + if f != nil { + return f + } + couldUseJSON := readCanUseJSONTag() + + // To deal with recursive types, populate the map with an + // indirect func before we build it. This type waits on the + // real func (f) to be ready and then calls it. This indirect + // func is only used for recursive types. + encoderCache.Lock() + if encoderCache.m == nil { + encoderCache.m = make(map[typeAndTag]encoderFunc) + } + var wg sync.WaitGroup + wg.Add(1) + encoderCache.m[tac] = func(e *encodeState, v reflect.Value) { + wg.Wait() + f(e, v) + } + encoderCache.Unlock() + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = newTypeEncoder(t, tagType, true) + wg.Done() + encoderCache.Lock() + if couldUseJSON != readCanUseJSONTag() { + // cache has been invalidated, unlock and retry recursively. 
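+		// (This happens when UseJSONAsFallback was called while f was being
+		// built, so f may have been computed with stale tag rules.)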
+ encoderCache.Unlock() + return typeEncoder(t, tagType) + } + encoderCache.m[tac] = f + encoderCache.Unlock() + return f +} + +var ( + marshalerType = reflect.TypeOf(new(Marshaler)).Elem() + instType = reflect.TypeOf((*time.Time)(nil)).Elem() +) + +// newTypeEncoder constructs an encoderFunc for a type. +// The returned encoder only checks CanAddr when allowAddr is true. +func newTypeEncoder(t reflect.Type, tagType tagType, allowAddr bool) encoderFunc { + if t.Implements(marshalerType) { + return marshalerEncoder + } + if t.Kind() != reflect.Ptr && allowAddr { + if reflect.PtrTo(t).Implements(marshalerType) { + return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, tagType, false)) + } + } + + // Handle specific types first + switch t { + case bigIntType: + return bigIntEncoder + case bigFloatType: + return bigFloatEncoder + case instType: + return instEncoder + } + + switch t.Kind() { + case reflect.Bool: + return boolEncoder + case reflect.Int32: + if tagType == tagRune { + return runeEncoder + } + return intEncoder + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int64: + return intEncoder + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintEncoder + case reflect.Float32: + return float32Encoder + case reflect.Float64: + return float64Encoder + case reflect.String: + return stringEncoder + case reflect.Interface: + return interfaceEncoder + case reflect.Struct: + return newStructEncoder(t, tagType) + case reflect.Map: + return newMapEncoder(t, tagType) + case reflect.Slice: + return newSliceEncoder(t, tagType) + case reflect.Array: + return newArrayEncoder(t, tagType) + case reflect.Ptr: + return newPtrEncoder(t, tagType) + default: + return unsupportedTypeEncoder + } +} + +func invalidValueEncoder(e *encodeState, v reflect.Value) { + e.writeNil() +} + +func marshalerEncoder(e *encodeState, v reflect.Value) { + if v.Kind() == reflect.Ptr && v.IsNil() { + e.writeNil() + return + } + m := v.Interface().(Marshaler) + b, err := m.MarshalEDN() + if err == nil { + // copy EDN into buffer, checking (token) validity. + e.ensureDelim() + err = Compact(&e.Buffer, b) + e.needsDelim = true + } + if err != nil { + e.error(&MarshalerError{v.Type(), err}) + } +} + +func addrMarshalerEncoder(e *encodeState, v reflect.Value) { + va := v.Addr() + if va.IsNil() { + e.writeNil() + return + } + m := va.Interface().(Marshaler) + b, err := m.MarshalEDN() + if err == nil { + // copy EDN into buffer, checking (token) validity. 
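+		// (Compact re-lexes b, so malformed MarshalEDN output surfaces as an
+		// error here rather than being written through verbatim.)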
+ e.ensureDelim() + err = Compact(&e.Buffer, b) + e.needsDelim = true + } + if err != nil { + e.error(&MarshalerError{v.Type(), err}) + } +} + +func boolEncoder(e *encodeState, v reflect.Value) { + e.ensureDelim() + if v.Bool() { + e.WriteString("true") + } else { + e.WriteString("false") + } + e.needsDelim = true +} + +func runeEncoder(e *encodeState, v reflect.Value) { + encodeRune(&e.Buffer, rune(v.Int())) + e.needsDelim = true +} + +func intEncoder(e *encodeState, v reflect.Value) { + e.ensureDelim() + b := strconv.AppendInt(e.scratch[:0], v.Int(), 10) + e.Write(b) + e.needsDelim = true +} + +func uintEncoder(e *encodeState, v reflect.Value) { + e.ensureDelim() + b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10) + e.Write(b) + e.needsDelim = true +} + +func bigIntEncoder(e *encodeState, v reflect.Value) { + e.ensureDelim() + bi := v.Interface().(big.Int) + b := []byte(bi.String()) + e.Write(b) + e.WriteByte('N') + e.needsDelim = true +} + +func bigFloatEncoder(e *encodeState, v reflect.Value) { + e.ensureDelim() + bf := new(big.Float) + mc := e.mathContext() + val := v.Interface().(big.Float) + bf.Set(&val).SetMode(mc.Mode) + b := []byte(bf.Text('g', int(mc.Precision))) + e.Write(b) + e.WriteByte('M') + e.needsDelim = true +} + +func instEncoder(e *encodeState, v reflect.Value) { + e.ensureDelim() + t := v.Interface().(time.Time) + e.Write([]byte(t.Format(`#inst"` + time.RFC3339Nano + `"`))) +} + +type floatEncoder int // number of bits + +func (bits floatEncoder) encode(e *encodeState, v reflect.Value) { + f := v.Float() + if math.IsInf(f, 0) || math.IsNaN(f) { + e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))}) + } + e.ensureDelim() + b := strconv.AppendFloat(e.scratch[:0], f, 'g', -1, int(bits)) + if ix := bytes.IndexAny(b, ".eE"); ix < 0 { + b = append(b, '.', '0') + } + e.Write(b) + e.needsDelim = true +} + +var ( + float32Encoder = (floatEncoder(32)).encode + float64Encoder = (floatEncoder(64)).encode +) + +func stringEncoder(e *encodeState, v reflect.Value) { + e.string(v.String()) +} + +func interfaceEncoder(e *encodeState, v reflect.Value) { + if v.IsNil() { + e.writeNil() + return + } + e.reflectValue(v.Elem()) +} + +func unsupportedTypeEncoder(e *encodeState, v reflect.Value) { + e.error(&UnsupportedTypeError{v.Type()}) +} + +type structEncoder struct { + fields []field + fieldEncs []encoderFunc +} + +func (se *structEncoder) encode(e *encodeState, v reflect.Value) { + e.WriteByte('{') + e.needsDelim = false + for i, f := range se.fields { + fv := fieldByIndex(v, f.index) + if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) { + continue + } + switch f.fnameType { + case emitKey: + e.ensureDelim() + e.WriteByte(':') + e.WriteString(f.name) + e.needsDelim = true + case emitString: + e.string(f.name) + e.needsDelim = false + case emitSym: + e.ensureDelim() + e.WriteString(f.name) + e.needsDelim = true + } + se.fieldEncs[i](e, fv) + } + e.WriteByte('}') + e.needsDelim = false +} + +func newStructEncoder(t reflect.Type, tagType tagType) encoderFunc { + fields := cachedTypeFields(t) + se := &structEncoder{ + fields: fields, + fieldEncs: make([]encoderFunc, len(fields)), + } + for i, f := range fields { + se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index), f.tagType) + } + return se.encode +} + +type mapEncoder struct { + keyEnc encoderFunc + elemEnc encoderFunc +} + +func (me *mapEncoder) encode(e *encodeState, v reflect.Value) { + if v.IsNil() { + e.writeNil() + return + } + e.WriteByte('{') + e.needsDelim = false + mk := v.MapKeys() + // NB: 
We don't get deterministic results here, because we don't iterate in a
+	// deterministic way.
+	for _, k := range mk {
+		if e.needsDelim { // bypass conventional whitespace to use commas instead
+			e.WriteByte(',')
+			e.needsDelim = false
+		}
+		me.keyEnc(e, k)
+		me.elemEnc(e, v.MapIndex(k))
+	}
+	e.WriteByte('}')
+	e.needsDelim = false
+}
+
+type mapSetEncoder struct {
+	keyEnc encoderFunc
+}
+
+func (me *mapSetEncoder) encode(e *encodeState, v reflect.Value) {
+	if v.IsNil() {
+		e.writeNil()
+		return
+	}
+	e.ensureDelim()
+	e.WriteByte('#')
+	e.WriteByte('{')
+	e.needsDelim = false
+	mk := v.MapKeys()
+	// Iteration order is not deterministic here either.
+	for _, k := range mk {
+		mval := v.MapIndex(k)
+		if mval.Kind() != reflect.Bool || mval.Bool() {
+			me.keyEnc(e, k)
+		}
+	}
+	e.WriteByte('}')
+	e.needsDelim = false
+}
+
+func newMapEncoder(t reflect.Type, tagType tagType) encoderFunc {
+	canBeSet := false
+	switch t.Elem().Kind() {
+	case reflect.Struct:
+		if t.Elem().NumField() == 0 {
+			canBeSet = true
+		}
+	case reflect.Bool:
+		canBeSet = true
+	}
+	if (tagType == tagUndefined || tagType == tagSet) && canBeSet {
+		me := &mapSetEncoder{typeEncoder(t.Key(), tagUndefined)}
+		return me.encode
+	}
+	if tagType != tagUndefined && tagType != tagMap {
+		return unsupportedTypeEncoder
+	}
+	me := &mapEncoder{
+		typeEncoder(t.Key(), tagUndefined),
+		typeEncoder(t.Elem(), tagUndefined),
+	}
+	return me.encode
+}
+
+func encodeByteSlice(e *encodeState, v reflect.Value) {
+	if v.IsNil() {
+		e.writeNil()
+		return
+	}
+	s := v.Bytes()
+	e.ensureDelim()
+	e.WriteString(`#base64"`)
+	if len(s) < 1024 {
+		// for small buffers, using Encode directly is much faster.
+		dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
+		base64.StdEncoding.Encode(dst, s)
+		e.Write(dst)
+	} else {
+		// for large buffers, avoid unnecessary extra temporary
+		// buffer space.
+		enc := base64.NewEncoder(base64.StdEncoding, e)
+		enc.Write(s)
+		enc.Close()
+	}
+	e.WriteByte('"')
+}
+
+// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
+type sliceEncoder struct {
+	arrayEnc encoderFunc
+}
+
+func (e *encodeState) ensureDelim() {
+	if e.needsDelim {
+		e.WriteByte(' ')
+	}
+}
+
+func (e *encodeState) writeNil() {
+	e.ensureDelim()
+	e.WriteString("nil")
+	e.needsDelim = true
+}
+
+func (se *sliceEncoder) encode(e *encodeState, v reflect.Value) {
+	if v.IsNil() {
+		e.writeNil()
+		return
+	}
+	se.arrayEnc(e, v)
+}
+
+func newSliceEncoder(t reflect.Type, tagType tagType) encoderFunc {
+	// Byte slices get special treatment; arrays don't.
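+	// ([]byte values are emitted as #base64"..." via encodeByteSlice above.)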
+ if t.Elem().Kind() == reflect.Uint8 { + return encodeByteSlice + } + enc := &sliceEncoder{newArrayEncoder(t, tagType)} + return enc.encode +} + +type arrayEncoder struct { + elemEnc encoderFunc +} + +func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value) { + e.WriteByte('[') + e.needsDelim = false + n := v.Len() + for i := 0; i < n; i++ { + ae.elemEnc(e, v.Index(i)) + } + e.WriteByte(']') + e.needsDelim = false +} + +type listArrayEncoder struct { + elemEnc encoderFunc +} + +func (ae *listArrayEncoder) encode(e *encodeState, v reflect.Value) { + e.WriteByte('(') + e.needsDelim = false + n := v.Len() + for i := 0; i < n; i++ { + ae.elemEnc(e, v.Index(i)) + } + e.WriteByte(')') + e.needsDelim = false +} + +type setArrayEncoder struct { + elemEnc encoderFunc +} + +func (ae *setArrayEncoder) encode(e *encodeState, v reflect.Value) { + e.ensureDelim() + e.WriteByte('#') + e.WriteByte('{') + e.needsDelim = false + n := v.Len() + for i := 0; i < n; i++ { + ae.elemEnc(e, v.Index(i)) + } + e.WriteByte('}') + e.needsDelim = false +} + +func newArrayEncoder(t reflect.Type, tagType tagType) encoderFunc { + switch tagType { + case tagList: + enc := &listArrayEncoder{typeEncoder(t.Elem(), tagUndefined)} + return enc.encode + case tagSet: + enc := &setArrayEncoder{typeEncoder(t.Elem(), tagUndefined)} + return enc.encode + default: + enc := &arrayEncoder{typeEncoder(t.Elem(), tagUndefined)} + return enc.encode + } +} + +type ptrEncoder struct { + elemEnc encoderFunc +} + +func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value) { + if v.IsNil() { + e.writeNil() + return + } + pe.elemEnc(e, v.Elem()) +} + +func newPtrEncoder(t reflect.Type, tagType tagType) encoderFunc { + enc := &ptrEncoder{typeEncoder(t.Elem(), tagType)} + return enc.encode +} + +type condAddrEncoder struct { + canAddrEnc, elseEnc encoderFunc +} + +func (ce *condAddrEncoder) encode(e *encodeState, v reflect.Value) { + if v.CanAddr() { + ce.canAddrEnc(e, v) + } else { + ce.elseEnc(e, v) + } +} + +// newCondAddrEncoder returns an encoder that checks whether its value +// CanAddr and delegates to canAddrEnc if so, else to elseEnc. +func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc { + enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc} + return enc.encode +} + +// NOTE: keep in sync with stringBytes below. +func (e *encodeState) string(s string) (int, error) { + len0 := e.Len() + e.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + e.WriteString(s[start:i]) + } + switch b { + case '\\', '"': + e.WriteByte('\\') + e.WriteByte(b) + case '\n': + e.WriteByte('\\') + e.WriteByte('n') + case '\r': + e.WriteByte('\\') + e.WriteByte('r') + case '\t': + e.WriteByte('\\') + e.WriteByte('t') + default: + // This encodes bytes < 0x20 except for \n and \r, + // as well as <, > and &. The latter are escaped because they + // can lead to security holes when user-controlled strings + // are rendered into EDN and served to some browsers. 
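+				// Each such byte is written as a \u00XX escape, built from
+				// the two hex nibbles of b.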
+ e.WriteString(`\u00`) + e.WriteByte(hex[b>>4]) + e.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + e.WriteString(s[start:i]) + } + e.WriteString(`\ufffd`) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + e.WriteString(s[start:]) + } + e.WriteByte('"') + e.needsDelim = false + return e.Len() - len0, nil +} + +// NOTE: keep in sync with string above. +func (e *encodeState) stringBytes(s []byte) (int, error) { + len0 := e.Len() + e.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + e.Write(s[start:i]) + } + switch b { + case '\\', '"': + e.WriteByte('\\') + e.WriteByte(b) + case '\n': + e.WriteByte('\\') + e.WriteByte('n') + case '\r': + e.WriteByte('\\') + e.WriteByte('r') + case '\t': + e.WriteByte('\\') + e.WriteByte('t') + default: + // This encodes bytes < 0x20 except for \n and \r, + // as well as <, >, and &. The latter are escaped because they + // can lead to security holes when user-controlled strings + // are rendered into EDN and served to some browsers. + e.WriteString(`\u00`) + e.WriteByte(hex[b>>4]) + e.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + e.Write(s[start:i]) + } + e.WriteString(`\ufffd`) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + e.Write(s[start:]) + } + e.WriteByte('"') + e.needsDelim = false + return e.Len() - len0, nil +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +func fieldByIndex(v reflect.Value, index []int) reflect.Value { + for _, i := range index { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return reflect.Value{} + } + v = v.Elem() + } + v = v.Field(i) + } + return v +} + +func typeByIndex(t reflect.Type, index []int) reflect.Type { + for _, i := range index { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + t = t.Field(i).Type + } + return t +} + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + index []int + typ reflect.Type + omitEmpty bool + fnameType emitType + tagType tagType +} + +type emitType int + +const ( + emitSym emitType = iota + emitKey + emitString +) + +type tagType int + +const ( + tagUndefined tagType = iota + tagSet + tagMap + tagVec + tagList + tagRune +) + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from edn tag", then +// breaking ties with index sequence. 
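+//
+// Sorting same-named fields next to each other lets typeFields hand each
+// run of candidates to dominantField in a single pass.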
+type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that edn should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + tag := sf.Tag.Get("edn") + if tag == "" && readCanUseJSONTag() { + tag = sf.Tag.Get("json") + } + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Add tagging rules: + var emit emitType + switch { + case opts.Contains("sym"): + emit = emitSym + case opts.Contains("str"): + emit = emitString + case opts.Contains("key"): + fallthrough + default: + emit = emitKey + } + // key, sym, str + + var tagType tagType // add tag rules + switch { + case opts.Contains("set"): + tagType = tagSet + case opts.Contains("map"): + tagType = tagMap + case opts.Contains("vector"): + tagType = tagVec + case opts.Contains("list"): + tagType = tagList + case opts.Contains("rune"): + tagType = tagRune + default: + tagType = tagUndefined + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + r := []rune(sf.Name) + r[0] = unicode.ToLower(r[0]) + name = string(r) + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + fnameType: emit, + tagType: tagType, + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. 
+					fields = append(fields, fields[len(fields)-1])
+				}
+				continue
+			}
+
+			// Record new anonymous struct to explore in next round.
+			nextCount[ft]++
+			if nextCount[ft] == 1 {
+				next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+			}
+		}
+		}
+	}
+
+	sort.Sort(byName(fields))
+
+	// Delete all fields that are hidden by the Go rules for embedded fields,
+	// except that fields with EDN tags are promoted.
+
+	// The fields are sorted in primary order of name, secondary order
+	// of field index length. Loop over names; for each name, delete
+	// hidden fields by choosing the one dominant field that survives.
+	out := fields[:0]
+	for advance, i := 0, 0; i < len(fields); i += advance {
+		// One iteration per name.
+		// Find the sequence of fields with the name of this first field.
+		fi := fields[i]
+		name := fi.name
+		for advance = 1; i+advance < len(fields); advance++ {
+			fj := fields[i+advance]
+			if fj.name != name {
+				break
+			}
+		}
+		if advance == 1 { // Only one field with this name
+			out = append(out, fi)
+			continue
+		}
+		dominant, ok := dominantField(fields[i : i+advance])
+		if ok {
+			out = append(out, dominant)
+		}
+	}
+
+	fields = out
+	sort.Sort(byIndex(fields))
+
+	return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// EDN tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+	// The fields are sorted in increasing index-length order. The winner
+	// must therefore be one with the shortest index length. Drop all
+	// longer entries, which is easy: just truncate the slice.
+	length := len(fields[0].index)
+	tagged := -1 // Index of first tagged field.
+	for i, f := range fields {
+		if len(f.index) > length {
+			fields = fields[:i]
+			break
+		}
+		if f.tag {
+			if tagged >= 0 {
+				// Multiple tagged fields at the same level: conflict.
+				// Return no field.
+				return field{}, false
+			}
+			tagged = i
+		}
+	}
+	if tagged >= 0 {
+		return fields[tagged], true
+	}
+	// All remaining fields have the same length. If there's more than one,
+	// we have a conflict (two fields named "X" at the same level) and we
+	// return no field.
+	if len(fields) > 1 {
+		return field{}, false
+	}
+	return fields[0], true
+}
+
+var canUseJSONTag int32
+
+func readCanUseJSONTag() bool {
+	return atomic.LoadInt32(&canUseJSONTag) == 1
+}
+
+// UseJSONAsFallback, when called with true, lets go-edn use information from
+// the `json` struct tag for encoding and decoding fields whose `edn` tag is
+// not set. This is not threadsafe: encoding and decoding happening while this
+// is called may return results that mix json and non-json tag reading.
+// Preferably, call this in an init() function to ensure it is either set or
+// unset.
+func UseJSONAsFallback(val bool) {
+	set := int32(0)
+	if val {
+		set = 1
+	}
+
+	// Here comes the funny stuff: Cache invalidation. Right now we lock and
+	// unlock these independently of each other, so it's fine to lock them in
+	// this order. However, if we decide to change this later on, the only
+	// reasonable change would be that you may grab the encoderCache lock
+	// before the fieldCache lock. Therefore we do it in this order, although
+	// it should not matter strictly speaking.
+ encoderCache.Lock() + fieldCache.Lock() + atomic.StoreInt32(&canUseJSONTag, set) + fieldCache.m = nil + encoderCache.m = nil + fieldCache.Unlock() + encoderCache.Unlock() +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + couldUseJSON := readCanUseJSONTag() + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if couldUseJSON != readCanUseJSONTag() { + // cache has been invalidated, unlock and retry recursively. + fieldCache.Unlock() + return cachedTypeFields(t) + } + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/olympos.io/encoding/edn/extras.go b/vendor/olympos.io/encoding/edn/extras.go new file mode 100644 index 00000000..000ca232 --- /dev/null +++ b/vendor/olympos.io/encoding/edn/extras.go @@ -0,0 +1,177 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edn + +import ( + "reflect" + "strconv" + "unicode/utf8" +) + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func (d *Decoder) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, v +} + +// unquote converts a quoted EDN string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. 
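+	// (Fast path: scan until a backslash, a quote character, or invalid UTF-8
+	// forces a copy; r counts the leading bytes that can be reused as-is.)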
+ r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote is invalid + case c == '"': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/olympos.io/encoding/edn/fold.go b/vendor/olympos.io/encoding/edn/fold.go new file mode 100644 index 00000000..f6ff95b1 --- /dev/null +++ b/vendor/olympos.io/encoding/edn/fold.go @@ -0,0 +1,143 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edn + +import ( + "bytes" + "unicode/utf8" +) + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See https://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. 
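+//
+// For example (illustrative): equalFoldRight([]byte("k"), []byte("\u212a"))
+// is true, because ASCII 'k' folds to the Kelvin sign.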
+func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} diff --git a/vendor/olympos.io/encoding/edn/lexer.go b/vendor/olympos.io/encoding/edn/lexer.go new file mode 100644 index 00000000..b8592ca2 --- /dev/null +++ b/vendor/olympos.io/encoding/edn/lexer.go @@ -0,0 +1,603 @@ +// Copyright 2015 Jean Niklas L'orange. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edn + +import ( + "strconv" + u "unicode" +) + +type lexState int + +const ( + lexCont = lexState(iota) // continue reading + lexIgnore // values you can ignore, just whitespace and comments atm + lexEnd // value ended with input given in + lexEndPrev // value ended with previous input + lexError // erroneous input +) + +type tokenType int + +const ( // value types from lexer + tokenSymbol = tokenType(iota) + tokenKeyword + tokenString + tokenInt + tokenFloat + tokenTag + tokenChar + tokenListStart + tokenListEnd + tokenVectorStart + tokenVectorEnd + tokenMapStart + tokenMapEnd + tokenSetStart + tokenDiscard + + tokenError +) + +func (t tokenType) String() string { + switch t { + case tokenSymbol: + return "symbol" + case tokenKeyword: + return "keyword" + case tokenString: + return "string" + case tokenInt: + return "integer" + case tokenFloat: + return "float" + case tokenTag: + return "tag" + case tokenChar: + return "character" + case tokenListStart: + return "list start" + case tokenListEnd: + return "list end" + case tokenVectorStart: + return "vector start" + case tokenVectorEnd: + return "vector end" + case tokenMapStart: + return "map start" + case tokenMapEnd: + return "map/set end" + case tokenSetStart: + return "set start" + case tokenDiscard: + return "discard token" + case tokenError: + return "error" + default: + return "[unknown]" + } +} + +const tokenSetEnd = tokenMapEnd // sets ends the same way as maps do + +// A SyntaxError is a description of an EDN syntax error. 
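+//
+// Most lexer errors are produced by (*lexer).error further down, with
+// messages of the form "invalid character '<r>' <context>".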
+type SyntaxError struct { + msg string // description of error + Offset int64 // error occurred after reading Offset bytes +} + +func (e *SyntaxError) Error() string { + return e.msg +} + +func okSymbolFirst(r rune) bool { + switch r { + case '.', '*', '+', '!', '-', '_', '?', '$', '%', '&', '=', '<', '>': + return true + } + return false +} + +func okSymbol(r rune) bool { + switch r { + case '.', '*', '+', '!', '-', '_', '?', '$', '%', '&', '=', '<', '>', ':', '#', '\'': + return true + } + return false +} + +func isWhitespace(r rune) bool { + return u.IsSpace(r) || r == ',' +} + +type lexer struct { + state func(rune) lexState + err error + position int64 + token tokenType + + count int // counter is used in some functions within the lexer + expecting []rune // expecting is used to avoid duplication when we expect e.g. \newline +} + +func (l *lexer) reset() { + l.state = l.stateBegin + l.token = tokenType(-1) + l.err = nil +} + +func (l *lexer) eof() lexState { + if l.err != nil { + return lexError + } + lt := l.state(' ') + if lt == lexCont { + l.err = &SyntaxError{"unexpected end of EDN input", l.position} + lt = lexError + } + if l.err != nil { + return lexError + } + if lt == lexEndPrev { + return lexEnd + } + return lt +} + +func (l *lexer) stateBegin(r rune) lexState { + switch { + case isWhitespace(r): + return lexIgnore + case r == '{': + l.token = tokenMapStart + return lexEnd + case r == '}': + l.token = tokenMapEnd + return lexEnd + case r == '[': + l.token = tokenVectorStart + return lexEnd + case r == ']': + l.token = tokenVectorEnd + return lexEnd + case r == '(': + l.token = tokenListStart + return lexEnd + case r == ')': + l.token = tokenListEnd + return lexEnd + case r == '#': + l.state = l.statePound + return lexCont + case r == ':': + l.state = l.stateKeyword + return lexCont + case r == '/': // ohh, the lovely slash edge case + l.token = tokenSymbol + l.state = l.stateEndLit + return lexCont + case r == '+': + l.state = l.statePos + return lexCont + case r == '-': + l.state = l.stateNeg + return lexCont + case r == '.': + l.token = tokenSymbol + l.state = l.stateDotPre + return lexCont + case r == '"': + l.state = l.stateInString + return lexCont + case r == '\\': + l.state = l.stateChar + return lexCont + case okSymbolFirst(r) || u.IsLetter(r): + l.token = tokenSymbol + l.state = l.stateSym + return lexCont + case '0' < r && r <= '9': + l.state = l.state1 + return lexCont + case r == '0': + l.state = l.state0 + return lexCont + case r == ';': + l.state = l.stateComment + return lexIgnore + } + return l.error(r, "- unexpected rune") +} + +func (l *lexer) stateComment(r rune) lexState { + if r == '\n' { + l.state = l.stateBegin + } + return lexIgnore +} + +func (l *lexer) stateEndLit(r rune) lexState { + if isWhitespace(r) || r == '"' || r == '{' || r == '[' || r == '(' || r == ')' || r == ']' || r == '}' || r == '\\' || r == ';' { + return lexEndPrev + } + return l.error(r, "- unexpected rune after legal "+l.token.String()) +} + +func (l *lexer) stateKeyword(r rune) lexState { + switch { + case r == ':': + l.state = l.stateError + l.err = &SyntaxError{"EDN does not support namespace-qualified keywords", l.position} + return lexError + case r == '/': + l.state = l.stateError + l.err = &SyntaxError{"keywords cannot begin with /", l.position} + return lexError + case okSymbol(r) || u.IsLetter(r) || ('0' <= r && r <= '9'): + l.token = tokenKeyword + l.state = l.stateSym + return lexCont + } + return l.error(r, "after keyword start") +} + +// examples: 'foo' 'bar' +// we 
reuse this from the keyword states, so the token is not set at the end,
+// but by the caller before entering this state
+func (l *lexer) stateSym(r rune) lexState {
+	switch {
+	case okSymbol(r) || u.IsLetter(r) || ('0' <= r && r <= '9'):
+		l.state = l.stateSym
+		return lexCont
+	case r == '/':
+		l.state = l.stateSlash
+		return lexCont
+	}
+	return l.stateEndLit(r)
+}
+
+// example: 'foo/'
+func (l *lexer) stateSlash(r rune) lexState {
+	switch {
+	case okSymbol(r) || u.IsLetter(r) || ('0' <= r && r <= '9'):
+		l.state = l.statePostSlash
+		return lexCont
+	}
+	return l.error(r, "directly after '/' in namespaced symbol")
+}
+
+// example: 'foo/bar'
+func (l *lexer) statePostSlash(r rune) lexState {
+	switch {
+	case okSymbol(r) || u.IsLetter(r) || ('0' <= r && r <= '9'):
+		l.state = l.statePostSlash
+		return lexCont
+	}
+	return l.stateEndLit(r)
+}
+
+// example: '-'
+func (l *lexer) stateNeg(r rune) lexState {
+	switch {
+	case r == '0':
+		l.state = l.state0
+		return lexCont
+	case '1' <= r && r <= '9':
+		l.state = l.state1
+		return lexCont
+	case okSymbol(r) || u.IsLetter(r):
+		l.token = tokenSymbol
+		l.state = l.stateSym
+		return lexCont
+	case r == '/':
+		l.token = tokenSymbol
+		l.state = l.stateSlash
+		return lexCont
+	}
+	l.token = tokenSymbol
+	return l.stateEndLit(r)
+}
+
+// example: '+'
+func (l *lexer) statePos(r rune) lexState {
+	switch {
+	case r == '0':
+		l.state = l.state0
+		return lexCont
+	case '1' <= r && r <= '9':
+		l.state = l.state1
+		return lexCont
+	case okSymbol(r) || u.IsLetter(r):
+		l.token = tokenSymbol
+		l.state = l.stateSym
+		return lexCont
+	case r == '/':
+		l.token = tokenSymbol
+		l.state = l.stateSlash
+		return lexCont
+	}
+	l.token = tokenSymbol
+	return l.stateEndLit(r)
+}
+
+// value is '0'
+func (l *lexer) state0(r rune) lexState {
+	switch {
+	case r == '.':
+		l.state = l.stateDot
+		return lexCont
+	case r == 'e' || r == 'E':
+		l.state = l.stateE
+		return lexCont
+	case r == 'M': // bigdecimal
+		l.token = tokenFloat
+		l.state = l.stateEndLit
+		return lexCont // must be ws or delimiter afterwards
+	case r == 'N': // bigint
+		l.token = tokenInt
+		l.state = l.stateEndLit
+		return lexCont // must be ws or delimiter afterwards
+	}
+	l.token = tokenInt
+	return l.stateEndLit(r)
+}
+
+// any number not starting with 0. Examples: '10', '34'
+func (l *lexer) state1(r rune) lexState {
+	if '0' <= r && r <= '9' {
+		return lexCont
+	}
+	return l.state0(r)
+}
+
+// example: '.', can only receive non-numerics here
+func (l *lexer) stateDotPre(r rune) lexState {
+	switch {
+	case okSymbol(r) || u.IsLetter(r):
+		l.token = tokenSymbol
+		l.state = l.stateSym
+		return lexCont
+	case r == '/':
+		l.token = tokenSymbol
+		l.state = l.stateSlash
+		return lexCont
+	}
+	return l.stateEndLit(r)
+}
+
+// after reading numeric values plus '.', example: '12.'
+func (l *lexer) stateDot(r rune) lexState { + if '0' <= r && r <= '9' { + l.state = l.stateDot0 + return lexCont + } + // TODO (?): The spec says that there must be numbers after the dot, yet + // (clojure.edn/read-string "1.e1") returns 10.0 + return l.error(r, "after decimal point in numeric literal") +} + +// after reading numeric values plus '.', example: '12.34' +func (l *lexer) stateDot0(r rune) lexState { + switch { + case '0' <= r && r <= '9': + return lexCont + case r == 'e' || r == 'E': + l.state = l.stateE + return lexCont + case r == 'M': + l.token = tokenFloat + l.state = l.stateEndLit + return lexCont + } + l.token = tokenFloat + return l.stateEndLit(r) +} + +// stateE is the state after reading the mantissa and e in a number, +// such as after reading `314e` or `0.314e`. +func (l *lexer) stateE(r rune) lexState { + if r == '+' || r == '-' { + l.state = l.stateESign + return lexCont + } + return l.stateESign(r) +} + +// stateESign is the state after reading the mantissa, e, and sign in a number, +// such as after reading `314e-` or `0.314e+`. +func (l *lexer) stateESign(r rune) lexState { + if '0' <= r && r <= '9' { + l.state = l.stateE0 + return lexCont + } + return l.error(r, "in exponent of numeric literal") +} + +// stateE0 is the state after reading the mantissa, e, optional sign, +// and at least one digit of the exponent in a number, +// such as after reading `314e-2` or `0.314e+1` or `3.14e0`. +func (l *lexer) stateE0(r rune) lexState { + if '0' <= r && r <= '9' { + return lexCont + } + if r == 'M' { + l.token = tokenFloat + l.state = l.stateEndLit + return lexCont + } + l.token = tokenFloat + return l.stateEndLit(r) +} + +var ( + newlineRunes = []rune("newline") + returnRunes = []rune("return") + spaceRunes = []rune("space") + tabRunes = []rune("tab") + formfeedRunes = []rune("formfeed") +) + +// stateChar after a backslash ('\') +func (l *lexer) stateChar(r rune) lexState { + switch { + // oh my, I'm so happy that none of these share the same prefix. 
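+	// (\newline, \return, \space, \tab and \formfeed begin with distinct
+	// letters, so the first rune picks which suffix we expect.)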
+ case r == 'n': + l.count = 1 + l.expecting = newlineRunes + l.state = l.stateSpecialChar + return lexCont + case r == 'r': + l.count = 1 + l.expecting = returnRunes + l.state = l.stateSpecialChar + return lexCont + case r == 's': + l.count = 1 + l.expecting = spaceRunes + l.state = l.stateSpecialChar + return lexCont + case r == 't': + l.count = 1 + l.expecting = tabRunes + l.state = l.stateSpecialChar + return lexCont + case r == 'f': + l.count = 1 + l.expecting = formfeedRunes + l.state = l.stateSpecialChar + return lexCont + case r == 'u': + l.count = 0 + l.state = l.stateUnicodeChar + return lexCont + case isWhitespace(r): + l.state = l.stateError + l.err = &SyntaxError{"backslash cannot be followed by whitespace", l.position} + return lexError + } + // default is single name character + l.token = tokenChar + l.state = l.stateEndLit + return lexCont +} + +func (l *lexer) stateSpecialChar(r rune) lexState { + if r == l.expecting[l.count] { + l.count++ + if l.count == len(l.expecting) { + l.token = tokenChar + l.state = l.stateEndLit + return lexCont + } + return lexCont + } + if l.count != 1 { + return l.error(r, "after start of special character") + } + // it is likely just a normal character, like 'n' or 't' + l.token = tokenChar + return l.stateEndLit(r) +} + +func (l *lexer) stateUnicodeChar(r rune) lexState { + if '0' <= r && r <= '9' || 'a' <= r && r <= 'f' || 'A' <= r && r <= 'F' { + l.count++ + if l.count == 4 { + l.token = tokenChar + l.state = l.stateEndLit + } + return lexCont + } + if l.count != 0 { + return l.error(r, "after start of unicode character") + } + // likely just '\u' + l.token = tokenChar + return l.stateEndLit(r) +} + +// stateInString is the state after reading `"`. +func (l *lexer) stateInString(r rune) lexState { + if r == '"' { + l.token = tokenString + return lexEnd + } + if r == '\\' { + l.state = l.stateInStringEsc + return lexCont + } + return lexCont +} + +// stateInStringEsc is the state after reading `"\` during a quoted string. +func (l *lexer) stateInStringEsc(r rune) lexState { + switch r { + case 'b', 'f', 'n', 'r', 't', '\\', '/', '"': + l.state = l.stateInString + return lexCont + case 'u': + l.state = l.stateInStringEscU + l.count = 0 + return lexCont + } + return l.error(r, "in string escape code") +} + +// stateInStringEscU is the state after reading `"\u` and l.count elements in a +// quoted string. +func (l *lexer) stateInStringEscU(r rune) lexState { + if '0' <= r && r <= '9' || 'a' <= r && r <= 'f' || 'A' <= r && r <= 'F' { + l.count++ + if l.count == 4 { + l.state = l.stateInString + } + return lexCont + } + // numbers + return l.error(r, "in \\u hexadecimal character escape") +} + +// after reading the character '#' +func (l *lexer) statePound(r rune) lexState { + switch { + case r == '_': + l.token = tokenDiscard + return lexEnd + case r == '{': + l.token = tokenSetStart + return lexEnd + case u.IsLetter(r): + l.token = tokenTag + l.state = l.stateSym + return lexCont + } + return l.error(r, `after token starting with "#"`) +} + +func (l *lexer) stateError(r rune) lexState { + return lexError +} + +// error records an error and switches to the error state. 
+func (l *lexer) error(r rune, context string) lexState { + l.state = l.stateError + l.err = &SyntaxError{"invalid character " + quoteRune(r) + " " + context, l.position} + return lexError +} + +// quoteRune formats r as a quoted rune literal +func quoteRune(r rune) string { + // special cases - different from quoted strings + if r == '\'' { + return `'\''` + } + if r == '"' { + return `'"'` + } + + // use quoted string with different quotation marks + s := strconv.Quote(string(r)) + return "'" + s[1:len(s)-1] + "'" +} diff --git a/vendor/olympos.io/encoding/edn/pprint.go b/vendor/olympos.io/encoding/edn/pprint.go new file mode 100644 index 00000000..57f44afe --- /dev/null +++ b/vendor/olympos.io/encoding/edn/pprint.go @@ -0,0 +1,245 @@ +// Copyright 2015 Jean Niklas L'orange. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edn + +import ( + "bytes" + "io" + "unicode/utf8" +) + +var ( + // we can't call it spaceBytes not to conflict with decode.go's spaceBytes. + spaceOutputBytes = []byte(" ") + commaOutputBytes = []byte(",") +) + +func newline(dst io.Writer, prefix, indent string, depth int) { + dst.Write([]byte{'\n'}) + dst.Write([]byte(prefix)) + for i := 0; i < depth; i++ { + dst.Write([]byte(indent)) + } +} + +// Indent writes to dst an indented form of the EDN-encoded src. Each EDN +// collection begins on a new, indented line beginning with prefix followed by +// one or more copies of indent according to the indentation nesting. The data +// written to dst does not begin with the prefix nor any indentation, and has +// no trailing newline, to make it easier to embed inside other formatted EDN +// data. +// +// Indent filters away whitespace, including comments and discards. +func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error { + origLen := dst.Len() + err := IndentStream(dst, bytes.NewBuffer(src), prefix, indent) + if err != nil { + dst.Truncate(origLen) + } + return err +} + +// IndentStream is an implementation of PPrint for generic readers and writers +func IndentStream(dst io.Writer, src io.Reader, prefix, indent string) error { + var lex lexer + lex.reset() + tokStack := newTokenStack() + curType := tokenError + curSize := 0 + d := NewDecoder(src) + depth := 0 + for { + bs, tt, err := d.nextToken() + if err != nil { + return err + } + err = tokStack.push(tt) + if err != nil { + return err + } + prevType := curType + prevSize := curSize + if len(tokStack.toks) > 0 { + curType = tokStack.peek() + curSize = tokStack.peekCount() + } + switch tt { + case tokenMapStart, tokenVectorStart, tokenListStart, tokenSetStart: + if prevType == tokenMapStart { + dst.Write([]byte{' '}) + } else if depth > 0 { + newline(dst, prefix, indent, depth) + } + dst.Write(bs) + depth++ + case tokenVectorEnd, tokenListEnd, tokenMapEnd: // tokenSetEnd == tokenMapEnd + depth-- + if prevSize > 0 { // suppress indent for empty collections + newline(dst, prefix, indent, depth) + } + // all of these are of length 1 in bytes, so utilise this for perf + dst.Write(bs) + case tokenTag: + // need to know what the previous type was. 
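+			// (A tag and its value stay on the same line; whether a comma or
+			// a newline precedes them depends on the enclosing collection.)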
+			switch prevType {
+			case tokenMapStart:
+				if prevSize%2 == 0 { // If previous size modulo 2 is equal to 0, we're a key
+					if prevSize > 0 {
+						dst.Write(commaOutputBytes)
+					}
+					newline(dst, prefix, indent, depth)
+				} else { // We're a value, add a space after the key
+					dst.Write(spaceOutputBytes)
+				}
+				dst.Write(bs)
+				dst.Write(spaceOutputBytes)
+			case tokenSetStart, tokenVectorStart, tokenListStart:
+				newline(dst, prefix, indent, depth)
+				dst.Write(bs)
+				dst.Write(spaceOutputBytes)
+			default: // tokenError or nested tag
+				dst.Write(bs)
+				dst.Write(spaceOutputBytes)
+			}
+		default:
+			switch prevType {
+			case tokenMapStart:
+				if prevSize%2 == 0 { // If previous size modulo 2 is equal to 0, we're a key
+					if prevSize > 0 {
+						dst.Write(commaOutputBytes)
+					}
+					newline(dst, prefix, indent, depth)
+				} else { // We're a value, add a space after the key
+					dst.Write(spaceOutputBytes)
+				}
+				dst.Write(bs)
+			case tokenSetStart, tokenVectorStart, tokenListStart:
+				newline(dst, prefix, indent, depth)
+				dst.Write(bs)
+			default: // toplevel or nested tag. This should collapse the whole tag tower
+				dst.Write(bs)
+			}
+		}
+		if tokStack.done() {
+			break
+		}
+	}
+	return nil
+}
+
+// PPrintOpts is a configuration struct for PPrint. The values in this struct
+// have no effect as of now.
+type PPrintOpts struct {
+	RightMargin int
+	MiserWidth  int
+}
+
+func pprintIndent(dst io.Writer, shift int) {
+	spaces := make([]byte, shift+1)
+
+	spaces[0] = '\n'
+
+	// TODO: This may be slower than caching the size as a byte slice
+	for i := 1; i <= shift; i++ {
+		spaces[i] = ' '
+	}
+
+	dst.Write(spaces)
+}
+
+// PPrint writes to dst an indented form of the EDN-encoded src. This
+// implementation attempts to write idiomatic/readable EDN values, in a fashion
+// close to (but not quite equal to) clojure.pprint/pprint.
+//
+// PPrint filters away whitespace, including comments and discards.
+func PPrint(dst *bytes.Buffer, src []byte, opt *PPrintOpts) error {
+	origLen := dst.Len()
+	err := PPrintStream(dst, bytes.NewBuffer(src), opt)
+	if err != nil {
+		dst.Truncate(origLen)
+	}
+	return err
+}
+
+// PPrintStream is an implementation of PPrint for generic readers and writers
+func PPrintStream(dst io.Writer, src io.Reader, opt *PPrintOpts) error {
+	var lex lexer
+	var col, prevCollStart, curSize int
+	var prevColl bool
+
+	lex.reset()
+	tokStack := newTokenStack()
+
+	shift := make([]int, 1, 8) // pre-allocate some space
+	curType := tokenError
+	d := NewDecoder(src)
+
+	for {
+		bs, tt, err := d.nextToken()
+		if err != nil {
+			return err
+		}
+		err = tokStack.push(tt)
+		if err != nil {
+			return err
+		}
+		prevType := curType
+		prevSize := curSize
+		if len(tokStack.toks) > 0 {
+			curType = tokStack.peek()
+			curSize = tokStack.peekCount()
+		}
+		// Indentation
+		switch tt {
+		case tokenVectorEnd, tokenListEnd, tokenMapEnd:
+		default:
+			switch prevType {
+			case tokenMapStart:
+				if prevSize%2 == 0 && prevSize > 0 {
+					dst.Write(commaOutputBytes)
+					pprintIndent(dst, shift[len(shift)-1])
+					col = shift[len(shift)-1]
+				} else if prevSize%2 == 1 { // We're a value, add a space after the key
+					dst.Write(spaceOutputBytes)
+					col++
+				}
+			case tokenSetStart, tokenVectorStart, tokenListStart:
+				if prevColl {
+					// begin on new line where prevColl started
+					// This will look so strange for heterogeneous maps.
+					pprintIndent(dst, prevCollStart)
+					col = prevCollStart
+				} else if prevSize > 0 {
+					dst.Write(spaceOutputBytes)
+					col++
+				}
+			}
+		}
+		switch tt {
+		case tokenMapStart, tokenVectorStart, tokenListStart, tokenSetStart:
+			dst.Write(bs)
+			col += len(bs)             // either 2 or 1
+			shift = append(shift, col) // we only use maps for now, but we'll utilise this more thoroughly later on
+		case tokenVectorEnd, tokenListEnd, tokenMapEnd: // tokenSetEnd == tokenMapEnd
+			dst.Write(bs) // all of these are of length 1 in bytes, so this is ok
+			prevCollStart = shift[len(shift)-1] - 1
+			shift = shift[:len(shift)-1]
+		case tokenTag:
+			bslen := utf8.RuneCount(bs)
+			dst.Write(bs)
+			dst.Write(spaceOutputBytes)
+			col += bslen + 1
+		default:
+			bslen := utf8.RuneCount(bs)
+			dst.Write(bs)
+			col += bslen
+		}
+		prevColl = (tt == tokenMapEnd || tt == tokenVectorEnd || tt == tokenListEnd)
+		if tokStack.done() {
+			break
+		}
+	}
+	return nil
+}
diff --git a/vendor/olympos.io/encoding/edn/tags.go b/vendor/olympos.io/encoding/edn/tags.go
new file mode 100644
index 00000000..b9d5c46a
--- /dev/null
+++ b/vendor/olympos.io/encoding/edn/tags.go
@@ -0,0 +1,44 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edn
+
+import (
+	"strings"
+)
+
+// tagOptions is the string following a comma in a struct field's "edn"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's edn tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+	if idx := strings.Index(tag, ","); idx != -1 {
+		return tag[:idx], tagOptions(tag[idx+1:])
+	}
+	return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular optionName flag. optionName must be
+// surrounded by a string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+	if len(o) == 0 {
+		return false
+	}
+	s := string(o)
+	for s != "" {
+		var next string
+		i := strings.Index(s, ",")
+		if i >= 0 {
+			s, next = s[:i], s[i+1:]
+		}
+		if s == optionName {
+			return true
+		}
+		s = next
+	}
+	return false
+}
diff --git a/vendor/olympos.io/encoding/edn/types.go b/vendor/olympos.io/encoding/edn/types.go
new file mode 100644
index 00000000..e45b2ee5
--- /dev/null
+++ b/vendor/olympos.io/encoding/edn/types.go
@@ -0,0 +1,149 @@
+// Copyright 2015-2017 Jean Niklas L'orange. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edn
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+// RawMessage is a raw encoded, but valid, EDN value. It implements Marshaler
+// and Unmarshaler and can be used to delay EDN decoding or precompute an EDN
+// encoding.
+type RawMessage []byte
+
+// MarshalEDN returns m as the EDN encoding of m.
+func (m RawMessage) MarshalEDN() ([]byte, error) {
+	if m == nil {
+		return []byte("nil"), nil
+	}
+	return m, nil
+}
+
+// UnmarshalEDN sets *m to a copy of data.
+func (m *RawMessage) UnmarshalEDN(data []byte) error {
+	if m == nil {
+		return errors.New("edn.RawMessage: UnmarshalEDN on nil pointer")
+	}
+	*m = append((*m)[0:0], data...)
+	return nil
+}
+
+// A Keyword is an EDN keyword; the Go string does not include the leading ':'.
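+//
+// For example, Keyword("foo") prints and marshals as :foo.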
+type Keyword string + +func (k Keyword) String() string { + return fmt.Sprintf(":%s", string(k)) +} + +func (k Keyword) MarshalEDN() ([]byte, error) { + return []byte(k.String()), nil +} + +// A Symbol is an EDN symbol. +type Symbol string + +func (s Symbol) String() string { + return string(s) +} + +func (s Symbol) MarshalEDN() ([]byte, error) { + return []byte(s), nil +} + +// A Tag is a tagged value. The Tagname represents the name of the tag, and the +// Value is the value of the element. +type Tag struct { + Tagname string + Value interface{} +} + +func (t Tag) String() string { + return fmt.Sprintf("#%s %v", t.Tagname, t.Value) +} + +func (t Tag) MarshalEDN() ([]byte, error) { + str := []byte(fmt.Sprintf(`#%s `, t.Tagname)) + b, err := Marshal(t.Value) + if err != nil { + return nil, err + } + return append(str, b...), nil +} + +func (t *Tag) UnmarshalEDN(bs []byte) error { + // read actual tag, using the lexer. + var lex lexer + lex.reset() + buf := bufio.NewReader(bytes.NewBuffer(bs)) + start := 0 + endTag := 0 +tag: + for { + r, rlen, err := buf.ReadRune() + if err != nil { + return err + } + + ls := lex.state(r) + switch ls { + case lexIgnore: + start += rlen + endTag += rlen + case lexError: + return lex.err + case lexEndPrev: + break tag + case lexEnd: // unexpected, assuming tag which is not ending with lexEnd + return errUnexpected + case lexCont: + endTag += rlen + } + } + t.Tagname = string(bs[start+1 : endTag]) + return Unmarshal(bs[endTag:], &t.Value) +} + +// A Rune type is a wrapper for a rune. It can be used to encode runes as +// characters instead of int32 values. +type Rune rune + +func (r Rune) MarshalEDN() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, 0, 10)) + encodeRune(buf, rune(r)) + return buf.Bytes(), nil +} + +func encodeRune(buf *bytes.Buffer, r rune) { + const hex = "0123456789abcdef" + if !isWhitespace(r) { + buf.WriteByte('\\') + buf.WriteRune(r) + } else { + switch r { + case '\b': + buf.WriteString(`\backspace`) + case '\f': + buf.WriteString(`\formfeed`) + case '\n': + buf.WriteString(`\newline`) + case '\r': + buf.WriteString(`\return`) + case '\t': + buf.WriteString(`\tab`) + case ' ': + buf.WriteString(`\space`) + default: + buf.WriteByte('\\') + buf.WriteByte('u') + buf.WriteByte(hex[r>>12&0xF]) + buf.WriteByte(hex[r>>8&0xF]) + buf.WriteByte(hex[r>>4&0xF]) + buf.WriteByte(hex[r&0xF]) + } + } +}
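+
+// Example (illustrative): Rune('\n') marshals to the EDN literal \newline,
+// whereas a plain rune is an int32 and marshals to 10.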