diff --git a/.bingo/Variables.mk b/.bingo/Variables.mk
index e7f1b59055..6227d0c6bf 100644
--- a/.bingo/Variables.mk
+++ b/.bingo/Variables.mk
@@ -29,6 +29,12 @@ $(BINGO): $(BINGO_DIR)/bingo.mod
 	@echo "(re)installing $(GOBIN)/bingo-v0.9.0"
 	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=bingo.mod -o=$(GOBIN)/bingo-v0.9.0 "github.com/bwplotka/bingo"
 
+CAPNPC_GO := $(GOBIN)/capnpc-go-v3.0.1-alpha.2.0.20240830165715-46ccd63a72af
+$(CAPNPC_GO): $(BINGO_DIR)/capnpc-go.mod
+	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
+	@echo "(re)installing $(GOBIN)/capnpc-go-v3.0.1-alpha.2.0.20240830165715-46ccd63a72af"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=capnpc-go.mod -o=$(GOBIN)/capnpc-go-v3.0.1-alpha.2.0.20240830165715-46ccd63a72af "capnproto.org/go/capnp/v3/capnpc-go"
+
 FAILLINT := $(GOBIN)/faillint-v1.13.0
 $(FAILLINT): $(BINGO_DIR)/faillint.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
diff --git a/.bingo/capnpc-go.mod b/.bingo/capnpc-go.mod
new file mode 100644
index 0000000000..51f3bf45c4
--- /dev/null
+++ b/.bingo/capnpc-go.mod
@@ -0,0 +1,5 @@
+module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
+
+go 1.23.1
+
+require capnproto.org/go/capnp/v3 v3.0.1-alpha.2.0.20240830165715-46ccd63a72af // capnpc-go
diff --git a/.bingo/capnpc-go.sum b/.bingo/capnpc-go.sum
new file mode 100644
index 0000000000..6e97c04068
--- /dev/null
+++ b/.bingo/capnpc-go.sum
@@ -0,0 +1,6 @@
+capnproto.org/go/capnp/v3 v3.0.1-alpha.2.0.20240830165715-46ccd63a72af h1:A5wxH0ZidOtYYUGjhtBaRuB87M73bGfc06uWB8sHpg0=
+capnproto.org/go/capnp/v3 v3.0.1-alpha.2.0.20240830165715-46ccd63a72af/go.mod h1:2vT5D2dtG8sJGEoEKU17e+j7shdaYp1Myl8X03B3hmc=
+github.com/colega/zeropool v0.0.0-20230505084239-6fb4a4f75381 h1:d5EKgQfRQvO97jnISfR89AiCCCJMwMFoSxUiU0OGCRU=
+github.com/colega/zeropool v0.0.0-20230505084239-6fb4a4f75381/go.mod h1:OU76gHeRo8xrzGJU3F3I1CqX1ekM8dfJw0+wPeMwnp0=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
diff --git a/.bingo/variables.env b/.bingo/variables.env
index a7e7655aa8..c8cead57d2 100644
--- a/.bingo/variables.env
+++ b/.bingo/variables.env
@@ -12,6 +12,8 @@ ALERTMANAGER="${GOBIN}/alertmanager-v0.27.0"
 
 BINGO="${GOBIN}/bingo-v0.9.0"
 
+CAPNPC_GO="${GOBIN}/capnpc-go-v3.0.1-alpha.2.0.20240830165715-46ccd63a72af"
+
 FAILLINT="${GOBIN}/faillint-v1.13.0"
 
 GOIMPORTS="${GOBIN}/goimports-v0.23.0"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index feb7a7d772..3e1314b242 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,7 +19,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re
 - [#7658](https://github.com/thanos-io/thanos/pull/7658) Store: Fix panic because too small buffer in pool.
 - [#7643](https://github.com/thanos-io/thanos/pull/7643) Receive: fix thanos_receive_write_{timeseries,samples} stats
 - [#7644](https://github.com/thanos-io/thanos/pull/7644) fix(ui): add null check to find overlapping blocks logic
-- [#7814](https://github.com/thanos-io/thanos/pull/7814) Store: label_values: if matchers contain __name__=="something", do not add != "" to fetch less postings. 
+- [#7814](https://github.com/thanos-io/thanos/pull/7814) Store: label_values: if matchers contain **name**=="something", do not add != "" to fetch less postings.
 - [#7679](https://github.com/thanos-io/thanos/pull/7679) Query: respect store.limit.* flags when evaluating queries
 - [#7821](https://github.com/thanos-io/thanos/pull/7679) Query/Receive: Fix coroutine leak introduced in https://github.com/thanos-io/thanos/pull/7796.
 
@@ -29,6 +29,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re
 - [#7429](https://github.com/thanos-io/thanos/pull/7429): Reloader: introduce `TolerateEnvVarExpansionErrors` to allow suppressing errors when expanding environment variables in the configuration file. When set, this will ensure that the reloader won't consider the operation to fail when an unset environment variable is encountered. Note that all unset environment variables are left as is, whereas all set environment variables are expanded as usual.
 - [#7560](https://github.com/thanos-io/thanos/pull/7560) Query: Added the possibility of filtering rules by rule_name, rule_group or file to HTTP api.
 - [#7652](https://github.com/thanos-io/thanos/pull/7652) Store: Implement metadata API limit in stores.
+- [#7659](https://github.com/thanos-io/thanos/pull/7659) Receive: Add support for replication using [Cap'n Proto](https://capnproto.org/). This protocol has a lower CPU and memory footprint, which leads to a reduction in resource usage in Receivers. Before enabling it, make sure that all receivers are updated to a version which supports this replication method.
 
 ### Changed
diff --git a/Makefile b/Makefile
index 624220a6a6..fd3f4acc61 100644
--- a/Makefile
+++ b/Makefile
@@ -295,6 +295,13 @@ proto: ## Generates Go files from Thanos proto files.
 proto: check-git $(GOIMPORTS) $(PROTOC) $(PROTOC_GEN_GOGOFAST)
 	@GOIMPORTS_BIN="$(GOIMPORTS)" PROTOC_BIN="$(PROTOC)" PROTOC_GEN_GOGOFAST_BIN="$(PROTOC_GEN_GOGOFAST)" PROTOC_VERSION="$(PROTOC_VERSION)" scripts/genproto.sh
 
+.PHONY: capnp
+capnp: ## Generates Go files from Thanos capnproto files.
+capnp: check-git
+	capnp compile -I $(shell go list -m -f '{{.Dir}}' capnproto.org/go/capnp/v3)/std -ogo pkg/receive/writecapnp/write_request.capnp
+	@$(GOIMPORTS) -w pkg/receive/writecapnp/write_request.capnp.go
+	go run ./scripts/copyright
+
 .PHONY: tarballs-release
 tarballs-release: ## Build tarballs.
tarballs-release: $(PROMU) diff --git a/cmd/thanos/receive.go b/cmd/thanos/receive.go index 97054482db..a3b638de76 100644 --- a/cmd/thanos/receive.go +++ b/cmd/thanos/receive.go @@ -5,6 +5,8 @@ package main import ( "context" + "fmt" + "net" "os" "path" "strings" @@ -271,6 +273,7 @@ func runReceive( Limiter: limiter, AsyncForwardWorkerCount: conf.asyncForwardWorkerCount, + ReplicationProtocol: receive.ReplicationProtocol(conf.replicationProtocol), }) grpcProbe := prober.NewGRPC() @@ -465,6 +468,26 @@ func runReceive( } } + { + capNProtoWriter := receive.NewCapNProtoWriter(logger, dbs, &receive.CapNProtoWriterOptions{ + TooFarInFutureTimeWindow: int64(time.Duration(*conf.tsdbTooFarInFutureTimeWindow)), + }) + handler := receive.NewCapNProtoHandler(logger, capNProtoWriter) + listener, err := net.Listen("tcp", conf.replicationAddr) + if err != nil { + return err + } + server := receive.NewCapNProtoServer(listener, handler, logger) + g.Add(func() error { + return server.ListenAndServe() + }, func(err error) { + server.Shutdown() + if err := listener.Close(); err != nil { + level.Warn(logger).Log("msg", "Cap'n Proto server did not shut down gracefully", "err", err.Error()) + } + }) + } + level.Info(logger).Log("msg", "starting receiver") return nil } @@ -795,6 +818,7 @@ type receiveConfig struct { grpcConfig grpcConfig + replicationAddr string rwAddress string rwServerCert string rwServerKey string @@ -816,17 +840,18 @@ type receiveConfig struct { hashringsFileContent string hashringsAlgorithm string - refreshInterval *model.Duration - endpoint string - tenantHeader string - tenantField string - tenantLabelName string - defaultTenantID string - replicaHeader string - replicationFactor uint64 - forwardTimeout *model.Duration - maxBackoff *model.Duration - compression string + refreshInterval *model.Duration + endpoint string + tenantHeader string + tenantField string + tenantLabelName string + defaultTenantID string + replicaHeader string + replicationFactor uint64 + forwardTimeout *model.Duration + maxBackoff *model.Duration + compression string + replicationProtocol string tsdbMinBlockDuration *model.Duration tsdbMaxBlockDuration *model.Duration @@ -929,6 +954,13 @@ func (rc *receiveConfig) registerFlag(cmd extkingpin.FlagClause) { cmd.Flag("receive.replication-factor", "How many times to replicate incoming write requests.").Default("1").Uint64Var(&rc.replicationFactor) + replicationProtocols := []string{string(receive.ProtobufReplication), string(receive.CapNProtoReplication)} + cmd.Flag("receive.replication-protocol", "The protocol to use for replicating remote-write requests. One of "+strings.Join(replicationProtocols, ", ")). + Default(string(receive.ProtobufReplication)). + EnumVar(&rc.replicationProtocol, replicationProtocols...) 
+
+	cmd.Flag("receive.capnproto-address", "Address for the Cap'n Proto server.").Default(fmt.Sprintf("0.0.0.0:%s", receive.DefaultCapNProtoPort)).StringVar(&rc.replicationAddr)
+
 	rc.forwardTimeout = extkingpin.ModelDuration(cmd.Flag("receive-forward-timeout", "Timeout for each forward request.").Default("5s").Hidden())
 
 	rc.maxBackoff = extkingpin.ModelDuration(cmd.Flag("receive-forward-max-backoff", "Maximum backoff for each forward fan-out request").Default("5s").Hidden())
diff --git a/docs/components/receive.md b/docs/components/receive.md
index 52871f3696..86c67c2acd 100644
--- a/docs/components/receive.md
+++ b/docs/components/receive.md
@@ -26,6 +26,24 @@ If you are using the `hashmod` algorithm and wish to migrate to `ketama`, the si
 
 This algorithm uses a `hashmod` function over all labels to decide which receiver is responsible for a given timeseries. This is the default algorithm due to historical reasons. However, its usage for new Receive installations is discouraged since adding new Receiver nodes leads to series churn and memory usage spikes.
 
+### Replication protocols
+
+By default, Receivers replicate data using Protobuf over gRPC. Deserializing protobuf-encoded messages can be resource-intensive and cause significant GC pressure. Alternatively, you can use [Cap'n Proto](https://capnproto.org/) for the replication encoding and as the RPC framework.
+
+To enable this mode, use the `receive.replication-protocol=capnproto` option on the receiver. Thanos will try to infer the Cap'n Proto address of each peer in the hashring from the existing gRPC address. You can also set the Cap'n Proto address explicitly as follows:
+
+```json
+[
+    {
+        "endpoints": [
+            {"address": "node-1:10901", "capnproto_address": "node-1:19391"},
+            {"address": "node-2:10901", "capnproto_address": "node-2:19391"},
+            {"address": "node-3:10901", "capnproto_address": "node-3:19391"}
+        ]
+    }
+]
+```
+
 ### Hashring management and autoscaling in Kubernetes
 
 The [Thanos Receive Controller](https://github.com/observatorium/thanos-receive-controller) project aims to automate hashring management when running Thanos in Kubernetes. In combination with the Ketama hashring algorithm, this controller can also be used to keep hashrings up to date when Receivers are scaled automatically using an HPA or [Keda](https://keda.sh/).
@@ -312,7 +330,8 @@ Please see the metric `thanos_receive_forward_delay_seconds` to see if you need
 
 The following formula is used for calculating quorum:
 
-```go mdox-exec="sed -n '999,1008p' pkg/receive/handler.go"
+```go mdox-exec="sed -n '1012,1022p' pkg/receive/handler.go"
+// writeQuorum returns minimum number of replicas that has to confirm write success before claiming replication success.
 func (h *Handler) writeQuorum() int {
 	// NOTE(GiedriusS): this is here because otherwise RF=2 doesn't make sense as all writes
 	// would need to succeed all the time. Another way to think about it is when migrating
@@ -392,6 +411,8 @@ Flags:
                                  Path to YAML file that contains object
                                  store configuration. See format details:
                                  https://thanos.io/tip/thanos/storage.md/#configuration
+      --receive.capnproto-address="0.0.0.0:19391"
+                                 Address for the Cap'n Proto server.
       --receive.default-tenant-id="default-tenant"
                                  Default tenant ID to use when none is provided
                                  via a header.
@@ -438,6 +459,10 @@ Flags:
       --receive.replication-factor=1
                                  How many times to replicate incoming write
                                  requests.
+      --receive.replication-protocol=protobuf
+                                 The protocol to use for replicating
+                                 remote-write requests.
One of protobuf, + capnproto --receive.split-tenant-label-name="" Label name through which the request will be split into multiple tenants. This takes diff --git a/go.mod b/go.mod index 98c83d186a..1d510f078b 100644 --- a/go.mod +++ b/go.mod @@ -112,6 +112,7 @@ require ( ) require ( + capnproto.org/go/capnp/v3 v3.0.1-alpha.1 github.com/cortexproject/promqlsmith v0.0.0-20240506042652-6cfdd9739a5e github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 github.com/hashicorp/golang-lru/v2 v2.0.7 @@ -132,6 +133,7 @@ require ( github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect github.com/cilium/ebpf v0.11.0 // indirect + github.com/colega/zeropool v0.0.0-20230505084239-6fb4a4f75381 // indirect github.com/containerd/cgroups/v3 v3.0.3 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/elastic/go-licenser v0.3.1 // indirect diff --git a/go.sum b/go.sum index 0e75a24782..b20edece58 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +capnproto.org/go/capnp/v3 v3.0.1-alpha.1 h1:hYEclwXEKsnu+PdHASdx3nLP0fC9kZnR+x1CEvMp9ck= +capnproto.org/go/capnp/v3 v3.0.1-alpha.1/go.mod h1:B+ZjwFmHwTYv201x6CdIo7MmDC/TROJDa00kbjTnv1s= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -1496,6 +1498,8 @@ github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/colega/zeropool v0.0.0-20230505084239-6fb4a4f75381 h1:d5EKgQfRQvO97jnISfR89AiCCCJMwMFoSxUiU0OGCRU= +github.com/colega/zeropool v0.0.0-20230505084239-6fb4a4f75381/go.mod h1:OU76gHeRo8xrzGJU3F3I1CqX1ekM8dfJw0+wPeMwnp0= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -2094,6 +2098,8 @@ github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI= github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= +github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= @@ -2249,6 +2255,10 @@ 
github.com/thanos-io/promql-engine v0.0.0-20240921092401-37747eddbd31 h1:xPaP58g github.com/thanos-io/promql-engine v0.0.0-20240921092401-37747eddbd31/go.mod h1:wx0JlRZtsB2S10JYUgeg5GqLfMxw31SzArP+28yyE00= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= +github.com/tinylib/msgp v1.1.5 h1:2gXmtWueD2HefZHQe1QOy9HVzmFrLOVvsXwXBQ0ayy0= +github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= +github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= +github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= github.com/tklauser/go-sysconf v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek= github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= diff --git a/pkg/receive/capnp_server.go b/pkg/receive/capnp_server.go new file mode 100644 index 0000000000..34b406bfa9 --- /dev/null +++ b/pkg/receive/capnp_server.go @@ -0,0 +1,108 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package receive + +import ( + "context" + "net" + + "capnproto.org/go/capnp/v3" + "capnproto.org/go/capnp/v3/rpc" + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + + "github.com/thanos-io/thanos/pkg/receive/writecapnp" + "github.com/thanos-io/thanos/pkg/runutil" +) + +type CapNProtoServer struct { + listener net.Listener + server writecapnp.Writer + logger log.Logger +} + +func NewCapNProtoServer(listener net.Listener, handler *CapNProtoHandler, logger log.Logger) *CapNProtoServer { + return &CapNProtoServer{ + listener: listener, + server: writecapnp.Writer_ServerToClient(handler), + logger: logger, + } +} + +func (c *CapNProtoServer) ListenAndServe() error { + for { + conn, err := c.listener.Accept() + if err != nil { + return err + } + + go func() { + defer runutil.CloseWithLogOnErr(c.logger, conn, "receive capnp conn") + rpcConn := rpc.NewConn(rpc.NewPackedStreamTransport(conn), &rpc.Options{ + // The BootstrapClient is the RPC interface that will be made available + // to the remote endpoint by default. 
+ BootstrapClient: capnp.Client(c.server).AddRef(), + }) + <-rpcConn.Done() + }() + } +} + +func (c *CapNProtoServer) Shutdown() { + c.server.Release() +} + +type CapNProtoHandler struct { + writer *CapNProtoWriter + logger log.Logger +} + +func NewCapNProtoHandler(logger log.Logger, writer *CapNProtoWriter) *CapNProtoHandler { + return &CapNProtoHandler{logger: logger, writer: writer} +} + +func (c CapNProtoHandler) Write(ctx context.Context, call writecapnp.Writer_write) error { + call.Go() + wr, err := call.Args().Wr() + if err != nil { + return err + } + t, err := wr.Tenant() + if err != nil { + return err + } + req, err := writecapnp.NewRequest(wr) + if err != nil { + return err + } + defer req.Close() + + var errs writeErrors + errs.Add(c.writer.Write(ctx, t, req)) + if err := errs.ErrOrNil(); err != nil { + level.Debug(c.logger).Log("msg", "failed to handle request", "err", err) + result, allocErr := call.AllocResults() + if allocErr != nil { + return allocErr + } + + switch errors.Cause(err) { + case nil: + return nil + case errNotReady: + result.SetError(writecapnp.WriteError_unavailable) + case errUnavailable: + result.SetError(writecapnp.WriteError_unavailable) + case errConflict: + result.SetError(writecapnp.WriteError_alreadyExists) + case errBadReplica: + result.SetError(writecapnp.WriteError_invalidArgument) + default: + result.SetError(writecapnp.WriteError_internal) + } + } + + return nil +} diff --git a/pkg/receive/capnproto_server_bench_test.go b/pkg/receive/capnproto_server_bench_test.go new file mode 100644 index 0000000000..ba8d176dad --- /dev/null +++ b/pkg/receive/capnproto_server_bench_test.go @@ -0,0 +1,79 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package receive + +import ( + "context" + "os" + "testing" + + "github.com/go-kit/log" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/test/bufconn" + + "github.com/thanos-io/thanos/pkg/receive/writecapnp" + "github.com/thanos-io/thanos/pkg/store/labelpb" + "github.com/thanos-io/thanos/pkg/store/storepb" + "github.com/thanos-io/thanos/pkg/store/storepb/prompb" +) + +func BenchmarkCapNProtoServer_SingleConcurrentClient(b *testing.B) { + wreq := storepb.WriteRequest{ + Tenant: "example-tenant", + Timeseries: []prompb.TimeSeries{ + { + Labels: []labelpb.ZLabel{ + {Name: "__name__", Value: "up"}, + {Name: "job", Value: "prometheus"}, + }, + Samples: []prompb.Sample{ + {Timestamp: 1, Value: 1}, + {Timestamp: 2, Value: 2}, + }, + }, + { + Labels: []labelpb.ZLabel{ + {Name: "__name__", Value: "up"}, + {Name: "job", Value: "thanos"}, + }, + Samples: []prompb.Sample{ + {Timestamp: 3, Value: 3}, + {Timestamp: 4, Value: 4}, + }, + }, + }, + } + + var ( + writer = NewCapNProtoWriter( + log.NewNopLogger(), + newFakeTenantAppendable( + &fakeAppendable{appender: newFakeAppender(nil, nil, nil)}), + &CapNProtoWriterOptions{}, + ) + listener = bufconn.Listen(1024) + handler = NewCapNProtoHandler(log.NewNopLogger(), writer) + srv = NewCapNProtoServer(listener, handler, log.NewNopLogger()) + ) + go func() { + _ = srv.ListenAndServe() + }() + defer srv.Shutdown() + + const numIterations = 10000 + var totalWrites float64 + b.ResetTimer() + b.ReportAllocs() + client := writecapnp.NewRemoteWriteClient(listener, log.NewLogfmtLogger(os.Stdout)) + for i := 0; i < b.N; i++ { + for j := 0; j < numIterations; j++ { + _, err := client.RemoteWrite(context.Background(), &wreq) + require.NoError(b, err) + } + totalWrites += numIterations + } + 
require.NoError(b, client.Close()) + require.NoError(b, listener.Close()) + b.ReportMetric(totalWrites, "total_writes") +} diff --git a/pkg/receive/capnproto_server_test.go b/pkg/receive/capnproto_server_test.go new file mode 100644 index 0000000000..e054a73ad9 --- /dev/null +++ b/pkg/receive/capnproto_server_test.go @@ -0,0 +1,76 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package receive + +import ( + "context" + "os" + "testing" + + "github.com/go-kit/log" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/test/bufconn" + + "github.com/thanos-io/thanos/pkg/receive/writecapnp" + "github.com/thanos-io/thanos/pkg/store/storepb" +) + +func TestCapNProtoServer_SingleConcurrentClient(t *testing.T) { + var ( + writer = NewCapNProtoWriter( + log.NewNopLogger(), + newFakeTenantAppendable( + &fakeAppendable{appender: newFakeAppender(nil, nil, nil)}), + &CapNProtoWriterOptions{}, + ) + listener = bufconn.Listen(1024) + handler = NewCapNProtoHandler(log.NewNopLogger(), writer) + srv = NewCapNProtoServer(listener, handler, log.NewNopLogger()) + ) + go func() { + _ = srv.ListenAndServe() + }() + defer srv.Shutdown() + + for i := 0; i < 1000; i++ { + client := writecapnp.NewRemoteWriteClient(listener, log.NewLogfmtLogger(os.Stdout)) + _, err := client.RemoteWrite(context.Background(), &storepb.WriteRequest{ + Tenant: "default", + }) + require.NoError(t, err) + require.NoError(t, client.Close()) + } + require.NoError(t, listener.Close()) +} + +func TestCapNProtoServer_MultipleConcurrentClients(t *testing.T) { + var ( + writer = NewCapNProtoWriter( + log.NewNopLogger(), + newFakeTenantAppendable( + &fakeAppendable{appender: newFakeAppender(nil, nil, nil)}), + &CapNProtoWriterOptions{}, + ) + listener = bufconn.Listen(1024) + handler = NewCapNProtoHandler(log.NewNopLogger(), writer) + srv = NewCapNProtoServer(listener, handler, log.NewNopLogger()) + ) + go func() { + _ = srv.ListenAndServe() + }() + defer srv.Shutdown() + + for i := 0; i < 1000; i++ { + client := writecapnp.NewRemoteWriteClient(listener, log.NewLogfmtLogger(os.Stdout)) + _, err := client.RemoteWrite(context.Background(), &storepb.WriteRequest{ + Tenant: "default", + }) + require.NoError(t, err) + defer func() { + require.NoError(t, client.Close()) + }() + } + + require.NoError(t, listener.Close()) +} diff --git a/pkg/receive/capnproto_writer.go b/pkg/receive/capnproto_writer.go new file mode 100644 index 0000000000..1032b2ebd7 --- /dev/null +++ b/pkg/receive/capnproto_writer.go @@ -0,0 +1,170 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. 
+ +package receive + +import ( + "context" + "strings" + + "github.com/thanos-io/thanos/pkg/store/labelpb" + + "github.com/go-kit/log" + "github.com/pkg/errors" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb" + + "github.com/thanos-io/thanos/pkg/receive/writecapnp" +) + +type CapNProtoWriterOptions struct { + TooFarInFutureTimeWindow int64 // Unit: nanoseconds +} + +type CapNProtoWriter struct { + logger log.Logger + multiTSDB TenantStorage + opts *CapNProtoWriterOptions +} + +func NewCapNProtoWriter(logger log.Logger, multiTSDB TenantStorage, opts *CapNProtoWriterOptions) *CapNProtoWriter { + if opts == nil { + opts = &CapNProtoWriterOptions{} + } + return &CapNProtoWriter{ + logger: logger, + multiTSDB: multiTSDB, + opts: opts, + } +} + +func (r *CapNProtoWriter) Write(ctx context.Context, tenantID string, wreq *writecapnp.Request) error { + tLogger := log.With(r.logger, "tenant", tenantID) + + s, err := r.multiTSDB.TenantAppendable(tenantID) + if err != nil { + return errors.Wrap(err, "get tenant appendable") + } + + app, err := s.Appender(ctx) + if err == tsdb.ErrNotReady { + return err + } + if err != nil { + return errors.Wrap(err, "get appender") + } + getRef := app.(storage.GetRef) + var ( + ref storage.SeriesRef + errorTracker = &writeErrorTracker{} + ) + app = &ReceiveAppender{ + tLogger: tLogger, + tooFarInFuture: r.opts.TooFarInFutureTimeWindow, + Appender: app, + } + + var ( + series writecapnp.Series + builder labels.ScratchBuilder + ) + for wreq.Next() { + if err := wreq.At(&series); err != nil { + return errors.Wrap(err, "request.At") + } + + // Check if time series labels are valid. If not, skip the time series + // and report the error. + if err := validateLabels(series.Labels); err != nil { + lset := &labelpb.ZLabelSet{Labels: labelpb.ZLabelsFromPromLabels(series.Labels)} + errorTracker.addLabelsError(err, lset, tLogger) + continue + } + + var lset labels.Labels + // Check if the TSDB has cached reference for those labels. + ref, lset = getRef.GetRef(series.Labels, series.Labels.Hash()) + if ref == 0 { + // NOTE(GiedriusS): do a deep copy because the labels are reused in the capnp message. + // Creation of new series is much rarer compared to adding extra samples + // to an existing series. + builder.Reset() + series.Labels.Range(func(l labels.Label) { + builder.Add(strings.Clone(l.Name), strings.Clone(l.Value)) + }) + lset = builder.Labels() + } + + // Append as many valid samples as possible, but keep track of the errors. + for _, s := range series.Samples { + ref, err = app.Append(ref, lset, s.Timestamp, s.Value) + errorTracker.addSampleError(err, tLogger, lset, s.Timestamp, s.Value) + } + + for _, hp := range series.Histograms { + ref, err = app.AppendHistogram(ref, lset, hp.Timestamp, hp.Histogram, hp.FloatHistogram) + errorTracker.addHistogramError(err, tLogger, lset, hp.Timestamp) + } + + // Current implementation of app.AppendExemplar doesn't create a new series, so it must be already present. + // We drop the exemplars in case the series doesn't exist. 
+ if ref != 0 && len(series.Exemplars) > 0 { + for _, ex := range series.Exemplars { + exLogger := log.With(tLogger, "exemplarLset", ex.Labels) + + if _, err = app.AppendExemplar(ref, lset, exemplar.Exemplar{ + Labels: ex.Labels, + Value: ex.Value, + Ts: ex.Ts, + HasTs: true, + }); err != nil { + errorTracker.addExemplarError(err, exLogger) + } + } + } + } + + errs := errorTracker.collectErrors(tLogger) + if err := app.Commit(); err != nil { + errs.Add(errors.Wrap(err, "commit samples")) + } + return errs.ErrOrNil() +} + +// ValidateLabels validates label names and values (checks for empty +// names and values, out of order labels and duplicate label names) +// Returns appropriate error if validation fails on a label. +func validateLabels(lbls labels.Labels) error { + if lbls.Len() == 0 { + return labelpb.ErrEmptyLabels + } + + var ( + prev *labels.Label + err error + ) + lbls.Range(func(l labels.Label) { + if err != nil { + return + } + if l.Name == "" || l.Value == "" { + err = labelpb.ErrEmptyLabels + } + if prev == nil { + prev = &l + return + } + + if l.Name == prev.Name { + err = labelpb.ErrDuplicateLabels + } + if l.Name < prev.Name { + err = labelpb.ErrOutOfOrderLabels + } + prev = &l + }) + + return err +} diff --git a/pkg/receive/config.go b/pkg/receive/config.go index 6f65f6895d..f453cb2263 100644 --- a/pkg/receive/config.go +++ b/pkg/receive/config.go @@ -8,9 +8,11 @@ import ( "crypto/md5" "encoding/binary" "encoding/json" + "fmt" "io" "os" "path/filepath" + "strings" "time" "github.com/fsnotify/fsnotify" @@ -36,14 +38,44 @@ const ( RouterOnly ReceiverMode = "RouterOnly" IngestorOnly ReceiverMode = "IngestorOnly" RouterIngestor ReceiverMode = "RouterIngestor" + + DefaultCapNProtoPort string = "19391" ) type Endpoint struct { - Address string `json:"address"` - AZ string `json:"az"` + Address string `json:"address"` + CapNProtoAddress string `json:"capnproto_address"` + AZ string `json:"az"` +} + +func (e *Endpoint) String() string { + return fmt.Sprintf("addr: %s, capnp_addr: %s, az: %s", e.Address, e.CapNProtoAddress, e.AZ) +} + +func (e *Endpoint) HasAddress(addr string) bool { + return e.Address == addr || e.CapNProtoAddress == addr } func (e *Endpoint) UnmarshalJSON(data []byte) error { + if err := e.unmarshal(data); err != nil { + return err + } + if e.Address == "" { + return errors.New("endpoint address must be set") + } + + // If the Cap'n proto address is not set, initialize it + // to the existing address using the default cap'n proto server port. + if e.CapNProtoAddress != "" { + return nil + } + if parts := strings.SplitN(e.Address, ":", 2); len(parts) <= 2 { + e.CapNProtoAddress = parts[0] + ":" + DefaultCapNProtoPort + } + return nil +} + +func (e *Endpoint) unmarshal(data []byte) error { // First try to unmarshal as a string. err := json.Unmarshal(data, &e.Address) if err == nil { @@ -53,12 +85,14 @@ func (e *Endpoint) UnmarshalJSON(data []byte) error { // If that fails, try to unmarshal as an endpoint object. 
type endpointAlias Endpoint var configEndpoint endpointAlias - err = json.Unmarshal(data, &configEndpoint) - if err == nil { - e.Address = configEndpoint.Address - e.AZ = configEndpoint.AZ + if err := json.Unmarshal(data, &configEndpoint); err != nil { + return err } - return err + + e.Address = configEndpoint.Address + e.AZ = configEndpoint.AZ + e.CapNProtoAddress = configEndpoint.CapNProtoAddress + return nil } // HashringConfig represents the configuration for a hashring diff --git a/pkg/receive/config_test.go b/pkg/receive/config_test.go index c1230f327e..62435c3283 100644 --- a/pkg/receive/config_test.go +++ b/pkg/receive/config_test.go @@ -73,14 +73,49 @@ func TestValidateConfig(t *testing.T) { } func TestUnmarshalEndpointSlice(t *testing.T) { - t.Run("Endpoints as string slice", func(t *testing.T) { - var endpoints []Endpoint - testutil.Ok(t, json.Unmarshal([]byte(`["node-1"]`), &endpoints)) - testutil.Equals(t, endpoints, []Endpoint{{Address: "node-1"}}) - }) - t.Run("Endpoints as endpoints slice", func(t *testing.T) { - var endpoints []Endpoint - testutil.Ok(t, json.Unmarshal([]byte(`[{"address": "node-1", "az": "az-1"}]`), &endpoints)) - testutil.Equals(t, endpoints, []Endpoint{{Address: "node-1", AZ: "az-1"}}) - }) + cases := []struct { + name string + json string + endpoints []Endpoint + expectErr bool + }{ + { + name: "Endpoint with empty address", + json: `[{"az": "az-1"}]`, + endpoints: []Endpoint{{Address: "node-1", CapNProtoAddress: "node-1:19391"}}, + expectErr: true, + }, + { + name: "Endpoints as string slice", + json: `["node-1"]`, + endpoints: []Endpoint{{Address: "node-1", CapNProtoAddress: "node-1:19391"}}, + }, + { + name: "Endpoints as endpoints slice", + json: `[{"address": "node-1", "az": "az-1"}]`, + endpoints: []Endpoint{{Address: "node-1", CapNProtoAddress: "node-1:19391", AZ: "az-1"}}, + }, + { + name: "Endpoints as string slice with port", + json: `["node-1:80"]`, + endpoints: []Endpoint{{Address: "node-1:80", CapNProtoAddress: "node-1:19391"}}, + }, + { + name: "Endpoints as string slice with capnproto port", + json: `[{"address": "node-1", "capnproto_address": "node-1:81"}]`, + endpoints: []Endpoint{{Address: "node-1", CapNProtoAddress: "node-1:81"}}, + }, + } + for _, tcase := range cases { + t.Run(tcase.name, func(t *testing.T) { + var endpoints []Endpoint + err := json.Unmarshal([]byte(tcase.json), &endpoints) + if tcase.expectErr { + testutil.NotOk(t, err) + return + } + testutil.Ok(t, err) + testutil.Equals(t, tcase.endpoints, endpoints) + }) + } } diff --git a/pkg/receive/handler.go b/pkg/receive/handler.go index 832d6162f1..163a29a2e1 100644 --- a/pkg/receive/handler.go +++ b/pkg/receive/handler.go @@ -15,6 +15,7 @@ import ( "net/http" "sort" "strconv" + "strings" "sync" "time" @@ -35,6 +36,7 @@ import ( "github.com/prometheus/prometheus/tsdb" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" + "golang.org/x/exp/slices" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -42,6 +44,7 @@ import ( "github.com/thanos-io/thanos/pkg/api" statusapi "github.com/thanos-io/thanos/pkg/api/status" "github.com/thanos-io/thanos/pkg/logging" + "github.com/thanos-io/thanos/pkg/receive/writecapnp" extpromhttp "github.com/thanos-io/thanos/pkg/extprom/http" "github.com/thanos-io/thanos/pkg/pool" @@ -68,6 +71,13 @@ const ( labelError = "error" ) +type ReplicationProtocol string + +const ( + ProtobufReplication ReplicationProtocol = "protobuf" + 
CapNProtoReplication ReplicationProtocol = "capnproto" +) + var ( // errConflict is returned whenever an operation fails due to any conflict-type error. errConflict = errors.New("conflict") @@ -105,6 +115,7 @@ type Options struct { TSDBStats TSDBStats Limiter *Limiter AsyncForwardWorkerCount uint + ReplicationProtocol ReplicationProtocol } // Handler serves a Prometheus remote write receiving HTTP endpoint. @@ -154,6 +165,7 @@ func NewHandler(logger log.Logger, o *Options) *Handler { options: o, splitTenantLabelName: o.SplitTenantLabelName, peers: newPeerGroup( + logger, backoff.Backoff{ Factor: 2, Min: 100 * time.Millisecond, @@ -168,6 +180,7 @@ func NewHandler(logger log.Logger, o *Options) *Handler { }, ), workers, + o.ReplicationProtocol, o.DialOpts...), receiverMode: o.ReceiverMode, Limiter: o.Limiter, @@ -305,9 +318,9 @@ func (h *Handler) Hashring(hashring Hashring) { // getSortedStringSliceDiff returns items which are in slice1 but not in slice2. // The returned slice also only contains unique items i.e. it is a set. -func getSortedStringSliceDiff(slice1, slice2 []string) []string { - slice1Items := make(map[string]struct{}, len(slice1)) - slice2Items := make(map[string]struct{}, len(slice2)) +func getSortedStringSliceDiff(slice1, slice2 []Endpoint) []Endpoint { + slice1Items := make(map[Endpoint]struct{}, len(slice1)) + slice2Items := make(map[Endpoint]struct{}, len(slice2)) for _, s1 := range slice1 { slice1Items[s1] = struct{}{} @@ -316,7 +329,7 @@ func getSortedStringSliceDiff(slice1, slice2 []string) []string { slice2Items[s2] = struct{}{} } - var difference = make([]string, 0) + var difference = make([]Endpoint, 0) for s1 := range slice1Items { _, s2Contains := slice2Items[s1] if s2Contains { @@ -324,7 +337,9 @@ func getSortedStringSliceDiff(slice1, slice2 []string) []string { } difference = append(difference, s1) } - sort.Strings(difference) + slices.SortFunc(difference, func(a, b Endpoint) int { + return strings.Compare(a.String(), b.String()) + }) return difference } @@ -399,6 +414,7 @@ func (h *Handler) getStats(r *http.Request, statsByLabelName string) ([]statusap // Close stops the Handler. func (h *Handler) Close() { + _ = h.peers.Close() runutil.CloseWithLogOnErr(h.logger, h.httpSrv, "receive HTTP server") } @@ -435,7 +451,7 @@ type replica struct { // endpointReplica is a pair of a receive endpoint and a write request replica. 
type endpointReplica struct { - endpoint string + endpoint Endpoint replica uint64 } @@ -852,7 +868,7 @@ func (h *Handler) distributeTimeseriesToReplicas( } endpointReplica := endpointReplica{endpoint: endpoint, replica: rn} var writeDestination = remoteWrites - if endpoint == h.options.Endpoint { + if endpoint.HasAddress(h.options.Endpoint) { writeDestination = localWrites } writeableSeries, ok := writeDestination[endpointReplica] @@ -931,9 +947,7 @@ func (h *Handler) sendLocalWrite( } for tenant, series := range tenantSeriesMapping { - err := h.writer.Write(tracingCtx, tenant, &prompb.WriteRequest{ - Timeseries: series, - }) + err := h.writer.Write(tracingCtx, tenant, series) if err != nil { span.SetTag("error", true) span.SetTag("error.msg", err.Error()) @@ -1301,44 +1315,75 @@ func newReplicationErrors(threshold, numErrors int) []*replicationErrors { return errs } -func newPeerWorker(cc *grpc.ClientConn, forwardDelay prometheus.Histogram, asyncWorkerCount uint) *peerWorker { +func newPeerWorker(client peerClient, forwardDelay prometheus.Histogram, asyncWorkerCount uint) *peerWorker { return &peerWorker{ - cc: cc, + client: client, wp: pool.NewWorkerPool(asyncWorkerCount), forwardDelay: forwardDelay, } } func (pw *peerWorker) RemoteWrite(ctx context.Context, in *storepb.WriteRequest, opts ...grpc.CallOption) (*storepb.WriteResponse, error) { - return storepb.NewWriteableStoreClient(pw.cc).RemoteWrite(ctx, in) + return pw.client.RemoteWrite(ctx, in) +} + +type peerClient interface { + storepb.WriteableStoreClient + io.Closer +} + +type protobufPeer struct { + storepb.WriteableStoreClient + conn *grpc.ClientConn +} + +func newProtobufPeer(conn *grpc.ClientConn) *protobufPeer { + return &protobufPeer{ + WriteableStoreClient: storepb.NewWriteableStoreClient(conn), + conn: conn, + } +} + +func (p protobufPeer) Close() error { + return p.conn.Close() } type peerWorker struct { - cc *grpc.ClientConn - wp pool.WorkerPool + client peerClient + wp pool.WorkerPool forwardDelay prometheus.Histogram } -func newPeerGroup(backoff backoff.Backoff, forwardDelay prometheus.Histogram, asyncForwardWorkersCount uint, dialOpts ...grpc.DialOption) peersContainer { +func newPeerGroup( + logger log.Logger, + backoff backoff.Backoff, + forwardDelay prometheus.Histogram, + asyncForwardWorkersCount uint, + replicationProtocol ReplicationProtocol, + dialOpts ...grpc.DialOption, +) *peerGroup { return &peerGroup{ + logger: logger, dialOpts: dialOpts, - connections: map[string]*peerWorker{}, + connections: map[Endpoint]*peerWorker{}, m: sync.RWMutex{}, dialer: grpc.NewClient, - peerStates: make(map[string]*retryState), + peerStates: make(map[Endpoint]*retryState), expBackoff: backoff, forwardDelay: forwardDelay, asyncForwardWorkersCount: asyncForwardWorkersCount, + replicationProtocol: replicationProtocol, } } type peersContainer interface { - close(string) error - getConnection(context.Context, string) (WriteableStoreAsyncClient, error) - markPeerUnavailable(string) - markPeerAvailable(string) + close(Endpoint) error + getConnection(context.Context, Endpoint) (WriteableStoreAsyncClient, error) + markPeerUnavailable(Endpoint) + markPeerAvailable(Endpoint) reset() + io.Closer } func (p *peerWorker) RemoteWriteAsync(ctx context.Context, req *storepb.WriteRequest, er endpointReplica, seriesIDs []int, responseWriter chan writeResponse, cb func(error)) { @@ -1347,7 +1392,7 @@ func (p *peerWorker) RemoteWriteAsync(ctx context.Context, req *storepb.WriteReq p.forwardDelay.Observe(time.Since(now).Seconds()) 
tracing.DoInSpan(ctx, "receive_forward", func(ctx context.Context) { - _, err := storepb.NewWriteableStoreClient(p.cc).RemoteWrite(ctx, req) + _, err := p.client.RemoteWrite(ctx, req) responseWriter <- newWriteResponse( seriesIDs, errors.Wrapf(err, "forwarding request to endpoint %v", er.endpoint), @@ -1367,12 +1412,14 @@ func (p *peerWorker) RemoteWriteAsync(ctx context.Context, req *storepb.WriteReq } type peerGroup struct { + logger log.Logger dialOpts []grpc.DialOption - connections map[string]*peerWorker - peerStates map[string]*retryState + connections map[Endpoint]*peerWorker + peerStates map[Endpoint]*retryState expBackoff backoff.Backoff forwardDelay prometheus.Histogram asyncForwardWorkersCount uint + replicationProtocol ReplicationProtocol m sync.RWMutex @@ -1380,34 +1427,41 @@ type peerGroup struct { dialer func(target string, opts ...grpc.DialOption) (conn *grpc.ClientConn, err error) } -func (p *peerGroup) close(addr string) error { +func (p *peerGroup) Close() error { + for _, c := range p.connections { + c.wp.Close() + } + return nil +} + +func (p *peerGroup) close(endpoint Endpoint) error { p.m.Lock() defer p.m.Unlock() - c, ok := p.connections[addr] + c, ok := p.connections[endpoint] if !ok { // NOTE(GiedriusS): this could be valid case when the connection // was never established. return nil } - p.connections[addr].wp.Close() - delete(p.connections, addr) - if err := c.cc.Close(); err != nil { - return fmt.Errorf("closing connection for %s", addr) + p.connections[endpoint].wp.Close() + delete(p.connections, endpoint) + if err := c.client.Close(); err != nil { + return fmt.Errorf("closing connection for %s", endpoint) } return nil } -func (p *peerGroup) getConnection(ctx context.Context, addr string) (WriteableStoreAsyncClient, error) { - if !p.isPeerUp(addr) { +func (p *peerGroup) getConnection(ctx context.Context, endpoint Endpoint) (WriteableStoreAsyncClient, error) { + if !p.isPeerUp(endpoint) { return nil, errUnavailable } // use a RLock first to prevent blocking if we don't need to. p.m.RLock() - c, ok := p.connections[addr] + c, ok := p.connections[endpoint] p.m.RUnlock() if ok { return c, nil @@ -1416,29 +1470,40 @@ func (p *peerGroup) getConnection(ctx context.Context, addr string) (WriteableSt p.m.Lock() defer p.m.Unlock() // Make sure that another caller hasn't created the connection since obtaining the write lock. - c, ok = p.connections[addr] + c, ok = p.connections[endpoint] if ok { return c, nil } - conn, err := p.dialer(addr, p.dialOpts...) - if err != nil { - p.markPeerUnavailableUnlocked(addr) - dialError := errors.Wrap(err, "failed to dial peer") - return nil, errors.Wrap(dialError, errUnavailable.Error()) + + var client peerClient + switch p.replicationProtocol { + case CapNProtoReplication: + client = writecapnp.NewRemoteWriteClient(writecapnp.NewTCPDialer(endpoint.CapNProtoAddress), p.logger) + + case ProtobufReplication: + conn, err := p.dialer(endpoint.Address, p.dialOpts...) 
+ if err != nil { + p.markPeerUnavailableUnlocked(endpoint) + dialError := errors.Wrap(err, "failed to dial peer") + return nil, errors.Wrap(dialError, errUnavailable.Error()) + } + client = newProtobufPeer(conn) + default: + return nil, errors.Errorf("unknown replication protocol %v", p.replicationProtocol) } - p.connections[addr] = newPeerWorker(conn, p.forwardDelay, p.asyncForwardWorkersCount) - return p.connections[addr], nil + p.connections[endpoint] = newPeerWorker(client, p.forwardDelay, p.asyncForwardWorkersCount) + return p.connections[endpoint], nil } -func (p *peerGroup) markPeerUnavailable(addr string) { +func (p *peerGroup) markPeerUnavailable(addr Endpoint) { p.m.Lock() defer p.m.Unlock() p.markPeerUnavailableUnlocked(addr) } -func (p *peerGroup) markPeerUnavailableUnlocked(addr string) { +func (p *peerGroup) markPeerUnavailableUnlocked(addr Endpoint) { state, ok := p.peerStates[addr] if !ok { state = &retryState{attempt: -1} @@ -1448,13 +1513,13 @@ func (p *peerGroup) markPeerUnavailableUnlocked(addr string) { p.peerStates[addr] = state } -func (p *peerGroup) markPeerAvailable(addr string) { +func (p *peerGroup) markPeerAvailable(addr Endpoint) { p.m.Lock() defer p.m.Unlock() delete(p.peerStates, addr) } -func (p *peerGroup) isPeerUp(addr string) bool { +func (p *peerGroup) isPeerUp(addr Endpoint) bool { p.m.RLock() defer p.m.RUnlock() state, ok := p.peerStates[addr] @@ -1466,5 +1531,5 @@ func (p *peerGroup) isPeerUp(addr string) bool { func (p *peerGroup) reset() { p.expBackoff.Reset() - p.peerStates = make(map[string]*retryState) + p.peerStates = make(map[Endpoint]*retryState) } diff --git a/pkg/receive/handler_test.go b/pkg/receive/handler_test.go index 4f81a3d1ca..44d6306fac 100644 --- a/pkg/receive/handler_test.go +++ b/pkg/receive/handler_test.go @@ -22,10 +22,12 @@ import ( "testing" "time" - "google.golang.org/grpc" "gopkg.in/yaml.v3" + goerrors "errors" + "github.com/alecthomas/units" + "github.com/efficientgo/core/testutil" "github.com/go-kit/log" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" @@ -40,12 +42,13 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" "github.com/stretchr/testify/require" - - "github.com/efficientgo/core/testutil" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/extkingpin" "github.com/thanos-io/thanos/pkg/logging" + "github.com/thanos-io/thanos/pkg/receive/writecapnp" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" @@ -169,51 +172,68 @@ func (f *fakeAppender) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels } type fakePeersGroup struct { - clients map[string]WriteableStoreAsyncClient + clients map[Endpoint]*peerWorker + + closeCalled map[Endpoint]bool +} - closeCalled map[string]bool +func (g *fakePeersGroup) Close() error { + for _, c := range g.clients { + c.wp.Close() + } + return nil } -func (g *fakePeersGroup) markPeerUnavailable(s string) { +func (g *fakePeersGroup) markPeerUnavailable(s Endpoint) { } -func (g *fakePeersGroup) markPeerAvailable(s string) { +func (g *fakePeersGroup) markPeerAvailable(s Endpoint) { } func (g *fakePeersGroup) reset() { } -func (g *fakePeersGroup) close(addr string) error { +func (g *fakePeersGroup) close(addr Endpoint) error 
{ if g.closeCalled == nil { - g.closeCalled = map[string]bool{} + g.closeCalled = map[Endpoint]bool{} } g.closeCalled[addr] = true + g.clients[addr].wp.Close() return nil } -func (g *fakePeersGroup) getConnection(_ context.Context, addr string) (WriteableStoreAsyncClient, error) { - c, ok := g.clients[addr] +func (g *fakePeersGroup) getConnection(_ context.Context, endpoint Endpoint) (WriteableStoreAsyncClient, error) { + c, ok := g.clients[endpoint] if !ok { - return nil, fmt.Errorf("client %s not found", addr) + return nil, fmt.Errorf("client %s not found", endpoint) } return c, nil } var _ = (peersContainer)(&fakePeersGroup{}) -func newTestHandlerHashring(appendables []*fakeAppendable, replicationFactor uint64, hashringAlgo HashringAlgorithm) ([]*Handler, Hashring, error) { +func newTestHandlerHashring( + appendables []*fakeAppendable, + replicationFactor uint64, + hashringAlgo HashringAlgorithm, + capnpReplication bool, +) ([]*Handler, Hashring, func() error, error) { var ( cfg = []HashringConfig{{Hashring: "test"}} handlers []*Handler wOpts = &WriterOptions{} ) fakePeers := &fakePeersGroup{ - clients: map[string]WriteableStoreAsyncClient{}, + clients: map[Endpoint]*peerWorker{}, } - ag := addrGen{} - limiter, _ := NewLimiter(NewNopConfig(), nil, RouterIngestor, log.NewNopLogger(), 1*time.Second) - logger := logging.NewLogger("debug", "logfmt", "receive_test") + var ( + closers = make([]func() error, 0) + + ag = addrGen{} + logger = logging.NewLogger("debug", "logfmt", "receive_test") + limiter, _ = NewLimiter(NewNopConfig(), nil, RouterIngestor, log.NewNopLogger(), 1*time.Second) + ) for i := range appendables { h := NewHandler(logger, &Options{ TenantHeader: tenancy.DefaultTenantHeader, @@ -224,11 +244,30 @@ func newTestHandlerHashring(appendables []*fakeAppendable, replicationFactor uin Limiter: limiter, }) handlers = append(handlers, h) - addr := ag.newAddr() h.peers = fakePeers - fakePeers.clients[addr] = &fakeRemoteWriteGRPCServer{h: h} - h.options.Endpoint = addr - cfg[0].Endpoints = append(cfg[0].Endpoints, Endpoint{Address: h.options.Endpoint}) + endpoint := ag.newEndpoint() + h.options.Endpoint = endpoint.Address + cfg[0].Endpoints = append(cfg[0].Endpoints, endpoint) + + var peer *peerWorker + if capnpReplication { + writer := NewCapNProtoWriter(logger, newFakeTenantAppendable(appendables[i]), nil) + var ( + listener = bufconn.Listen(1024) + handler = NewCapNProtoHandler(log.NewNopLogger(), writer) + ) + srv := NewCapNProtoServer(listener, handler, log.NewNopLogger()) + client := writecapnp.NewRemoteWriteClient(listener, logger) + peer = newPeerWorker(client, prometheus.NewHistogram(prometheus.HistogramOpts{}), 1) + closers = append(closers, func() error { + srv.Shutdown() + return goerrors.Join(listener.Close(), client.Close()) + }) + go func() { _ = srv.ListenAndServe() }() + } else { + peer = newPeerWorker(&fakeRemoteWriteGRPCServer{h: h}, prometheus.NewHistogram(prometheus.HistogramOpts{}), 1) + } + fakePeers.clients[endpoint] = peer } // Use hashmod as default. 
if hashringAlgo == "" { @@ -237,15 +276,22 @@ func newTestHandlerHashring(appendables []*fakeAppendable, replicationFactor uin hashring, err := NewMultiHashring(hashringAlgo, replicationFactor, cfg) if err != nil { - return nil, nil, err + return nil, nil, nil, err } for _, h := range handlers { h.Hashring(hashring) } - return handlers, hashring, nil + closeFunc := func() error { + errs := make([]error, 0, len(closers)) + for _, closeFunc := range closers { + errs = append(errs, closeFunc()) + } + return goerrors.Join(errs...) + } + return handlers, hashring, closeFunc, nil } -func testReceiveQuorum(t *testing.T, hashringAlgo HashringAlgorithm, withConsistencyDelay bool) { +func testReceiveQuorum(t *testing.T, hashringAlgo HashringAlgorithm, withConsistencyDelay, capnpReplication bool) { appenderErrFn := func() error { return errors.New("failed to get appender") } conflictErrFn := func() error { return storage.ErrOutOfBounds } tooOldSampleErrFn := func() error { return storage.ErrTooOldSample } @@ -613,10 +659,19 @@ func testReceiveQuorum(t *testing.T, hashringAlgo HashringAlgorithm, withConsist }, } { t.Run(tc.name, func(t *testing.T) { - handlers, hashring, err := newTestHandlerHashring(tc.appendables, tc.replicationFactor, hashringAlgo) + handlers, hashring, closeFunc, err := newTestHandlerHashring(tc.appendables, tc.replicationFactor, hashringAlgo, capnpReplication) if err != nil { t.Fatalf("unable to create test handler: %v", err) } + defer func() { + testutil.Ok(t, closeFunc()) + // Wait a few milliseconds for peer workers to process the queue. + time.AfterFunc(50*time.Millisecond, func() { + for _, h := range handlers { + h.Close() + } + }) + }() tenant := "test" if tc.randomNode { @@ -681,6 +736,7 @@ func testReceiveQuorum(t *testing.T, hashringAlgo HashringAlgorithm, withConsist t.Errorf("handler: %d, labels %q: expected minimum of %d samples, got %d", j, lset.String(), expectedMin, got) } } + } } }) @@ -688,19 +744,35 @@ func testReceiveQuorum(t *testing.T, hashringAlgo HashringAlgorithm, withConsist } func TestReceiveQuorumHashmod(t *testing.T) { - testReceiveQuorum(t, AlgorithmHashmod, false) + for _, capnpReplication := range []bool{false, true} { + t.Run(fmt.Sprintf("capnproto-replication=%t", capnpReplication), func(t *testing.T) { + testReceiveQuorum(t, AlgorithmHashmod, false, capnpReplication) + }) + } } func TestReceiveQuorumKetama(t *testing.T) { - testReceiveQuorum(t, AlgorithmKetama, false) + for _, capnpReplication := range []bool{false, true} { + t.Run(fmt.Sprintf("capnproto-replication=%t", capnpReplication), func(t *testing.T) { + testReceiveQuorum(t, AlgorithmKetama, false, capnpReplication) + }) + } } func TestReceiveWithConsistencyDelayHashmod(t *testing.T) { - testReceiveQuorum(t, AlgorithmHashmod, true) + for _, capnpReplication := range []bool{false, true} { + t.Run(fmt.Sprintf("capnproto-replication=%t", capnpReplication), func(t *testing.T) { + testReceiveQuorum(t, AlgorithmHashmod, true, capnpReplication) + }) + } } func TestReceiveWithConsistencyDelayKetama(t *testing.T) { - testReceiveQuorum(t, AlgorithmKetama, true) + for _, capnpReplication := range []bool{false, true} { + t.Run(fmt.Sprintf("capnproto-replication=%t", capnpReplication), func(t *testing.T) { + testReceiveQuorum(t, AlgorithmKetama, true, capnpReplication) + }) + } } func TestReceiveWriteRequestLimits(t *testing.T) { @@ -755,10 +827,20 @@ func TestReceiveWriteRequestLimits(t *testing.T) { appender: newFakeAppender(nil, nil, nil), }, } - handlers, _, err := 
newTestHandlerHashring(appendables, 3, AlgorithmHashmod) + handlers, _, closeFunc, err := newTestHandlerHashring(appendables, 3, AlgorithmHashmod, false) if err != nil { t.Fatalf("unable to create test handler: %v", err) } + defer func() { + testutil.Ok(t, closeFunc()) + // Wait a few milliseconds for peer workers to process the queue. + time.AfterFunc(50*time.Millisecond, func() { + for _, h := range handlers { + h.Close() + } + }) + }() + handler := handlers[0] tenant := "test" @@ -820,7 +902,7 @@ func endpointHit(t *testing.T, h Hashring, rf uint64, endpoint, tenant string, t if err != nil { t.Fatalf("got unexpected error querying hashring: %v", err) } - if e == endpoint { + if e.HasAddress(endpoint) { return true } } @@ -864,9 +946,13 @@ func makeRequest(h *Handler, tenant string, wreq *prompb.WriteRequest) (*httptes type addrGen struct{ n int } -func (a *addrGen) newAddr() string { +func (a *addrGen) newEndpoint() Endpoint { a.n++ - return fmt.Sprintf("http://node-%d:%d", a.n, 12345+a.n) + addr := fmt.Sprintf("http://node-%d:%d", a.n, 12345+a.n) + return Endpoint{ + Address: addr, + CapNProtoAddress: addr, + } } type fakeRemoteWriteGRPCServer struct { @@ -887,6 +973,8 @@ func (f *fakeRemoteWriteGRPCServer) RemoteWriteAsync(ctx context.Context, in *st cb(err) } +func (f *fakeRemoteWriteGRPCServer) Close() error { return nil } + func BenchmarkHandlerReceiveHTTP(b *testing.B) { benchmarkHandlerMultiTSDBReceiveRemoteWrite(testutil.NewTB(b)) } @@ -977,10 +1065,13 @@ func makeSeriesWithValues(numSeries int) []prompb.TimeSeries { func benchmarkHandlerMultiTSDBReceiveRemoteWrite(b testutil.TB) { dir := b.TempDir() - handlers, _, err := newTestHandlerHashring([]*fakeAppendable{nil}, 1, AlgorithmHashmod) + handlers, _, closeFunc, err := newTestHandlerHashring([]*fakeAppendable{nil}, 1, AlgorithmHashmod, false) if err != nil { b.Fatalf("unable to create test handler: %v", err) } + defer func() { + testutil.Ok(b, closeFunc()) + }() handler := handlers[0] reg := prometheus.NewRegistry() @@ -1619,11 +1710,12 @@ func TestGetStatsLimitParameter(t *testing.T) { }) } -func TestSortedSliceDiff(t *testing.T) { - testutil.Equals(t, []string{"a"}, getSortedStringSliceDiff([]string{"a", "a", "foo"}, []string{"b", "b", "foo"})) - testutil.Equals(t, []string{}, getSortedStringSliceDiff([]string{}, []string{"b", "b", "foo"})) - testutil.Equals(t, []string{}, getSortedStringSliceDiff([]string{}, []string{})) -} +// +//func TestSortedSliceDiff(t *testing.T) { +// testutil.Equals(t, []string{"a"}, getSortedStringSliceDiff([]string{"a", "a", "foo"}, []string{"b", "b", "foo"})) +// testutil.Equals(t, []string{}, getSortedStringSliceDiff([]string{}, []string{"b", "b", "foo"})) +// testutil.Equals(t, []string{}, getSortedStringSliceDiff([]string{}, []string{})) +//} func TestHashringChangeCallsClose(t *testing.T) { appendables := []*fakeAppendable{ @@ -1637,13 +1729,15 @@ func TestHashringChangeCallsClose(t *testing.T) { appender: newFakeAppender(nil, nil, nil), }, } - allHandlers, _, err := newTestHandlerHashring(appendables, 3, AlgorithmHashmod) + allHandlers, _, closeFunc, err := newTestHandlerHashring(appendables, 3, AlgorithmHashmod, false) testutil.Ok(t, err) + testutil.Ok(t, closeFunc()) appendables = appendables[1:] - _, smallHashring, err := newTestHandlerHashring(appendables, 2, AlgorithmHashmod) + _, smallHashring, closeFunc, err := newTestHandlerHashring(appendables, 2, AlgorithmHashmod, false) testutil.Ok(t, err) + testutil.Ok(t, closeFunc()) allHandlers[0].Hashring(smallHashring) @@ -1666,7 +1760,7 @@ 
type hashringSeenTenants struct { seenTenants map[string]struct{} } -func (h *hashringSeenTenants) GetN(tenant string, ts *prompb.TimeSeries, n uint64) (string, error) { +func (h *hashringSeenTenants) GetN(tenant string, ts *prompb.TimeSeries, n uint64) (Endpoint, error) { if h.seenTenants == nil { h.seenTenants = map[string]struct{}{} } @@ -1680,11 +1774,8 @@ func TestDistributeSeries(t *testing.T) { SplitTenantLabelName: tenantIDLabelName, }) - hashring, err := newSimpleHashring([]Endpoint{ - { - Address: "http://localhost:9090", - }, - }) + endpoint := Endpoint{Address: "http://localhost:9090", CapNProtoAddress: "http://localhost:19391"} + hashring, err := newSimpleHashring([]Endpoint{endpoint}) require.NoError(t, err) hr := &hashringSeenTenants{Hashring: hashring} h.Hashring(hr) @@ -1703,11 +1794,12 @@ func TestDistributeSeries(t *testing.T) { ) require.NoError(t, err) require.Len(t, remote, 1) - require.Len(t, remote[endpointReplica{endpoint: "http://localhost:9090", replica: 0}]["bar"].timeSeries, 1) - require.Len(t, remote[endpointReplica{endpoint: "http://localhost:9090", replica: 0}]["boo"].timeSeries, 1) + require.Len(t, remote[endpointReplica{endpoint: endpoint, replica: 0}]["bar"].timeSeries, 1) + require.Len(t, remote[endpointReplica{endpoint: endpoint, replica: 0}]["boo"].timeSeries, 1) + + require.Equal(t, 1, labelpb.ZLabelsToPromLabels(remote[endpointReplica{endpoint: endpoint, replica: 0}]["bar"].timeSeries[0].Labels).Len()) + require.Equal(t, 1, labelpb.ZLabelsToPromLabels(remote[endpointReplica{endpoint: endpoint, replica: 0}]["boo"].timeSeries[0].Labels).Len()) - require.Equal(t, 1, labelpb.ZLabelsToPromLabels(remote[endpointReplica{endpoint: "http://localhost:9090", replica: 0}]["bar"].timeSeries[0].Labels).Len()) - require.Equal(t, 1, labelpb.ZLabelsToPromLabels(remote[endpointReplica{endpoint: "http://localhost:9090", replica: 0}]["boo"].timeSeries[0].Labels).Len()) require.Equal(t, map[string]struct{}{"bar": {}, "boo": {}}, hr.seenTenants) } diff --git a/pkg/receive/hashring.go b/pkg/receive/hashring.go index cb05f956fb..a5a14e6779 100644 --- a/pkg/receive/hashring.go +++ b/pkg/receive/hashring.go @@ -9,9 +9,11 @@ import ( "path/filepath" "sort" "strconv" + "strings" "sync" "github.com/cespare/xxhash" + "golang.org/x/exp/slices" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -53,63 +55,66 @@ func (i *insufficientNodesError) Error() string { // It returns the node and any error encountered. type Hashring interface { // Get returns the first node that should handle the given tenant and time series. - Get(tenant string, timeSeries *prompb.TimeSeries) (string, error) + Get(tenant string, timeSeries *prompb.TimeSeries) (Endpoint, error) // GetN returns the nth node that should handle the given tenant and time series. - GetN(tenant string, timeSeries *prompb.TimeSeries, n uint64) (string, error) + GetN(tenant string, timeSeries *prompb.TimeSeries, n uint64) (Endpoint, error) // Nodes returns a sorted slice of nodes that are in this hashring. Addresses could be duplicated // if, for example, the same address is used for multiple tenants in the multi-hashring. - Nodes() []string + Nodes() []Endpoint } // SingleNodeHashring always returns the same node. type SingleNodeHashring string // Get implements the Hashring interface. 
-func (s SingleNodeHashring) Get(tenant string, ts *prompb.TimeSeries) (string, error) { +func (s SingleNodeHashring) Get(tenant string, ts *prompb.TimeSeries) (Endpoint, error) { return s.GetN(tenant, ts, 0) } -func (s SingleNodeHashring) Nodes() []string { - return []string{string(s)} +func (s SingleNodeHashring) Nodes() []Endpoint { + return []Endpoint{{Address: string(s), CapNProtoAddress: string(s)}} } // GetN implements the Hashring interface. -func (s SingleNodeHashring) GetN(_ string, _ *prompb.TimeSeries, n uint64) (string, error) { +func (s SingleNodeHashring) GetN(_ string, _ *prompb.TimeSeries, n uint64) (Endpoint, error) { if n > 0 { - return "", &insufficientNodesError{have: 1, want: n + 1} + return Endpoint{}, &insufficientNodesError{have: 1, want: n + 1} } - return string(s), nil + return Endpoint{ + Address: string(s), + CapNProtoAddress: string(s), + }, nil } // simpleHashring represents a group of nodes handling write requests by hashmoding individual series. -type simpleHashring []string +type simpleHashring []Endpoint func newSimpleHashring(endpoints []Endpoint) (Hashring, error) { - addresses := make([]string, len(endpoints)) for i := range endpoints { if endpoints[i].AZ != "" { return nil, errors.New("Hashmod algorithm does not support AZ aware hashring configuration. Either use Ketama or remove AZ configuration.") } - addresses[i] = endpoints[i].Address } - sort.Strings(addresses) + slices.SortFunc(endpoints, func(a, b Endpoint) int { + return strings.Compare(a.Address, b.Address) + }) - return simpleHashring(addresses), nil + return simpleHashring(endpoints), nil } -func (s simpleHashring) Nodes() []string { +func (s simpleHashring) Nodes() []Endpoint { return s } // Get returns a target to handle the given tenant and time series. -func (s simpleHashring) Get(tenant string, ts *prompb.TimeSeries) (string, error) { +func (s simpleHashring) Get(tenant string, ts *prompb.TimeSeries) (Endpoint, error) { return s.GetN(tenant, ts, 0) } // GetN returns the nth target to handle the given tenant and time series. 
-func (s simpleHashring) GetN(tenant string, ts *prompb.TimeSeries, n uint64) (string, error) { +func (s simpleHashring) GetN(tenant string, ts *prompb.TimeSeries, n uint64) (Endpoint, error) { if n >= uint64(len(s)) { - return "", &insufficientNodesError{have: uint64(len(s)), want: n + 1} + return Endpoint{}, &insufficientNodesError{have: uint64(len(s)), want: n + 1} } return s[(labelpb.HashWithPrefix(tenant, ts.Labels)+n)%uint64(len(s))], nil @@ -134,7 +139,6 @@ type ketamaHashring struct { endpoints []Endpoint sections sections numEndpoints uint64 - nodes []string } func newKetamaHashring(endpoints []Endpoint, sectionsPerNode int, replicationFactor uint64) (*ketamaHashring, error) { @@ -148,10 +152,8 @@ func newKetamaHashring(endpoints []Endpoint, sectionsPerNode int, replicationFac availabilityZones := make(map[string]struct{}) ringSections := make(sections, 0, numSections) - nodes := []string{} for endpointIndex, endpoint := range endpoints { availabilityZones[endpoint.AZ] = struct{}{} - nodes = append(nodes, endpoint.Address) for i := 1; i <= sectionsPerNode; i++ { _, _ = hash.Write([]byte(endpoint.Address + ":" + strconv.Itoa(i))) n := §ion{ @@ -166,19 +168,17 @@ func newKetamaHashring(endpoints []Endpoint, sectionsPerNode int, replicationFac } } sort.Sort(ringSections) - sort.Strings(nodes) calculateSectionReplicas(ringSections, replicationFactor, availabilityZones) return &ketamaHashring{ endpoints: endpoints, sections: ringSections, numEndpoints: uint64(len(endpoints)), - nodes: nodes, }, nil } -func (k *ketamaHashring) Nodes() []string { - return k.nodes +func (k *ketamaHashring) Nodes() []Endpoint { + return k.endpoints } func sizeOfLeastOccupiedAZ(azSpread map[string]int64) int64 { @@ -219,13 +219,13 @@ func calculateSectionReplicas(ringSections sections, replicationFactor uint64, a } } -func (c ketamaHashring) Get(tenant string, ts *prompb.TimeSeries) (string, error) { +func (c ketamaHashring) Get(tenant string, ts *prompb.TimeSeries) (Endpoint, error) { return c.GetN(tenant, ts, 0) } -func (c ketamaHashring) GetN(tenant string, ts *prompb.TimeSeries, n uint64) (string, error) { +func (c ketamaHashring) GetN(tenant string, ts *prompb.TimeSeries, n uint64) (Endpoint, error) { if n >= c.numEndpoints { - return "", &insufficientNodesError{have: c.numEndpoints, want: n + 1} + return Endpoint{}, &insufficientNodesError{have: c.numEndpoints, want: n + 1} } v := labelpb.HashWithPrefix(tenant, ts.Labels) @@ -241,7 +241,7 @@ func (c ketamaHashring) GetN(tenant string, ts *prompb.TimeSeries, n uint64) (st } endpointIndex := c.sections[i].replicas[n] - return c.endpoints[endpointIndex].Address, nil + return c.endpoints[endpointIndex], nil } // multiHashring represents a set of hashrings. @@ -257,16 +257,16 @@ type multiHashring struct { // and read from. mu sync.RWMutex - nodes []string + nodes []Endpoint } // Get returns a target to handle the given tenant and time series. -func (m *multiHashring) Get(tenant string, ts *prompb.TimeSeries) (string, error) { +func (m *multiHashring) Get(tenant string, ts *prompb.TimeSeries) (Endpoint, error) { return m.GetN(tenant, ts, 0) } // GetN returns the nth target to handle the given tenant and time series. 
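Editor's note (not part of the patch): whichever ring implementation is in use, the replica-selection contract is unchanged by this change of return type: the n-th replica target for a series is still obtained with GetN(tenant, ts, n), and the concrete rings return an insufficientNodesError when asked for more replicas than they have nodes. A small sketch of how a caller could collect every replica's Endpoint; the replicaEndpoints function is illustrative only:

	// Illustrative only: gather the endpoints that should receive the replicas of a single series.
	func replicaEndpoints(ring Hashring, tenant string, ts *prompb.TimeSeries, replicationFactor uint64) ([]Endpoint, error) {
		endpoints := make([]Endpoint, 0, replicationFactor)
		for n := uint64(0); n < replicationFactor; n++ {
			ep, err := ring.GetN(tenant, ts, n)
			if err != nil {
				return nil, err // e.g. an insufficientNodesError when the ring is smaller than the replication factor.
			}
			endpoints = append(endpoints, ep)
		}
		return endpoints, nil
	}
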
-func (m *multiHashring) GetN(tenant string, ts *prompb.TimeSeries, n uint64) (string, error) { +func (m *multiHashring) GetN(tenant string, ts *prompb.TimeSeries, n uint64) (Endpoint, error) { m.mu.RLock() h, ok := m.cache[tenant] m.mu.RUnlock() @@ -292,7 +292,7 @@ func (m *multiHashring) GetN(tenant string, ts *prompb.TimeSeries, n uint64) (st case TenantMatcherGlob: matches, err := filepath.Match(tenantPattern, tenant) if err != nil { - return "", fmt.Errorf("error matching tenant pattern %s (tenant %s): %w", tenantPattern, tenant, err) + return Endpoint{}, fmt.Errorf("error matching tenant pattern %s (tenant %s): %w", tenantPattern, tenant, err) } found = matches case TenantMatcherTypeExact: @@ -314,10 +314,10 @@ func (m *multiHashring) GetN(tenant string, ts *prompb.TimeSeries, n uint64) (st return m.hashrings[i].GetN(tenant, ts, n) } } - return "", errors.New("no matching hashring to handle tenant") + return Endpoint{}, errors.New("no matching hashring to handle tenant") } -func (m *multiHashring) Nodes() []string { +func (m *multiHashring) Nodes() []Endpoint { return m.nodes } @@ -352,7 +352,9 @@ func NewMultiHashring(algorithm HashringAlgorithm, replicationFactor uint64, cfg } m.tenantSets = append(m.tenantSets, t) } - sort.Strings(m.nodes) + slices.SortFunc(m.nodes, func(a, b Endpoint) int { + return strings.Compare(a.Address, b.Address) + }) return m, nil } diff --git a/pkg/receive/hashring_test.go b/pkg/receive/hashring_test.go index ad6d4db855..3079490077 100644 --- a/pkg/receive/hashring_test.go +++ b/pkg/receive/hashring_test.go @@ -148,7 +148,7 @@ func TestHashringGet(t *testing.T) { t.Errorf("case %q: got unexpected error: %v", tc.name, err) continue } - if _, ok := tc.nodes[h]; !ok { + if _, ok := tc.nodes[h.Address]; !ok { t.Errorf("case %q: got unexpected node %q", tc.name, h) } continue @@ -236,7 +236,7 @@ func TestKetamaHashringGet(t *testing.T) { result, err := hashRing.GetN("tenant", test.ts, test.n) require.NoError(t, err) - require.Equal(t, test.expectedNode, result) + require.Equal(t, test.expectedNode, result.Address) }) } } @@ -459,7 +459,7 @@ func TestKetamaHashringEvenAZSpread(t *testing.T) { testutil.Ok(t, err) for _, n := range tt.nodes { - if !strings.HasPrefix(n.Address, r) { + if !strings.HasPrefix(n.Address, r.Address) { continue } azSpread[n.AZ]++ @@ -561,7 +561,7 @@ func TestKetamaHashringEvenNodeSpread(t *testing.T) { r, err := hashRing.GetN(tenant, ts, uint64(j)) testutil.Ok(t, err) - nodeSpread[r]++ + nodeSpread[r.Address]++ } } for _, node := range nodeSpread { @@ -636,7 +636,7 @@ func assignReplicatedSeries(series []prompb.TimeSeries, nodes []Endpoint, replic if err != nil { return nil, err } - assignments[result] = append(assignments[result], ts) + assignments[result.Address] = append(assignments[result.Address], ts) } } diff --git a/pkg/receive/writecapnp/client.go b/pkg/receive/writecapnp/client.go new file mode 100644 index 0000000000..0a20d90d44 --- /dev/null +++ b/pkg/receive/writecapnp/client.go @@ -0,0 +1,146 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. 
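Editor's note (not part of the patch): the new file pkg/receive/writecapnp/client.go that follows implements the Cap'n Proto replication client: it dials a peer over TCP, wraps the connection in a packed-stream RPC transport, retries on disconnected errors, and maps write errors to gRPC status codes. A minimal usage sketch, assuming the usual imports (context, github.com/go-kit/log, storepb, prompb, writecapnp); the function name, peer address, tenant and series values are placeholders:

	// Illustrative only: replicate one write request to a peer via its Cap'n Proto listener.
	func replicateToPeer(ctx context.Context, capnpAddr, tenant string, series []prompb.TimeSeries) error {
		client := writecapnp.NewRemoteWriteClient(writecapnp.NewTCPDialer(capnpAddr), log.NewNopLogger())
		defer client.Close()

		// Failures come back as gRPC status errors (codes.Unavailable, codes.AlreadyExists, ...).
		_, err := client.RemoteWrite(ctx, &storepb.WriteRequest{Tenant: tenant, Timeseries: series})
		return err
	}
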
+ +package writecapnp + +import ( + "context" + "net" + "sync" + + "capnproto.org/go/capnp/v3" + "capnproto.org/go/capnp/v3/rpc" + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/thanos-io/thanos/pkg/store/storepb" +) + +type Dialer interface { + Dial() (net.Conn, error) +} + +type TCPDialer struct { + address string +} + +func NewTCPDialer(address string) *TCPDialer { + return &TCPDialer{address: address} +} + +func (t TCPDialer) Dial() (net.Conn, error) { + addr, err := net.ResolveTCPAddr("tcp", t.address) + if err != nil { + return nil, err + } + conn, err := net.DialTCP("tcp", nil, addr) + if err != nil { + return nil, errors.Wrapf(err, "failed to dial peer %s", t.address) + } + return conn, nil +} + +type RemoteWriteClient struct { + mu sync.Mutex + + dialer Dialer + conn *rpc.Conn + + writer Writer + logger log.Logger +} + +func NewRemoteWriteClient(dialer Dialer, logger log.Logger) *RemoteWriteClient { + return &RemoteWriteClient{ + dialer: dialer, + logger: logger, + } +} + +func (r *RemoteWriteClient) RemoteWrite(ctx context.Context, in *storepb.WriteRequest, _ ...grpc.CallOption) (*storepb.WriteResponse, error) { + return r.writeWithReconnect(ctx, 2, in) +} + +func (r *RemoteWriteClient) writeWithReconnect(ctx context.Context, numReconnects int, in *storepb.WriteRequest) (*storepb.WriteResponse, error) { + if err := r.connect(ctx); err != nil { + return nil, err + } + arena := capnp.SingleSegment(nil) + defer arena.Release() + + result, release := r.writer.Write(ctx, func(params Writer_write_Params) error { + _, seg, err := capnp.NewMessage(arena) + if err != nil { + return err + } + wr, err := NewRootWriteRequest(seg) + if err != nil { + return err + } + if err := params.SetWr(wr); err != nil { + return err + } + wr, err = params.Wr() + if err != nil { + return err + } + return BuildInto(wr, in.Tenant, in.Timeseries) + }) + defer release() + + s, err := result.Struct() + if err != nil { + if numReconnects > 0 && capnp.IsDisconnected(err) { + level.Warn(r.logger).Log("msg", "rpc failed, reconnecting") + if err := r.Close(); err != nil { + return nil, err + } + numReconnects-- + return r.writeWithReconnect(ctx, numReconnects, in) + } + return nil, errors.Wrap(err, "failed writing to peer") + } + switch s.Error() { + case WriteError_unavailable: + return nil, status.Error(codes.Unavailable, "rpc failed") + case WriteError_alreadyExists: + return nil, status.Error(codes.AlreadyExists, "rpc failed") + case WriteError_invalidArgument: + return nil, status.Error(codes.InvalidArgument, "rpc failed") + case WriteError_internal: + return nil, status.Error(codes.Internal, "rpc failed") + default: + return &storepb.WriteResponse{}, nil + } +} + +func (r *RemoteWriteClient) connect(ctx context.Context) error { + r.mu.Lock() + defer r.mu.Unlock() + if r.conn != nil { + return nil + } + + conn, err := r.dialer.Dial() + if err != nil { + return errors.Wrap(err, "failed to dial peer") + } + r.conn = rpc.NewConn(rpc.NewPackedStreamTransport(conn), nil) + r.writer = Writer(r.conn.Bootstrap(ctx)) + return nil +} + +func (r *RemoteWriteClient) Close() error { + r.mu.Lock() + if r.conn != nil { + conn := r.conn + r.conn = nil + go conn.Close() + } + r.mu.Unlock() + return nil +} diff --git a/pkg/receive/writecapnp/marshal.go b/pkg/receive/writecapnp/marshal.go new file mode 100644 index 0000000000..2d42d60b84 --- /dev/null +++ 
b/pkg/receive/writecapnp/marshal.go @@ -0,0 +1,281 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package writecapnp + +import ( + "capnproto.org/go/capnp/v3" + + "github.com/thanos-io/thanos/pkg/store/labelpb" + "github.com/thanos-io/thanos/pkg/store/storepb/prompb" +) + +func Marshal(tenant string, tsreq []prompb.TimeSeries) ([]byte, error) { + wr, err := Build(tenant, tsreq) + if err != nil { + return nil, err + } + + return wr.Message().Marshal() +} + +func MarshalPacked(tenant string, tsreq []prompb.TimeSeries) ([]byte, error) { + wr, err := Build(tenant, tsreq) + if err != nil { + return nil, err + } + + return wr.Message().MarshalPacked() +} + +func Build(tenant string, tsreq []prompb.TimeSeries) (WriteRequest, error) { + arena := capnp.SingleSegment(nil) + _, seg, err := capnp.NewMessage(arena) + if err != nil { + return WriteRequest{}, err + } + wr, err := NewRootWriteRequest(seg) + if err != nil { + return WriteRequest{}, err + } + if err := BuildInto(wr, tenant, tsreq); err != nil { + return WriteRequest{}, err + } + return wr, nil +} + +func BuildInto(wr WriteRequest, tenant string, tsreq []prompb.TimeSeries) error { + if err := wr.SetTenant(tenant); err != nil { + return err + } + + series, err := wr.NewTimeSeries(int32(len(tsreq))) + if err != nil { + return err + } + builder := newSymbolsBuilder() + for i, ts := range tsreq { + tsc := series.At(i) + + lblsc, err := tsc.NewLabels(int32(len(ts.Labels))) + if err != nil { + return err + } + if err := marshalLabels(lblsc, ts.Labels, builder); err != nil { + return err + } + if err := marshalSamples(tsc, ts.Samples); err != nil { + return err + } + if err := marshalHistograms(tsc, ts.Histograms); err != nil { + return err + } + if err := marshalExemplars(tsc, ts.Exemplars, builder); err != nil { + return err + } + } + + symbols, err := wr.NewSymbols() + if err != nil { + return err + } + return marshalSymbols(builder, symbols) +} + +func marshalSymbols(builder *symbolsBuilder, symbols Symbols) error { + offsets, err := symbols.NewOffsets(builder.len()) + if err != nil { + return err + } + data := make([]byte, builder.symbolsSize) + for k, entry := range builder.table { + end := entry.start + uint32(len(k)) + copy(data[entry.start:end], k) + offsets.Set(int(entry.index), end) + } + + return symbols.SetData(data) +} + +func marshalLabels(lbls Label_List, pbLbls []labelpb.ZLabel, symbols *symbolsBuilder) error { + for i, pbLbl := range pbLbls { + lbl := lbls.At(i) + lbl.SetName(symbols.addEntry(pbLbl.Name)) + lbl.SetValue(symbols.addEntry(pbLbl.Value)) + } + return nil +} + +func marshalSamples(ts TimeSeries, pbSamples []prompb.Sample) error { + samples, err := ts.NewSamples(int32(len(pbSamples))) + if err != nil { + return err + } + + for j, sample := range pbSamples { + sc := samples.At(j) + sc.SetTimestamp(sample.Timestamp) + sc.SetValue(sample.Value) + } + return nil +} + +func marshalHistograms(ts TimeSeries, pbHistograms []prompb.Histogram) error { + if len(pbHistograms) == 0 { + return nil + } + histograms, err := ts.NewHistograms(int32(len(pbHistograms))) + if err != nil { + return err + } + for i, h := range pbHistograms { + if err := marshalHistogram(histograms.At(i), h); err != nil { + return err + } + } + return nil +} + +func marshalHistogram(histogram Histogram, h prompb.Histogram) error { + histogram.SetResetHint(Histogram_ResetHint(h.ResetHint)) + switch h.Count.(type) { + case *prompb.Histogram_CountInt: + histogram.Count().SetCountInt(h.GetCountInt()) + case 
*prompb.Histogram_CountFloat: + histogram.Count().SetCountFloat(h.GetCountFloat()) + } + histogram.SetSum(h.Sum) + histogram.SetSchema(h.Schema) + histogram.SetZeroThreshold(h.ZeroThreshold) + + switch h.ZeroCount.(type) { + case *prompb.Histogram_ZeroCountInt: + histogram.ZeroCount().SetZeroCountInt(h.GetZeroCountInt()) + case *prompb.Histogram_ZeroCountFloat: + histogram.ZeroCount().SetZeroCountFloat(h.GetZeroCountFloat()) + } + + // Negative spans, deltas and counts. + negativeSpans, err := histogram.NewNegativeSpans(int32(len(h.NegativeSpans))) + if err != nil { + return err + } + if err := marshalSpans(negativeSpans, h.NegativeSpans); err != nil { + return err + } + negativeDeltas, err := histogram.NewNegativeDeltas(int32(len(h.NegativeDeltas))) + if err != nil { + return err + } + marshalInt64List(negativeDeltas, h.NegativeDeltas) + + negativeCounts, err := histogram.NewNegativeCounts(int32(len(h.NegativeCounts))) + if err != nil { + return err + } + marshalFloat64List(negativeCounts, h.NegativeCounts) + + // Positive spans, deltas and counts. + positiveSpans, err := histogram.NewPositiveSpans(int32(len(h.PositiveSpans))) + if err != nil { + return err + } + if err := marshalSpans(positiveSpans, h.PositiveSpans); err != nil { + return err + } + positiveDeltas, err := histogram.NewPositiveDeltas(int32(len(h.PositiveDeltas))) + if err != nil { + return err + } + marshalInt64List(positiveDeltas, h.PositiveDeltas) + + positiveCounts, err := histogram.NewPositiveCounts(int32(len(h.PositiveCounts))) + if err != nil { + return err + } + marshalFloat64List(positiveCounts, h.PositiveCounts) + + histogram.SetTimestamp(h.Timestamp) + + return nil +} + +func marshalSpans(spans BucketSpan_List, pbSpans []prompb.BucketSpan) error { + for j, s := range pbSpans { + span := spans.At(j) + span.SetOffset(s.Offset) + span.SetLength(s.Length) + } + return nil +} + +func marshalExemplars(ts TimeSeries, pbExemplars []prompb.Exemplar, symbols *symbolsBuilder) error { + if len(pbExemplars) == 0 { + return nil + } + + exemplars, err := ts.NewExemplars(int32(len(pbExemplars))) + if err != nil { + return err + } + for i := 0; i < len(pbExemplars); i++ { + ex := exemplars.At(i) + + lbls, err := ex.NewLabels(int32(len(pbExemplars[i].Labels))) + if err != nil { + return err + } + if err := marshalLabels(lbls, pbExemplars[i].Labels, symbols); err != nil { + return err + } + ex.SetValue(pbExemplars[i].Value) + ex.SetTimestamp(pbExemplars[i].Timestamp) + } + return nil +} + +func marshalInt64List(list capnp.Int64List, ints []int64) { + for j, d := range ints { + list.Set(j, d) + } +} + +func marshalFloat64List(list capnp.Float64List, ints []float64) { + for j, d := range ints { + list.Set(j, d) + } +} + +type symbolsBuilder struct { + table map[string]tableEntry + symbolsSize uint32 +} + +func newSymbolsBuilder() *symbolsBuilder { + return &symbolsBuilder{ + table: make(map[string]tableEntry), + } +} + +func (s *symbolsBuilder) addEntry(item string) uint32 { + entry, ok := s.table[item] + if ok { + return entry.index + } + entry = tableEntry{ + index: uint32(len(s.table)), + start: s.symbolsSize, + } + s.symbolsSize += uint32(len(item)) + s.table[item] = entry + return entry.index +} + +func (s *symbolsBuilder) len() int32 { + return int32(len(s.table)) +} + +type tableEntry struct { + index uint32 + start uint32 +} diff --git a/pkg/receive/writecapnp/marshal_bench_test.go b/pkg/receive/writecapnp/marshal_bench_test.go new file mode 100644 index 0000000000..2b29426daf --- /dev/null +++ 
b/pkg/receive/writecapnp/marshal_bench_test.go @@ -0,0 +1,144 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package writecapnp + +import ( + "bytes" + "fmt" + "testing" + + "capnproto.org/go/capnp/v3" + "github.com/stretchr/testify/require" + + "github.com/thanos-io/thanos/pkg/store/labelpb" + "github.com/thanos-io/thanos/pkg/store/storepb" + "github.com/thanos-io/thanos/pkg/store/storepb/prompb" +) + +func BenchmarkMarshalWriteRequest(b *testing.B) { + const ( + numSeries = 2 + numClusters = 3 + numPods = 2 + ) + series := make([]prompb.TimeSeries, 0, numSeries) + for i := 0; i < numSeries; i++ { + lbls := make([]labelpb.ZLabel, 0, numClusters*numPods) + for j := 0; j < numClusters; j++ { + for k := 0; k < numPods; k++ { + lbls = append(lbls, labelpb.ZLabel{ + Name: fmt.Sprintf("cluster-%d", j), + Value: fmt.Sprintf("pod-%d", k), + }) + } + } + series = append(series, prompb.TimeSeries{ + Labels: lbls, + Samples: []prompb.Sample{ + { + Value: 1, + Timestamp: 2, + }, + }, + }) + } + wreq := storepb.WriteRequest{ + Tenant: "example-tenant", + Timeseries: series, + } + var ( + protoBytes, err = wreq.Marshal() + capnprotoBytes, paddedErr = Marshal(wreq.Tenant, wreq.Timeseries) + capnprotoBytesPacked, packedErr = MarshalPacked(wreq.Tenant, wreq.Timeseries) + ) + require.NoError(b, err) + require.NoError(b, paddedErr) + require.NoError(b, packedErr) + b.Run("marshal_proto", func(b *testing.B) { + for i := 0; i < b.N; i++ { + var err error + _, err = wreq.Marshal() + require.NoError(b, err) + } + }) + b.Run("build", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _, err := Build("example_tenant", wreq.Timeseries) + require.NoError(b, err) + } + }) + b.Run("encode", func(b *testing.B) { + for i := 0; i < b.N; i++ { + var err error + wr, err := Build("example_tenant", wreq.Timeseries) + require.NoError(b, err) + + buf := bytes.NewBuffer(nil) + require.NoError(b, capnp.NewEncoder(buf).Encode(wr.Message())) + } + }) + b.Run("encode_packed", func(b *testing.B) { + for i := 0; i < b.N; i++ { + var err error + wr, err := Build("example_tenant", wreq.Timeseries) + require.NoError(b, err) + + buf := bytes.NewBuffer(nil) + require.NoError(b, capnp.NewPackedEncoder(buf).Encode(wr.Message())) + } + }) + + b.Run("unmarshal_proto", func(b *testing.B) { + for i := 0; i < b.N; i++ { + wr := storepb.WriteRequest{} + require.NoError(b, wr.Unmarshal(protoBytes)) + } + }) + b.Run("unmarshal", func(b *testing.B) { + for i := 0; i < b.N; i++ { + msg, err := capnp.Unmarshal(capnprotoBytes) + require.NoError(b, err) + + _, err = ReadRootWriteRequest(msg) + require.NoError(b, err) + } + }) + b.Run("unmarshal_packed", func(b *testing.B) { + for i := 0; i < b.N; i++ { + msg, err := capnp.UnmarshalPacked(capnprotoBytesPacked) + require.NoError(b, err) + + _, err = ReadRootWriteRequest(msg) + require.NoError(b, err) + } + }) + + b.Run("decoder", func(b *testing.B) { + for i := 0; i < b.N; i++ { + msg, err := capnp.NewDecoder(bytes.NewReader(capnprotoBytes)).Decode() + require.NoError(b, err) + + _, err = ReadRootWriteRequest(msg) + require.NoError(b, err) + } + }) + b.Run("decoder_packed", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + msg, err := capnp.NewPackedDecoder(bytes.NewReader(capnprotoBytesPacked)).Decode() + require.NoError(b, err) + + wr, err := ReadRootWriteRequest(msg) + require.NoError(b, err) + + var ts Series + iter, err := NewRequest(wr) + require.NoError(b, err) + for iter.Next() { + require.NoError(b, 
iter.At(&ts)) + } + iter.Close() + } + }) +} diff --git a/pkg/receive/writecapnp/marshal_test.go b/pkg/receive/writecapnp/marshal_test.go new file mode 100644 index 0000000000..bffc7a6d6a --- /dev/null +++ b/pkg/receive/writecapnp/marshal_test.go @@ -0,0 +1,222 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package writecapnp + +import ( + "fmt" + "testing" + + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + + "capnproto.org/go/capnp/v3" + "github.com/prometheus/prometheus/tsdb/tsdbutil" + "github.com/stretchr/testify/require" + + "github.com/thanos-io/thanos/pkg/store/labelpb" + "github.com/thanos-io/thanos/pkg/store/storepb" + "github.com/thanos-io/thanos/pkg/store/storepb/prompb" +) + +func TestMarshalWriteRequest(t *testing.T) { + testHistogram := &histogram.Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 18.4, + Schema: 1, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{1, 1, -1, 0}, + } + + wreq := storepb.WriteRequest{ + Tenant: "example-tenant", + Timeseries: []prompb.TimeSeries{ + { + Labels: []labelpb.ZLabel{ + {Name: "__name__", Value: "up"}, + {Name: "job", Value: "prometheus"}, + }, + Samples: []prompb.Sample{ + {Timestamp: 1, Value: 1}, + {Timestamp: 2, Value: 2}, + }, + Histograms: []prompb.Histogram{ + prompb.HistogramToHistogramProto(1, testHistogram), + prompb.FloatHistogramToHistogramProto(2, tsdbutil.GenerateTestFloatHistogram(2)), + }, + Exemplars: []prompb.Exemplar{ + { + Labels: []labelpb.ZLabel{{Name: "traceID", Value: "1234"}}, + Value: 10, + Timestamp: 14, + }, + }, + }, + { + Labels: []labelpb.ZLabel{ + {Name: "__name__", Value: "up"}, + {Name: "job", Value: "thanos"}, + }, + Samples: []prompb.Sample{ + {Timestamp: 3, Value: 3}, + {Timestamp: 4, Value: 4}, + }, + }, + }, + } + b, err := Marshal(wreq.Tenant, wreq.Timeseries) + require.NoError(t, err) + + msg, err := capnp.Unmarshal(b) + require.NoError(t, err) + + wr, err := ReadRootWriteRequest(msg) + require.NoError(t, err) + + tenant, err := wr.Tenant() + require.NoError(t, err) + require.Equal(t, wreq.Tenant, tenant) + + series, err := wr.TimeSeries() + require.NoError(t, err) + require.Equal(t, len(wreq.Timeseries), series.Len()) + + var ( + i int + actual Series + ) + request, err := NewRequest(wr) + require.NoError(t, err) + for request.Next() { + require.NoError(t, request.At(&actual)) + expected := wreq.Timeseries[i] + + t.Run("test_labels", func(t *testing.T) { + builder := labels.ScratchBuilder{} + for _, lbl := range expected.Labels { + builder.Add(lbl.Name, lbl.Value) + } + builder.Sort() + require.Equal(t, builder.Labels(), actual.Labels, fmt.Sprintf("incorrect series labels at %d", i)) + }) + t.Run("test_float_samples", func(t *testing.T) { + expectedSamples := make([]FloatSample, 0) + for _, s := range expected.Samples { + expectedSamples = append(expectedSamples, FloatSample{ + Value: s.Value, + Timestamp: s.Timestamp, + }) + } + require.Equal(t, expectedSamples, actual.Samples, fmt.Sprintf("incorrect series samples at %d", i)) + }) + t.Run("test_histogram_samples", func(t *testing.T) { + for i, hs := range expected.Histograms { + require.Equal(t, hs.Timestamp, actual.Histograms[i].Timestamp) + if hs.IsFloatHistogram() { + fh 
:= prompb.FloatHistogramProtoToFloatHistogram(hs) + require.Equal(t, fh, actual.Histograms[i].FloatHistogram) + } else { + h := prompb.HistogramProtoToHistogram(hs) + require.Equal(t, h, actual.Histograms[i].Histogram) + } + } + }) + t.Run("test_exemplars", func(t *testing.T) { + for i, ex := range expected.Exemplars { + require.Equal(t, labelpb.ZLabelsToPromLabels(ex.Labels), actual.Exemplars[i].Labels) + require.Equal(t, ex.Timestamp, actual.Exemplars[i].Ts) + require.Equal(t, ex.Value, actual.Exemplars[i].Value) + } + }) + + i++ + } +} + +func TestMarshalWithMultipleHistogramSeries(t *testing.T) { + wreq := storepb.WriteRequest{ + Tenant: "example-tenant", + Timeseries: []prompb.TimeSeries{ + { + Labels: []labelpb.ZLabel{ + {Name: "job", Value: "prometheus-1"}, + }, + Histograms: []prompb.Histogram{ + prompb.HistogramToHistogramProto(1, &histogram.Histogram{}), + prompb.HistogramToHistogramProto(1, tsdbutil.GenerateTestHistogram(1)), + prompb.FloatHistogramToHistogramProto(2, tsdbutil.GenerateTestFloatHistogram(2)), + }, + }, + { + Labels: []labelpb.ZLabel{ + {Name: "job", Value: "prometheus-2"}, + }, + Histograms: []prompb.Histogram{ + prompb.HistogramToHistogramProto(1, tsdbutil.GenerateTestHistogram(1)), + prompb.FloatHistogramToHistogramProto(2, tsdbutil.GenerateTestFloatHistogram(2)), + prompb.HistogramToHistogramProto(1, &histogram.Histogram{}), + }, + }, + }, + } + b, err := Marshal(wreq.Tenant, wreq.Timeseries) + require.NoError(t, err) + + msg, err := capnp.Unmarshal(b) + require.NoError(t, err) + + wr, err := ReadRootWriteRequest(msg) + require.NoError(t, err) + + tenant, err := wr.Tenant() + require.NoError(t, err) + require.Equal(t, wreq.Tenant, tenant) + + series, err := wr.TimeSeries() + require.NoError(t, err) + require.Equal(t, len(wreq.Timeseries), series.Len()) + var ( + current Series + + readHistograms []*histogram.Histogram + readFloatHistograms []*histogram.FloatHistogram + ) + request, err := NewRequest(wr) + require.NoError(t, err) + + for request.Next() { + require.NoError(t, request.At(¤t)) + for _, h := range current.Histograms { + if h.FloatHistogram != nil { + readFloatHistograms = append(readFloatHistograms, h.FloatHistogram) + } else { + readHistograms = append(readHistograms, h.Histogram) + } + } + } + var ( + histograms []*histogram.Histogram + floatHistograms []*histogram.FloatHistogram + ) + for _, ts := range wreq.Timeseries { + for _, h := range ts.Histograms { + if h.IsFloatHistogram() { + floatHistograms = append(floatHistograms, prompb.FloatHistogramProtoToFloatHistogram(h)) + } else { + histograms = append(histograms, prompb.HistogramProtoToHistogram(h)) + } + } + } + require.Equal(t, histograms, readHistograms) + require.Equal(t, floatHistograms, readFloatHistograms) +} diff --git a/pkg/receive/writecapnp/write_request.capnp b/pkg/receive/writecapnp/write_request.capnp new file mode 100644 index 0000000000..7c82edc5b3 --- /dev/null +++ b/pkg/receive/writecapnp/write_request.capnp @@ -0,0 +1,92 @@ +using Go = import "/go.capnp"; +@0x85d3acc39d94e0f8; + +$Go.package("writecapnp"); +$Go.import("writecapnp"); + +struct Symbols { + data @0 :Data; + offsets @1 :List(UInt32); +} + +struct Label { + name @0 :UInt32; + value @1 :UInt32; +} + +struct Sample { + timestamp @0 :Int64; + value @1 :Float64; +} + +struct BucketSpan { + offset @0 :Int32; + length @1 :UInt32; +} + +struct Histogram { + enum ResetHint { + unknown @0; + yes @1; + no @2; + gauge @3; + } + + count :union { + countInt @0 :UInt64; + countFloat @1 :Float64; + } + + sum @2 :Float64; + 
schema @3 :Int32; + zeroThreshold @4 :Float64; + + zeroCount :union { + zeroCountInt @5 :UInt64; + zeroCountFloat @6 :Float64; + } + + negativeSpans @7 :List(BucketSpan); + negativeDeltas @8 :List(Int64); + negativeCounts @9 :List(Float64); + + positiveSpans @10 :List(BucketSpan); + positiveDeltas @11 :List(Int64); + positiveCounts @12 :List(Float64); + + resetHint @13 :ResetHint; + + timestamp @14 :Int64; +} + +struct Exemplar { + labels @0 :List(Label); + value @1 :Float64; + timestamp @2 :Int64; +} + +struct TimeSeries { + labels @0 :List(Label); + samples @1 :List(Sample); + histograms @2: List(Histogram); + exemplars @3: List(Exemplar); +} + +struct WriteRequest { + symbols @0: Symbols; + timeSeries @1 :List(TimeSeries); + tenant @2: Text; +} + +enum WriteError { + none @0; + unavailable @1; + alreadyExists @2; + invalidArgument @3; + internal @4; +} + +interface Writer { + write @0 (wr :WriteRequest) -> (error :WriteError); +} + diff --git a/pkg/receive/writecapnp/write_request.capnp.go b/pkg/receive/writecapnp/write_request.capnp.go new file mode 100644 index 0000000000..45b35e985d --- /dev/null +++ b/pkg/receive/writecapnp/write_request.capnp.go @@ -0,0 +1,1694 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +// Code generated by capnpc-go. DO NOT EDIT. + +package writecapnp + +import ( + context "context" + math "math" + strconv "strconv" + + capnp "capnproto.org/go/capnp/v3" + text "capnproto.org/go/capnp/v3/encoding/text" + fc "capnproto.org/go/capnp/v3/flowcontrol" + schemas "capnproto.org/go/capnp/v3/schemas" + server "capnproto.org/go/capnp/v3/server" +) + +type Symbols capnp.Struct + +// Symbols_TypeID is the unique identifier for the type Symbols. +const Symbols_TypeID = 0xab79e1a6ecfeb87a + +func NewSymbols(s *capnp.Segment) (Symbols, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}) + return Symbols(st), err +} + +func NewRootSymbols(s *capnp.Segment) (Symbols, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}) + return Symbols(st), err +} + +func ReadRootSymbols(msg *capnp.Message) (Symbols, error) { + root, err := msg.Root() + return Symbols(root.Struct()), err +} + +func (s Symbols) String() string { + str, _ := text.Marshal(0xab79e1a6ecfeb87a, capnp.Struct(s)) + return str +} + +func (s Symbols) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Symbols) DecodeFromPtr(p capnp.Ptr) Symbols { + return Symbols(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s Symbols) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Symbols) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s Symbols) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Symbols) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Symbols) Data() ([]byte, error) { + p, err := capnp.Struct(s).Ptr(0) + return []byte(p.Data()), err +} + +func (s Symbols) HasData() bool { + return capnp.Struct(s).HasPtr(0) +} + +func (s Symbols) SetData(v []byte) error { + return capnp.Struct(s).SetData(0, v) +} + +func (s Symbols) Offsets() (capnp.UInt32List, error) { + p, err := capnp.Struct(s).Ptr(1) + return capnp.UInt32List(p.List()), err +} + +func (s Symbols) HasOffsets() bool { + return capnp.Struct(s).HasPtr(1) +} + +func (s Symbols) SetOffsets(v capnp.UInt32List) error { + return capnp.Struct(s).SetPtr(1, v.ToPtr()) +} + +// NewOffsets sets the offsets field to a newly +// allocated 
capnp.UInt32List, preferring placement in s's segment. +func (s Symbols) NewOffsets(n int32) (capnp.UInt32List, error) { + l, err := capnp.NewUInt32List(capnp.Struct(s).Segment(), n) + if err != nil { + return capnp.UInt32List{}, err + } + err = capnp.Struct(s).SetPtr(1, l.ToPtr()) + return l, err +} + +// Symbols_List is a list of Symbols. +type Symbols_List = capnp.StructList[Symbols] + +// NewSymbols creates a new list of Symbols. +func NewSymbols_List(s *capnp.Segment, sz int32) (Symbols_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}, sz) + return capnp.StructList[Symbols](l), err +} + +// Symbols_Future is a wrapper for a Symbols promised by a client call. +type Symbols_Future struct{ *capnp.Future } + +func (f Symbols_Future) Struct() (Symbols, error) { + p, err := f.Future.Ptr() + return Symbols(p.Struct()), err +} + +type Label capnp.Struct + +// Label_TypeID is the unique identifier for the type Label. +const Label_TypeID = 0xf192c7ee07114b32 + +func NewLabel(s *capnp.Segment) (Label, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}) + return Label(st), err +} + +func NewRootLabel(s *capnp.Segment) (Label, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}) + return Label(st), err +} + +func ReadRootLabel(msg *capnp.Message) (Label, error) { + root, err := msg.Root() + return Label(root.Struct()), err +} + +func (s Label) String() string { + str, _ := text.Marshal(0xf192c7ee07114b32, capnp.Struct(s)) + return str +} + +func (s Label) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Label) DecodeFromPtr(p capnp.Ptr) Label { + return Label(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s Label) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Label) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s Label) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Label) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Label) Name() uint32 { + return capnp.Struct(s).Uint32(0) +} + +func (s Label) SetName(v uint32) { + capnp.Struct(s).SetUint32(0, v) +} + +func (s Label) Value() uint32 { + return capnp.Struct(s).Uint32(4) +} + +func (s Label) SetValue(v uint32) { + capnp.Struct(s).SetUint32(4, v) +} + +// Label_List is a list of Label. +type Label_List = capnp.StructList[Label] + +// NewLabel creates a new list of Label. +func NewLabel_List(s *capnp.Segment, sz int32) (Label_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}, sz) + return capnp.StructList[Label](l), err +} + +// Label_Future is a wrapper for a Label promised by a client call. +type Label_Future struct{ *capnp.Future } + +func (f Label_Future) Struct() (Label, error) { + p, err := f.Future.Ptr() + return Label(p.Struct()), err +} + +type Sample capnp.Struct + +// Sample_TypeID is the unique identifier for the type Sample. 
+const Sample_TypeID = 0xef49df6cfa8875de + +func NewSample(s *capnp.Segment) (Sample, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 0}) + return Sample(st), err +} + +func NewRootSample(s *capnp.Segment) (Sample, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 0}) + return Sample(st), err +} + +func ReadRootSample(msg *capnp.Message) (Sample, error) { + root, err := msg.Root() + return Sample(root.Struct()), err +} + +func (s Sample) String() string { + str, _ := text.Marshal(0xef49df6cfa8875de, capnp.Struct(s)) + return str +} + +func (s Sample) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Sample) DecodeFromPtr(p capnp.Ptr) Sample { + return Sample(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s Sample) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Sample) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s Sample) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Sample) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Sample) Timestamp() int64 { + return int64(capnp.Struct(s).Uint64(0)) +} + +func (s Sample) SetTimestamp(v int64) { + capnp.Struct(s).SetUint64(0, uint64(v)) +} + +func (s Sample) Value() float64 { + return math.Float64frombits(capnp.Struct(s).Uint64(8)) +} + +func (s Sample) SetValue(v float64) { + capnp.Struct(s).SetUint64(8, math.Float64bits(v)) +} + +// Sample_List is a list of Sample. +type Sample_List = capnp.StructList[Sample] + +// NewSample creates a new list of Sample. +func NewSample_List(s *capnp.Segment, sz int32) (Sample_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 0}, sz) + return capnp.StructList[Sample](l), err +} + +// Sample_Future is a wrapper for a Sample promised by a client call. +type Sample_Future struct{ *capnp.Future } + +func (f Sample_Future) Struct() (Sample, error) { + p, err := f.Future.Ptr() + return Sample(p.Struct()), err +} + +type BucketSpan capnp.Struct + +// BucketSpan_TypeID is the unique identifier for the type BucketSpan. 
+const BucketSpan_TypeID = 0x983649d193295eae + +func NewBucketSpan(s *capnp.Segment) (BucketSpan, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}) + return BucketSpan(st), err +} + +func NewRootBucketSpan(s *capnp.Segment) (BucketSpan, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}) + return BucketSpan(st), err +} + +func ReadRootBucketSpan(msg *capnp.Message) (BucketSpan, error) { + root, err := msg.Root() + return BucketSpan(root.Struct()), err +} + +func (s BucketSpan) String() string { + str, _ := text.Marshal(0x983649d193295eae, capnp.Struct(s)) + return str +} + +func (s BucketSpan) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (BucketSpan) DecodeFromPtr(p capnp.Ptr) BucketSpan { + return BucketSpan(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s BucketSpan) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s BucketSpan) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s BucketSpan) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s BucketSpan) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s BucketSpan) Offset() int32 { + return int32(capnp.Struct(s).Uint32(0)) +} + +func (s BucketSpan) SetOffset(v int32) { + capnp.Struct(s).SetUint32(0, uint32(v)) +} + +func (s BucketSpan) Length() uint32 { + return capnp.Struct(s).Uint32(4) +} + +func (s BucketSpan) SetLength(v uint32) { + capnp.Struct(s).SetUint32(4, v) +} + +// BucketSpan_List is a list of BucketSpan. +type BucketSpan_List = capnp.StructList[BucketSpan] + +// NewBucketSpan creates a new list of BucketSpan. +func NewBucketSpan_List(s *capnp.Segment, sz int32) (BucketSpan_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}, sz) + return capnp.StructList[BucketSpan](l), err +} + +// BucketSpan_Future is a wrapper for a BucketSpan promised by a client call. +type BucketSpan_Future struct{ *capnp.Future } + +func (f BucketSpan_Future) Struct() (BucketSpan, error) { + p, err := f.Future.Ptr() + return BucketSpan(p.Struct()), err +} + +type Histogram capnp.Struct +type Histogram_count Histogram +type Histogram_zeroCount Histogram +type Histogram_count_Which uint16 + +const ( + Histogram_count_Which_countInt Histogram_count_Which = 0 + Histogram_count_Which_countFloat Histogram_count_Which = 1 +) + +func (w Histogram_count_Which) String() string { + const s = "countIntcountFloat" + switch w { + case Histogram_count_Which_countInt: + return s[0:8] + case Histogram_count_Which_countFloat: + return s[8:18] + + } + return "Histogram_count_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +type Histogram_zeroCount_Which uint16 + +const ( + Histogram_zeroCount_Which_zeroCountInt Histogram_zeroCount_Which = 0 + Histogram_zeroCount_Which_zeroCountFloat Histogram_zeroCount_Which = 1 +) + +func (w Histogram_zeroCount_Which) String() string { + const s = "zeroCountIntzeroCountFloat" + switch w { + case Histogram_zeroCount_Which_zeroCountInt: + return s[0:12] + case Histogram_zeroCount_Which_zeroCountFloat: + return s[12:26] + + } + return "Histogram_zeroCount_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +// Histogram_TypeID is the unique identifier for the type Histogram. 
+const Histogram_TypeID = 0xc4dd3c458256382a + +func NewHistogram(s *capnp.Segment) (Histogram, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 56, PointerCount: 6}) + return Histogram(st), err +} + +func NewRootHistogram(s *capnp.Segment) (Histogram, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 56, PointerCount: 6}) + return Histogram(st), err +} + +func ReadRootHistogram(msg *capnp.Message) (Histogram, error) { + root, err := msg.Root() + return Histogram(root.Struct()), err +} + +func (s Histogram) String() string { + str, _ := text.Marshal(0xc4dd3c458256382a, capnp.Struct(s)) + return str +} + +func (s Histogram) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Histogram) DecodeFromPtr(p capnp.Ptr) Histogram { + return Histogram(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s Histogram) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Histogram) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s Histogram) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Histogram) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Histogram) Count() Histogram_count { return Histogram_count(s) } + +func (s Histogram_count) Which() Histogram_count_Which { + return Histogram_count_Which(capnp.Struct(s).Uint16(8)) +} +func (s Histogram_count) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s Histogram_count) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Histogram_count) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Histogram_count) CountInt() uint64 { + if capnp.Struct(s).Uint16(8) != 0 { + panic("Which() != countInt") + } + return capnp.Struct(s).Uint64(0) +} + +func (s Histogram_count) SetCountInt(v uint64) { + capnp.Struct(s).SetUint16(8, 0) + capnp.Struct(s).SetUint64(0, v) +} + +func (s Histogram_count) CountFloat() float64 { + if capnp.Struct(s).Uint16(8) != 1 { + panic("Which() != countFloat") + } + return math.Float64frombits(capnp.Struct(s).Uint64(0)) +} + +func (s Histogram_count) SetCountFloat(v float64) { + capnp.Struct(s).SetUint16(8, 1) + capnp.Struct(s).SetUint64(0, math.Float64bits(v)) +} + +func (s Histogram) Sum() float64 { + return math.Float64frombits(capnp.Struct(s).Uint64(16)) +} + +func (s Histogram) SetSum(v float64) { + capnp.Struct(s).SetUint64(16, math.Float64bits(v)) +} + +func (s Histogram) Schema() int32 { + return int32(capnp.Struct(s).Uint32(12)) +} + +func (s Histogram) SetSchema(v int32) { + capnp.Struct(s).SetUint32(12, uint32(v)) +} + +func (s Histogram) ZeroThreshold() float64 { + return math.Float64frombits(capnp.Struct(s).Uint64(24)) +} + +func (s Histogram) SetZeroThreshold(v float64) { + capnp.Struct(s).SetUint64(24, math.Float64bits(v)) +} + +func (s Histogram) ZeroCount() Histogram_zeroCount { return Histogram_zeroCount(s) } + +func (s Histogram_zeroCount) Which() Histogram_zeroCount_Which { + return Histogram_zeroCount_Which(capnp.Struct(s).Uint16(10)) +} +func (s Histogram_zeroCount) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s Histogram_zeroCount) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Histogram_zeroCount) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Histogram_zeroCount) ZeroCountInt() uint64 { + if capnp.Struct(s).Uint16(10) != 0 { + panic("Which() != zeroCountInt") + } + return capnp.Struct(s).Uint64(32) +} + +func (s 
Histogram_zeroCount) SetZeroCountInt(v uint64) { + capnp.Struct(s).SetUint16(10, 0) + capnp.Struct(s).SetUint64(32, v) +} + +func (s Histogram_zeroCount) ZeroCountFloat() float64 { + if capnp.Struct(s).Uint16(10) != 1 { + panic("Which() != zeroCountFloat") + } + return math.Float64frombits(capnp.Struct(s).Uint64(32)) +} + +func (s Histogram_zeroCount) SetZeroCountFloat(v float64) { + capnp.Struct(s).SetUint16(10, 1) + capnp.Struct(s).SetUint64(32, math.Float64bits(v)) +} + +func (s Histogram) NegativeSpans() (BucketSpan_List, error) { + p, err := capnp.Struct(s).Ptr(0) + return BucketSpan_List(p.List()), err +} + +func (s Histogram) HasNegativeSpans() bool { + return capnp.Struct(s).HasPtr(0) +} + +func (s Histogram) SetNegativeSpans(v BucketSpan_List) error { + return capnp.Struct(s).SetPtr(0, v.ToPtr()) +} + +// NewNegativeSpans sets the negativeSpans field to a newly +// allocated BucketSpan_List, preferring placement in s's segment. +func (s Histogram) NewNegativeSpans(n int32) (BucketSpan_List, error) { + l, err := NewBucketSpan_List(capnp.Struct(s).Segment(), n) + if err != nil { + return BucketSpan_List{}, err + } + err = capnp.Struct(s).SetPtr(0, l.ToPtr()) + return l, err +} +func (s Histogram) NegativeDeltas() (capnp.Int64List, error) { + p, err := capnp.Struct(s).Ptr(1) + return capnp.Int64List(p.List()), err +} + +func (s Histogram) HasNegativeDeltas() bool { + return capnp.Struct(s).HasPtr(1) +} + +func (s Histogram) SetNegativeDeltas(v capnp.Int64List) error { + return capnp.Struct(s).SetPtr(1, v.ToPtr()) +} + +// NewNegativeDeltas sets the negativeDeltas field to a newly +// allocated capnp.Int64List, preferring placement in s's segment. +func (s Histogram) NewNegativeDeltas(n int32) (capnp.Int64List, error) { + l, err := capnp.NewInt64List(capnp.Struct(s).Segment(), n) + if err != nil { + return capnp.Int64List{}, err + } + err = capnp.Struct(s).SetPtr(1, l.ToPtr()) + return l, err +} +func (s Histogram) NegativeCounts() (capnp.Float64List, error) { + p, err := capnp.Struct(s).Ptr(2) + return capnp.Float64List(p.List()), err +} + +func (s Histogram) HasNegativeCounts() bool { + return capnp.Struct(s).HasPtr(2) +} + +func (s Histogram) SetNegativeCounts(v capnp.Float64List) error { + return capnp.Struct(s).SetPtr(2, v.ToPtr()) +} + +// NewNegativeCounts sets the negativeCounts field to a newly +// allocated capnp.Float64List, preferring placement in s's segment. +func (s Histogram) NewNegativeCounts(n int32) (capnp.Float64List, error) { + l, err := capnp.NewFloat64List(capnp.Struct(s).Segment(), n) + if err != nil { + return capnp.Float64List{}, err + } + err = capnp.Struct(s).SetPtr(2, l.ToPtr()) + return l, err +} +func (s Histogram) PositiveSpans() (BucketSpan_List, error) { + p, err := capnp.Struct(s).Ptr(3) + return BucketSpan_List(p.List()), err +} + +func (s Histogram) HasPositiveSpans() bool { + return capnp.Struct(s).HasPtr(3) +} + +func (s Histogram) SetPositiveSpans(v BucketSpan_List) error { + return capnp.Struct(s).SetPtr(3, v.ToPtr()) +} + +// NewPositiveSpans sets the positiveSpans field to a newly +// allocated BucketSpan_List, preferring placement in s's segment. 
+func (s Histogram) NewPositiveSpans(n int32) (BucketSpan_List, error) { + l, err := NewBucketSpan_List(capnp.Struct(s).Segment(), n) + if err != nil { + return BucketSpan_List{}, err + } + err = capnp.Struct(s).SetPtr(3, l.ToPtr()) + return l, err +} +func (s Histogram) PositiveDeltas() (capnp.Int64List, error) { + p, err := capnp.Struct(s).Ptr(4) + return capnp.Int64List(p.List()), err +} + +func (s Histogram) HasPositiveDeltas() bool { + return capnp.Struct(s).HasPtr(4) +} + +func (s Histogram) SetPositiveDeltas(v capnp.Int64List) error { + return capnp.Struct(s).SetPtr(4, v.ToPtr()) +} + +// NewPositiveDeltas sets the positiveDeltas field to a newly +// allocated capnp.Int64List, preferring placement in s's segment. +func (s Histogram) NewPositiveDeltas(n int32) (capnp.Int64List, error) { + l, err := capnp.NewInt64List(capnp.Struct(s).Segment(), n) + if err != nil { + return capnp.Int64List{}, err + } + err = capnp.Struct(s).SetPtr(4, l.ToPtr()) + return l, err +} +func (s Histogram) PositiveCounts() (capnp.Float64List, error) { + p, err := capnp.Struct(s).Ptr(5) + return capnp.Float64List(p.List()), err +} + +func (s Histogram) HasPositiveCounts() bool { + return capnp.Struct(s).HasPtr(5) +} + +func (s Histogram) SetPositiveCounts(v capnp.Float64List) error { + return capnp.Struct(s).SetPtr(5, v.ToPtr()) +} + +// NewPositiveCounts sets the positiveCounts field to a newly +// allocated capnp.Float64List, preferring placement in s's segment. +func (s Histogram) NewPositiveCounts(n int32) (capnp.Float64List, error) { + l, err := capnp.NewFloat64List(capnp.Struct(s).Segment(), n) + if err != nil { + return capnp.Float64List{}, err + } + err = capnp.Struct(s).SetPtr(5, l.ToPtr()) + return l, err +} +func (s Histogram) ResetHint() Histogram_ResetHint { + return Histogram_ResetHint(capnp.Struct(s).Uint16(40)) +} + +func (s Histogram) SetResetHint(v Histogram_ResetHint) { + capnp.Struct(s).SetUint16(40, uint16(v)) +} + +func (s Histogram) Timestamp() int64 { + return int64(capnp.Struct(s).Uint64(48)) +} + +func (s Histogram) SetTimestamp(v int64) { + capnp.Struct(s).SetUint64(48, uint64(v)) +} + +// Histogram_List is a list of Histogram. +type Histogram_List = capnp.StructList[Histogram] + +// NewHistogram creates a new list of Histogram. +func NewHistogram_List(s *capnp.Segment, sz int32) (Histogram_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 56, PointerCount: 6}, sz) + return capnp.StructList[Histogram](l), err +} + +// Histogram_Future is a wrapper for a Histogram promised by a client call. +type Histogram_Future struct{ *capnp.Future } + +func (f Histogram_Future) Struct() (Histogram, error) { + p, err := f.Future.Ptr() + return Histogram(p.Struct()), err +} +func (p Histogram_Future) Count() Histogram_count_Future { return Histogram_count_Future{p.Future} } + +// Histogram_count_Future is a wrapper for a Histogram_count promised by a client call. +type Histogram_count_Future struct{ *capnp.Future } + +func (f Histogram_count_Future) Struct() (Histogram_count, error) { + p, err := f.Future.Ptr() + return Histogram_count(p.Struct()), err +} +func (p Histogram_Future) ZeroCount() Histogram_zeroCount_Future { + return Histogram_zeroCount_Future{p.Future} +} + +// Histogram_zeroCount_Future is a wrapper for a Histogram_zeroCount promised by a client call. 
+type Histogram_zeroCount_Future struct{ *capnp.Future } + +func (f Histogram_zeroCount_Future) Struct() (Histogram_zeroCount, error) { + p, err := f.Future.Ptr() + return Histogram_zeroCount(p.Struct()), err +} + +type Histogram_ResetHint uint16 + +// Histogram_ResetHint_TypeID is the unique identifier for the type Histogram_ResetHint. +const Histogram_ResetHint_TypeID = 0xd5b0cec646441eb0 + +// Values of Histogram_ResetHint. +const ( + Histogram_ResetHint_unknown Histogram_ResetHint = 0 + Histogram_ResetHint_yes Histogram_ResetHint = 1 + Histogram_ResetHint_no Histogram_ResetHint = 2 + Histogram_ResetHint_gauge Histogram_ResetHint = 3 +) + +// String returns the enum's constant name. +func (c Histogram_ResetHint) String() string { + switch c { + case Histogram_ResetHint_unknown: + return "unknown" + case Histogram_ResetHint_yes: + return "yes" + case Histogram_ResetHint_no: + return "no" + case Histogram_ResetHint_gauge: + return "gauge" + + default: + return "" + } +} + +// Histogram_ResetHintFromString returns the enum value with a name, +// or the zero value if there's no such value. +func Histogram_ResetHintFromString(c string) Histogram_ResetHint { + switch c { + case "unknown": + return Histogram_ResetHint_unknown + case "yes": + return Histogram_ResetHint_yes + case "no": + return Histogram_ResetHint_no + case "gauge": + return Histogram_ResetHint_gauge + + default: + return 0 + } +} + +type Histogram_ResetHint_List = capnp.EnumList[Histogram_ResetHint] + +func NewHistogram_ResetHint_List(s *capnp.Segment, sz int32) (Histogram_ResetHint_List, error) { + return capnp.NewEnumList[Histogram_ResetHint](s, sz) +} + +type Exemplar capnp.Struct + +// Exemplar_TypeID is the unique identifier for the type Exemplar. +const Exemplar_TypeID = 0xbd820120399954be + +func NewExemplar(s *capnp.Segment) (Exemplar, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}) + return Exemplar(st), err +} + +func NewRootExemplar(s *capnp.Segment) (Exemplar, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}) + return Exemplar(st), err +} + +func ReadRootExemplar(msg *capnp.Message) (Exemplar, error) { + root, err := msg.Root() + return Exemplar(root.Struct()), err +} + +func (s Exemplar) String() string { + str, _ := text.Marshal(0xbd820120399954be, capnp.Struct(s)) + return str +} + +func (s Exemplar) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Exemplar) DecodeFromPtr(p capnp.Ptr) Exemplar { + return Exemplar(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s Exemplar) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Exemplar) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s Exemplar) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Exemplar) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Exemplar) Labels() (Label_List, error) { + p, err := capnp.Struct(s).Ptr(0) + return Label_List(p.List()), err +} + +func (s Exemplar) HasLabels() bool { + return capnp.Struct(s).HasPtr(0) +} + +func (s Exemplar) SetLabels(v Label_List) error { + return capnp.Struct(s).SetPtr(0, v.ToPtr()) +} + +// NewLabels sets the labels field to a newly +// allocated Label_List, preferring placement in s's segment. 
+func (s Exemplar) NewLabels(n int32) (Label_List, error) { + l, err := NewLabel_List(capnp.Struct(s).Segment(), n) + if err != nil { + return Label_List{}, err + } + err = capnp.Struct(s).SetPtr(0, l.ToPtr()) + return l, err +} +func (s Exemplar) Value() float64 { + return math.Float64frombits(capnp.Struct(s).Uint64(0)) +} + +func (s Exemplar) SetValue(v float64) { + capnp.Struct(s).SetUint64(0, math.Float64bits(v)) +} + +func (s Exemplar) Timestamp() int64 { + return int64(capnp.Struct(s).Uint64(8)) +} + +func (s Exemplar) SetTimestamp(v int64) { + capnp.Struct(s).SetUint64(8, uint64(v)) +} + +// Exemplar_List is a list of Exemplar. +type Exemplar_List = capnp.StructList[Exemplar] + +// NewExemplar creates a new list of Exemplar. +func NewExemplar_List(s *capnp.Segment, sz int32) (Exemplar_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 16, PointerCount: 1}, sz) + return capnp.StructList[Exemplar](l), err +} + +// Exemplar_Future is a wrapper for a Exemplar promised by a client call. +type Exemplar_Future struct{ *capnp.Future } + +func (f Exemplar_Future) Struct() (Exemplar, error) { + p, err := f.Future.Ptr() + return Exemplar(p.Struct()), err +} + +type TimeSeries capnp.Struct + +// TimeSeries_TypeID is the unique identifier for the type TimeSeries. +const TimeSeries_TypeID = 0xb438c10228b97446 + +func NewTimeSeries(s *capnp.Segment) (TimeSeries, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 4}) + return TimeSeries(st), err +} + +func NewRootTimeSeries(s *capnp.Segment) (TimeSeries, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 4}) + return TimeSeries(st), err +} + +func ReadRootTimeSeries(msg *capnp.Message) (TimeSeries, error) { + root, err := msg.Root() + return TimeSeries(root.Struct()), err +} + +func (s TimeSeries) String() string { + str, _ := text.Marshal(0xb438c10228b97446, capnp.Struct(s)) + return str +} + +func (s TimeSeries) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (TimeSeries) DecodeFromPtr(p capnp.Ptr) TimeSeries { + return TimeSeries(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s TimeSeries) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s TimeSeries) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s TimeSeries) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s TimeSeries) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s TimeSeries) Labels() (Label_List, error) { + p, err := capnp.Struct(s).Ptr(0) + return Label_List(p.List()), err +} + +func (s TimeSeries) HasLabels() bool { + return capnp.Struct(s).HasPtr(0) +} + +func (s TimeSeries) SetLabels(v Label_List) error { + return capnp.Struct(s).SetPtr(0, v.ToPtr()) +} + +// NewLabels sets the labels field to a newly +// allocated Label_List, preferring placement in s's segment. 
+func (s TimeSeries) NewLabels(n int32) (Label_List, error) { + l, err := NewLabel_List(capnp.Struct(s).Segment(), n) + if err != nil { + return Label_List{}, err + } + err = capnp.Struct(s).SetPtr(0, l.ToPtr()) + return l, err +} +func (s TimeSeries) Samples() (Sample_List, error) { + p, err := capnp.Struct(s).Ptr(1) + return Sample_List(p.List()), err +} + +func (s TimeSeries) HasSamples() bool { + return capnp.Struct(s).HasPtr(1) +} + +func (s TimeSeries) SetSamples(v Sample_List) error { + return capnp.Struct(s).SetPtr(1, v.ToPtr()) +} + +// NewSamples sets the samples field to a newly +// allocated Sample_List, preferring placement in s's segment. +func (s TimeSeries) NewSamples(n int32) (Sample_List, error) { + l, err := NewSample_List(capnp.Struct(s).Segment(), n) + if err != nil { + return Sample_List{}, err + } + err = capnp.Struct(s).SetPtr(1, l.ToPtr()) + return l, err +} +func (s TimeSeries) Histograms() (Histogram_List, error) { + p, err := capnp.Struct(s).Ptr(2) + return Histogram_List(p.List()), err +} + +func (s TimeSeries) HasHistograms() bool { + return capnp.Struct(s).HasPtr(2) +} + +func (s TimeSeries) SetHistograms(v Histogram_List) error { + return capnp.Struct(s).SetPtr(2, v.ToPtr()) +} + +// NewHistograms sets the histograms field to a newly +// allocated Histogram_List, preferring placement in s's segment. +func (s TimeSeries) NewHistograms(n int32) (Histogram_List, error) { + l, err := NewHistogram_List(capnp.Struct(s).Segment(), n) + if err != nil { + return Histogram_List{}, err + } + err = capnp.Struct(s).SetPtr(2, l.ToPtr()) + return l, err +} +func (s TimeSeries) Exemplars() (Exemplar_List, error) { + p, err := capnp.Struct(s).Ptr(3) + return Exemplar_List(p.List()), err +} + +func (s TimeSeries) HasExemplars() bool { + return capnp.Struct(s).HasPtr(3) +} + +func (s TimeSeries) SetExemplars(v Exemplar_List) error { + return capnp.Struct(s).SetPtr(3, v.ToPtr()) +} + +// NewExemplars sets the exemplars field to a newly +// allocated Exemplar_List, preferring placement in s's segment. +func (s TimeSeries) NewExemplars(n int32) (Exemplar_List, error) { + l, err := NewExemplar_List(capnp.Struct(s).Segment(), n) + if err != nil { + return Exemplar_List{}, err + } + err = capnp.Struct(s).SetPtr(3, l.ToPtr()) + return l, err +} + +// TimeSeries_List is a list of TimeSeries. +type TimeSeries_List = capnp.StructList[TimeSeries] + +// NewTimeSeries creates a new list of TimeSeries. +func NewTimeSeries_List(s *capnp.Segment, sz int32) (TimeSeries_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 4}, sz) + return capnp.StructList[TimeSeries](l), err +} + +// TimeSeries_Future is a wrapper for a TimeSeries promised by a client call. +type TimeSeries_Future struct{ *capnp.Future } + +func (f TimeSeries_Future) Struct() (TimeSeries, error) { + p, err := f.Future.Ptr() + return TimeSeries(p.Struct()), err +} + +type WriteRequest capnp.Struct + +// WriteRequest_TypeID is the unique identifier for the type WriteRequest. 
+const WriteRequest_TypeID = 0xeb3bcb770c8eb6be + +func NewWriteRequest(s *capnp.Segment) (WriteRequest, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 3}) + return WriteRequest(st), err +} + +func NewRootWriteRequest(s *capnp.Segment) (WriteRequest, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 3}) + return WriteRequest(st), err +} + +func ReadRootWriteRequest(msg *capnp.Message) (WriteRequest, error) { + root, err := msg.Root() + return WriteRequest(root.Struct()), err +} + +func (s WriteRequest) String() string { + str, _ := text.Marshal(0xeb3bcb770c8eb6be, capnp.Struct(s)) + return str +} + +func (s WriteRequest) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (WriteRequest) DecodeFromPtr(p capnp.Ptr) WriteRequest { + return WriteRequest(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s WriteRequest) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s WriteRequest) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s WriteRequest) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s WriteRequest) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s WriteRequest) Symbols() (Symbols, error) { + p, err := capnp.Struct(s).Ptr(0) + return Symbols(p.Struct()), err +} + +func (s WriteRequest) HasSymbols() bool { + return capnp.Struct(s).HasPtr(0) +} + +func (s WriteRequest) SetSymbols(v Symbols) error { + return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +} + +// NewSymbols sets the symbols field to a newly +// allocated Symbols struct, preferring placement in s's segment. +func (s WriteRequest) NewSymbols() (Symbols, error) { + ss, err := NewSymbols(capnp.Struct(s).Segment()) + if err != nil { + return Symbols{}, err + } + err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) + return ss, err +} + +func (s WriteRequest) TimeSeries() (TimeSeries_List, error) { + p, err := capnp.Struct(s).Ptr(1) + return TimeSeries_List(p.List()), err +} + +func (s WriteRequest) HasTimeSeries() bool { + return capnp.Struct(s).HasPtr(1) +} + +func (s WriteRequest) SetTimeSeries(v TimeSeries_List) error { + return capnp.Struct(s).SetPtr(1, v.ToPtr()) +} + +// NewTimeSeries sets the timeSeries field to a newly +// allocated TimeSeries_List, preferring placement in s's segment. +func (s WriteRequest) NewTimeSeries(n int32) (TimeSeries_List, error) { + l, err := NewTimeSeries_List(capnp.Struct(s).Segment(), n) + if err != nil { + return TimeSeries_List{}, err + } + err = capnp.Struct(s).SetPtr(1, l.ToPtr()) + return l, err +} +func (s WriteRequest) Tenant() (string, error) { + p, err := capnp.Struct(s).Ptr(2) + return p.Text(), err +} + +func (s WriteRequest) HasTenant() bool { + return capnp.Struct(s).HasPtr(2) +} + +func (s WriteRequest) TenantBytes() ([]byte, error) { + p, err := capnp.Struct(s).Ptr(2) + return p.TextBytes(), err +} + +func (s WriteRequest) SetTenant(v string) error { + return capnp.Struct(s).SetText(2, v) +} + +// WriteRequest_List is a list of WriteRequest. +type WriteRequest_List = capnp.StructList[WriteRequest] + +// NewWriteRequest creates a new list of WriteRequest. +func NewWriteRequest_List(s *capnp.Segment, sz int32) (WriteRequest_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 3}, sz) + return capnp.StructList[WriteRequest](l), err +} + +// WriteRequest_Future is a wrapper for a WriteRequest promised by a client call. 
+type WriteRequest_Future struct{ *capnp.Future } + +func (f WriteRequest_Future) Struct() (WriteRequest, error) { + p, err := f.Future.Ptr() + return WriteRequest(p.Struct()), err +} +func (p WriteRequest_Future) Symbols() Symbols_Future { + return Symbols_Future{Future: p.Future.Field(0, nil)} +} + +type WriteError uint16 + +// WriteError_TypeID is the unique identifier for the type WriteError. +const WriteError_TypeID = 0xe67be4164a39ea55 + +// Values of WriteError. +const ( + WriteError_none WriteError = 0 + WriteError_unavailable WriteError = 1 + WriteError_alreadyExists WriteError = 2 + WriteError_invalidArgument WriteError = 3 + WriteError_internal WriteError = 4 +) + +// String returns the enum's constant name. +func (c WriteError) String() string { + switch c { + case WriteError_none: + return "none" + case WriteError_unavailable: + return "unavailable" + case WriteError_alreadyExists: + return "alreadyExists" + case WriteError_invalidArgument: + return "invalidArgument" + case WriteError_internal: + return "internal" + + default: + return "" + } +} + +// WriteErrorFromString returns the enum value with a name, +// or the zero value if there's no such value. +func WriteErrorFromString(c string) WriteError { + switch c { + case "none": + return WriteError_none + case "unavailable": + return WriteError_unavailable + case "alreadyExists": + return WriteError_alreadyExists + case "invalidArgument": + return WriteError_invalidArgument + case "internal": + return WriteError_internal + + default: + return 0 + } +} + +type WriteError_List = capnp.EnumList[WriteError] + +func NewWriteError_List(s *capnp.Segment, sz int32) (WriteError_List, error) { + return capnp.NewEnumList[WriteError](s, sz) +} + +type Writer capnp.Client + +// Writer_TypeID is the unique identifier for the type Writer. +const Writer_TypeID = 0xcc20b9c332c83b91 + +func (c Writer) Write(ctx context.Context, params func(Writer_write_Params) error) (Writer_write_Results_Future, capnp.ReleaseFunc) { + + s := capnp.Send{ + Method: capnp.Method{ + InterfaceID: 0xcc20b9c332c83b91, + MethodID: 0, + InterfaceName: "pkg/receive/writecapnp/write_request.capnp:Writer", + MethodName: "write", + }, + } + if params != nil { + s.ArgsSize = capnp.ObjectSize{DataSize: 0, PointerCount: 1} + s.PlaceArgs = func(s capnp.Struct) error { return params(Writer_write_Params(s)) } + } + + ans, release := capnp.Client(c).SendCall(ctx, s) + return Writer_write_Results_Future{Future: ans.Future()}, release + +} + +func (c Writer) WaitStreaming() error { + return capnp.Client(c).WaitStreaming() +} + +// String returns a string that identifies this capability for debugging +// purposes. Its format should not be depended on: in particular, it +// should not be used to compare clients. Use IsSame to compare clients +// for equality. +func (c Writer) String() string { + return "Writer(" + capnp.Client(c).String() + ")" +} + +// AddRef creates a new Client that refers to the same capability as c. +// If c is nil or has resolved to null, then AddRef returns nil. +func (c Writer) AddRef() Writer { + return Writer(capnp.Client(c).AddRef()) +} + +// Release releases a capability reference. If this is the last +// reference to the capability, then the underlying resources associated +// with the capability will be released. +// +// Release will panic if c has already been released, but not if c is +// nil or resolved to null. 
+func (c Writer) Release() { + capnp.Client(c).Release() +} + +// Resolve blocks until the capability is fully resolved or the Context +// expires. +func (c Writer) Resolve(ctx context.Context) error { + return capnp.Client(c).Resolve(ctx) +} + +func (c Writer) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Client(c).EncodeAsPtr(seg) +} + +func (Writer) DecodeFromPtr(p capnp.Ptr) Writer { + return Writer(capnp.Client{}.DecodeFromPtr(p)) +} + +// IsValid reports whether c is a valid reference to a capability. +// A reference is invalid if it is nil, has resolved to null, or has +// been released. +func (c Writer) IsValid() bool { + return capnp.Client(c).IsValid() +} + +// IsSame reports whether c and other refer to a capability created by the +// same call to NewClient. This can return false negatives if c or other +// are not fully resolved: use Resolve if this is an issue. If either +// c or other are released, then IsSame panics. +func (c Writer) IsSame(other Writer) bool { + return capnp.Client(c).IsSame(capnp.Client(other)) +} + +// Update the flowcontrol.FlowLimiter used to manage flow control for +// this client. This affects all future calls, but not calls already +// waiting to send. Passing nil sets the value to flowcontrol.NopLimiter, +// which is also the default. +func (c Writer) SetFlowLimiter(lim fc.FlowLimiter) { + capnp.Client(c).SetFlowLimiter(lim) +} + +// Get the current flowcontrol.FlowLimiter used to manage flow control +// for this client. +func (c Writer) GetFlowLimiter() fc.FlowLimiter { + return capnp.Client(c).GetFlowLimiter() +} + +// A Writer_Server is a Writer with a local implementation. +type Writer_Server interface { + Write(context.Context, Writer_write) error +} + +// Writer_NewServer creates a new Server from an implementation of Writer_Server. +func Writer_NewServer(s Writer_Server) *server.Server { + c, _ := s.(server.Shutdowner) + return server.New(Writer_Methods(nil, s), s, c) +} + +// Writer_ServerToClient creates a new Client from an implementation of Writer_Server. +// The caller is responsible for calling Release on the returned Client. +func Writer_ServerToClient(s Writer_Server) Writer { + return Writer(capnp.NewClient(Writer_NewServer(s))) +} + +// Writer_Methods appends Methods to a slice that invoke the methods on s. +// This can be used to create a more complicated Server. +func Writer_Methods(methods []server.Method, s Writer_Server) []server.Method { + if cap(methods) == 0 { + methods = make([]server.Method, 0, 1) + } + + methods = append(methods, server.Method{ + Method: capnp.Method{ + InterfaceID: 0xcc20b9c332c83b91, + MethodID: 0, + InterfaceName: "pkg/receive/writecapnp/write_request.capnp:Writer", + MethodName: "write", + }, + Impl: func(ctx context.Context, call *server.Call) error { + return s.Write(ctx, Writer_write{call}) + }, + }) + + return methods +} + +// Writer_write holds the state for a server call to Writer.write. +// See server.Call for documentation. +type Writer_write struct { + *server.Call +} + +// Args returns the call's arguments. +func (c Writer_write) Args() Writer_write_Params { + return Writer_write_Params(c.Call.Args()) +} + +// AllocResults allocates the results struct. +func (c Writer_write) AllocResults() (Writer_write_Results, error) { + r, err := c.Call.AllocResults(capnp.ObjectSize{DataSize: 8, PointerCount: 0}) + return Writer_write_Results(r), err +} + +// Writer_List is a list of Writer. +type Writer_List = capnp.CapList[Writer] + +// NewWriter_List creates a new list of Writer. 
+func NewWriter_List(s *capnp.Segment, sz int32) (Writer_List, error) { + l, err := capnp.NewPointerList(s, sz) + return capnp.CapList[Writer](l), err +} + +type Writer_write_Params capnp.Struct + +// Writer_write_Params_TypeID is the unique identifier for the type Writer_write_Params. +const Writer_write_Params_TypeID = 0x90d5073324ecc34b + +func NewWriter_write_Params(s *capnp.Segment) (Writer_write_Params, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Writer_write_Params(st), err +} + +func NewRootWriter_write_Params(s *capnp.Segment) (Writer_write_Params, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) + return Writer_write_Params(st), err +} + +func ReadRootWriter_write_Params(msg *capnp.Message) (Writer_write_Params, error) { + root, err := msg.Root() + return Writer_write_Params(root.Struct()), err +} + +func (s Writer_write_Params) String() string { + str, _ := text.Marshal(0x90d5073324ecc34b, capnp.Struct(s)) + return str +} + +func (s Writer_write_Params) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Writer_write_Params) DecodeFromPtr(p capnp.Ptr) Writer_write_Params { + return Writer_write_Params(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s Writer_write_Params) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Writer_write_Params) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s Writer_write_Params) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Writer_write_Params) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Writer_write_Params) Wr() (WriteRequest, error) { + p, err := capnp.Struct(s).Ptr(0) + return WriteRequest(p.Struct()), err +} + +func (s Writer_write_Params) HasWr() bool { + return capnp.Struct(s).HasPtr(0) +} + +func (s Writer_write_Params) SetWr(v WriteRequest) error { + return capnp.Struct(s).SetPtr(0, capnp.Struct(v).ToPtr()) +} + +// NewWr sets the wr field to a newly +// allocated WriteRequest struct, preferring placement in s's segment. +func (s Writer_write_Params) NewWr() (WriteRequest, error) { + ss, err := NewWriteRequest(capnp.Struct(s).Segment()) + if err != nil { + return WriteRequest{}, err + } + err = capnp.Struct(s).SetPtr(0, capnp.Struct(ss).ToPtr()) + return ss, err +} + +// Writer_write_Params_List is a list of Writer_write_Params. +type Writer_write_Params_List = capnp.StructList[Writer_write_Params] + +// NewWriter_write_Params creates a new list of Writer_write_Params. +func NewWriter_write_Params_List(s *capnp.Segment, sz int32) (Writer_write_Params_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) + return capnp.StructList[Writer_write_Params](l), err +} + +// Writer_write_Params_Future is a wrapper for a Writer_write_Params promised by a client call. +type Writer_write_Params_Future struct{ *capnp.Future } + +func (f Writer_write_Params_Future) Struct() (Writer_write_Params, error) { + p, err := f.Future.Ptr() + return Writer_write_Params(p.Struct()), err +} +func (p Writer_write_Params_Future) Wr() WriteRequest_Future { + return WriteRequest_Future{Future: p.Future.Field(0, nil)} +} + +type Writer_write_Results capnp.Struct + +// Writer_write_Results_TypeID is the unique identifier for the type Writer_write_Results. 
+const Writer_write_Results_TypeID = 0x87f6d2196d414cf4 + +func NewWriter_write_Results(s *capnp.Segment) (Writer_write_Results, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}) + return Writer_write_Results(st), err +} + +func NewRootWriter_write_Results(s *capnp.Segment) (Writer_write_Results, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}) + return Writer_write_Results(st), err +} + +func ReadRootWriter_write_Results(msg *capnp.Message) (Writer_write_Results, error) { + root, err := msg.Root() + return Writer_write_Results(root.Struct()), err +} + +func (s Writer_write_Results) String() string { + str, _ := text.Marshal(0x87f6d2196d414cf4, capnp.Struct(s)) + return str +} + +func (s Writer_write_Results) EncodeAsPtr(seg *capnp.Segment) capnp.Ptr { + return capnp.Struct(s).EncodeAsPtr(seg) +} + +func (Writer_write_Results) DecodeFromPtr(p capnp.Ptr) Writer_write_Results { + return Writer_write_Results(capnp.Struct{}.DecodeFromPtr(p)) +} + +func (s Writer_write_Results) ToPtr() capnp.Ptr { + return capnp.Struct(s).ToPtr() +} +func (s Writer_write_Results) IsValid() bool { + return capnp.Struct(s).IsValid() +} + +func (s Writer_write_Results) Message() *capnp.Message { + return capnp.Struct(s).Message() +} + +func (s Writer_write_Results) Segment() *capnp.Segment { + return capnp.Struct(s).Segment() +} +func (s Writer_write_Results) Error() WriteError { + return WriteError(capnp.Struct(s).Uint16(0)) +} + +func (s Writer_write_Results) SetError(v WriteError) { + capnp.Struct(s).SetUint16(0, uint16(v)) +} + +// Writer_write_Results_List is a list of Writer_write_Results. +type Writer_write_Results_List = capnp.StructList[Writer_write_Results] + +// NewWriter_write_Results creates a new list of Writer_write_Results. +func NewWriter_write_Results_List(s *capnp.Segment, sz int32) (Writer_write_Results_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 0}, sz) + return capnp.StructList[Writer_write_Results](l), err +} + +// Writer_write_Results_Future is a wrapper for a Writer_write_Results promised by a client call. 
+type Writer_write_Results_Future struct{ *capnp.Future } + +func (f Writer_write_Results_Future) Struct() (Writer_write_Results, error) { + p, err := f.Future.Ptr() + return Writer_write_Results(p.Struct()), err +} + +const schema_85d3acc39d94e0f8 = "x\xda\xa4Vml\x1cW\x15\xbd\xe7\xbd\x99\x1d\xdb\xbb" + + "\x8e=\x9aUQ*`\xdb*HuL\xd2\xc4NP" + + "\x92\x16m\x1c\xe2\x10\xa7F\xf2\xec&\xd0\xa0\x14:\xb1" + + "_\xed%\xb3\xb3\xdb\x99Y;6\xaa\x0c\x91P\x13T" + + ">\x1a\x88DAH\x0d\x14$\xbe\xab\xf0\xa1Fj\x91" + + " \x08\xa8\xf8P\x0bT\x02D)\x95*\x90 \x80\x90" + + "\x10B\x91\xc8\xa0\xfbv=;XFd\xc3?\xfb\xcc" + + "\xd9{\xde=\xef\xbe{\xef\x8ekr\xbf\xb1s\xf0#" + + "9\x12\xee\xb4\x99K\xfe>=Q\xdf\xfc\xb3\x7fV`\x97\xc7\xee\xb5\xad\xbf\xfc" + + "\xe0\xfc\xdf6\xaa\x8bo\x16>\x08\xe7'\x9a\xfc\\a" + + "\x89\xb6%\xcdS\xf3w\x85jVY\xb5Eu\xd7R" + + "X\x8b\xd5\xac\xd7\x0c\x9a\xed?\xdf\x1d\xaa\x87Z*\x8a" + + "\xb7kl\xdf;\x18\x0b\xb7\xebO[**j\xf9q" + + "D\xae!\x0d\"\x03D\xf6\xe0\x18\x91\xdb'\xe1\x16\x05" + + "J*\x0c\x1b!\x86\xba\x8e\x100DH\x05s\xbd\x0a" + + "\x96g\xbc\xd0\xabGY\xbd[\xbbzr)\xc4p\xd7" + + "T\x02\x863b\xe6\x0d\x88\x1dh\xcd\x9eR\xa5\xb8\xda" + + "\xf4\x82\x19\xc0\xedKeF\xf6\x11\xb9[$\xdc\x1d\x02" + + "6P\xe4\xe7joc\xf0N\x09w\x97@\xb9\xf1\xe0" + + "\x83\x91\x8aa\x90\x80A(\xfb*\x98\x8f\x17\xd0G\x02" + + "}=f|\xb8\x16\xc5\x8d\xf9\xd0\xabo\x9fm\xb4d" + + "\x10\xf31\x0aI\xd2>\xc7\x91\xae\xe4 \xae'\xed\x83" + + "\xec|'\x91\xbbC\xc2\xbdG \x99m\xb4\x82x*" + + "\x88\x89\x08\xfd$\xd0O\x1d\xec\x90\xdf \xe9\xc5\xc8\x93" + + "@\xbeGc\xaa\xcb\xf5\x93\x0d\xe9G\xeb\\\xd9\xba\x91" + + "+\x07:G<(04\xe7\xc5\x1e\x06I`\x90\xb0" + + "\xda\xb6(\xc2&\xc2\x8c\x84\xf6f\xd3M{\xb3\xa2\xc2" + + "r\xe3-\x9c\x17\x1f\xe8\xb5Ib\x14a\xf2\x99\xde\xd3" + + "u\xe8u\xb8\xcep\x8e-Z\xc9X\xb4\xa2B\xfdS" + + "\x1ab\xa3R\x97R\xb8\xcc^\xdd\xa4SGkuU" + + "-\xa9\xb0\xa6\xb4Y\xc3\xa9Y\x1eW\xcb\x09\x09\xf7t" + + "\xc6\xac\x16\x9b\xd5\x94p\x1f\x15\xb0\x85(B\x10\xd9\xe7" + + "\xf8:\xcfJ\xb8O\x0a\xd8R\x16!\x89\xec\x8b\x15\"" + + "\xf7\x09\x09\xf7\x19\x81\xb2\xef\x9dT~j\xe4p\xf7\xb9" + + "\x13\x18\\\x8d\xbcz\xd3W\x19B\xda<\xda\x84d\xa1" + + "\xe3#\xc9z\x86\x95ik\x9a\xa5N\xabz\xd3\xf7B" + + "B\x86\x94\xf6\xe4\x0e\xa9\x17s&9\xa0\xe5{![" + + "SH\xad\x99dk\xf6K\xb8'\x04\xd6\x9c9\xce\x8d" + + "\xe4\xa8\x84\xfb\x00;\x83\xb63\xf7W:\x1e.\xfco" + + "\x13J\x8b\x9e\xdfR\xe9\x1d\xc6\xb5\xba\x8ab\xafNh" + + "\xc2$\x01\xb3\xc7\xa3\xeb\xca\x1b\xe2\xd2s\x0d 3\x0e" + + "PI**R\xf1\xe1Z@\x88\xdd]\x9c\x152\xc3" + + "\xd6\xb9\x1fc$l\xb4/\xd7\x99\xc4\x1dD\xd5\xfd\x90" + + "\xa8N\x83Sk\xdf\xaf3\x85}D\xd5\x83\x8c\xcf\xa0" + + "s\xef\x06\x91\xf36\x84D\xd5i\xc6\xef\x83\x00X<" + + "\x9d\xb7\xce1T8\x97\"\x0fDgBSu\xe8\x13" + + "\x1c\"\x87\"\xfa\x88\x9c\xe3X!\xaa\xde\xc7x\xcc\xb8" + + "%\x8a\xe8'r\x1e\xd2x\x93\xf1G\x19\xef\x93E\x0c" + + "\xf0\xec\xd5q\xce2\xfe$\xe3\xfdF\x11y\"\xe7\xa2" + + "\xe6?\xc1\xf83\x8c\x0f\x98E\x14x\xfaj\xfci\xc6" + + "\x7f\xc1x\xbeX\xc4 \x91\xf3\x02*D\xd5\xe7\x19\xff" + + "\x0d\xe3\x85\\\x11\x9b\x88\x9c_i\xfc\x97\x8c\xbf\x0a\x81" + + "\x92\xeeNV\xd4\xaa\xaf]V9\x9a]Puo\xad" + + "\x99\xea\x87yt!TT\x8a\x16\x1a\xfe\\z\xa7\xe9" + + "\x83E\x9c\x04j\xde\x8bk\x8b\x8aJ\xdc\xbd3e\x91" + + "\xaeH\x9dzM\x89\xe5\x83\xca\x8f\xbd\x94iv\xdaQ" + + "\xf7\xbb\xee.\xe9\xf7\xfcZ\xbbjD\xb5\x1b\x12J\x89" + + "\xffE\xa8\xfb}c\xa1\xb0[U\x18\xea\x16\\g\x86" + + "\xfe\xbf\xf5\xac\xe7*\xf4C4\xa4I\x94\xee\x9aX\xdb" + + "Jm\x9b\xeb\xd6\xb4J\xfa\xe7\xfb1\x83\x9b\xed\xd5\x15" + + "\x15\x95u*\xb1n\x89\xfa)\x8f\x1c\xe0L\xec\xdb\xef" + + " \x82\xb07\xdfJ\x04\xc9\x8a\xb4\xda\x0aN\x05\x8d\xa5" + + 
"\xc0ZV\x91\x0c\x1a\xa5y\xaf5\xafzO\xad4\xc9" + + "\xcb\x07\xeb\xb5[\xc7\xee\xadZo\xdbI\xad7\x12j" + + "\xbd7\x9c!\x82a\xdf~\x84h(h\x04*i\x05" + + "\xde\xa2W\xf3=\xb2N\xfa*\xf1\xfcPys\xcb\x93" + + "T:]\x8b\xe2(\xa9\x05\x8b\x9e_\x9b\x9b\x10\xe1|" + + "\xab\xae\x82\x98jA\xac\xc2\xc0\xf3\x89z\xdfi*m" + + "\x88\xd6\xb5\xc2\x03\x9dV8\x9d\x99\x12S<\x10\x0eK" + + "\xb8s\x99)\x91\xce\x93\x05\x81\xd5\x88\xe7\xb3\x1fa\xb8" + + "\xbb\xb8w\xd6\x1f\xae\x93\xaa\x0ak$\xb3\xf3 \xdd\xc2" + + "\xdb\xb5Z\x8eU\xe0\x051\x0a$P\xe8u5\xe0Y" + + "\x03\xb5n3\xa8t\xa7p\x9a\xc6Nn\xe9o\x94p" + + "\xf7\x88\x0d\xebw]\xc7\xee\xe5\x10\xd3<\x0b\x88nd" + + "=\x19\xeb\x9el(\xf0\xeajmG\xeb\xa8w\xfe\xfb" + + "w\x00\x00\x00\xff\xff9\x1a\xaea" + +func RegisterSchema(reg *schemas.Registry) { + reg.Register(&schemas.Schema{ + String: schema_85d3acc39d94e0f8, + Nodes: []uint64{ + 0x87f6d2196d414cf4, + 0x90d5073324ecc34b, + 0x983649d193295eae, + 0xa360d0951639b987, + 0xab79e1a6ecfeb87a, + 0xb374a1809b79340e, + 0xb438c10228b97446, + 0xbd820120399954be, + 0xc4dd3c458256382a, + 0xcc20b9c332c83b91, + 0xd5b0cec646441eb0, + 0xe67be4164a39ea55, + 0xeb3bcb770c8eb6be, + 0xef49df6cfa8875de, + 0xf192c7ee07114b32, + }, + Compressed: true, + }) +} diff --git a/pkg/receive/writecapnp/write_request.go b/pkg/receive/writecapnp/write_request.go new file mode 100644 index 0000000000..3b81741258 --- /dev/null +++ b/pkg/receive/writecapnp/write_request.go @@ -0,0 +1,271 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package writecapnp + +import ( + "unsafe" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/thanos-io/thanos/pkg/pool" +) + +var symbolsPool = pool.MustNewBucketedPool[string](256, 65536, 2, 0) + +type HistogramSample struct { + Timestamp int64 + Histogram *histogram.Histogram + FloatHistogram *histogram.FloatHistogram +} + +type FloatSample struct { + Value float64 + Timestamp int64 +} + +type Series struct { + Labels labels.Labels + Samples []FloatSample + Histograms []HistogramSample + Exemplars []exemplar.Exemplar +} + +type Request struct { + i int + symbols *[]string + builder labels.ScratchBuilder + series TimeSeries_List +} + +func NewRequest(wr WriteRequest) (*Request, error) { + ts, err := wr.TimeSeries() + if err != nil { + return nil, err + } + symTable, err := wr.Symbols() + if err != nil { + return nil, err + } + data, err := symTable.Data() + if err != nil { + return nil, err + } + offsets, err := symTable.Offsets() + if err != nil { + return nil, err + } + + strings, _ := symbolsPool.Get(offsets.Len()) + start := uint32(0) + for i := 0; i < offsets.Len(); i++ { + end := offsets.At(i) + if start == end { + *strings = append(*strings, "") + } else { + b := data[start:end] + *strings = append(*strings, unsafe.String(&b[0], len(b))) + } + start = end + } + + return &Request{ + i: -1, + symbols: strings, + series: ts, + builder: labels.NewScratchBuilder(8), + }, nil +} + +func (s *Request) Next() bool { + s.i++ + return s.i < s.series.Len() +} + +func (s *Request) At(t *Series) error { + lbls, err := s.series.At(s.i).Labels() + if err != nil { + return err + } + + s.builder.Reset() + for i := 0; i < lbls.Len(); i++ { + lbl := lbls.At(i) + s.builder.Add((*s.symbols)[lbl.Name()], (*s.symbols)[lbl.Value()]) + } + s.builder.Overwrite(&t.Labels) + + samples, err := s.series.At(s.i).Samples() + if err != nil { + return err + } + t.Samples = 
t.Samples[:0] + for i := 0; i < samples.Len(); i++ { + sample := samples.At(i) + t.Samples = append(t.Samples, FloatSample{ + Value: sample.Value(), + Timestamp: sample.Timestamp(), + }) + } + + histograms, err := s.series.At(s.i).Histograms() + if err != nil { + return err + } + t.Histograms = t.Histograms[:0] + for i := 0; i < histograms.Len(); i++ { + h, err := s.readHistogram(histograms.At(i)) + if err != nil { + return err + } + t.Histograms = append(t.Histograms, h) + } + + exemplars, err := s.series.At(s.i).Exemplars() + if err != nil { + return err + } + t.Exemplars = t.Exemplars[:0] + for i := 0; i < exemplars.Len(); i++ { + ex, err := s.readExemplar(s.symbols, exemplars.At(i)) + if err != nil { + return err + } + t.Exemplars = append(t.Exemplars, ex) + } + return nil +} + +func (s *Request) readHistogram(src Histogram) (HistogramSample, error) { + var ( + h *histogram.Histogram + fh *histogram.FloatHistogram + err error + ) + if src.Count().Which() == Histogram_count_Which_countInt { + h = &histogram.Histogram{ + CounterResetHint: histogram.CounterResetHint(src.ResetHint()), + Count: src.Count().CountInt(), + Sum: src.Sum(), + Schema: src.Schema(), + ZeroThreshold: src.ZeroThreshold(), + ZeroCount: src.ZeroCount().ZeroCountInt(), + } + h.PositiveSpans, h.NegativeSpans, err = createSpans(src) + if err != nil { + return HistogramSample{}, err + } + + positiveDeltas, err := src.PositiveDeltas() + if err != nil { + return HistogramSample{}, err + } + if positiveDeltas.Len() > 0 { + h.PositiveBuckets = make([]int64, positiveDeltas.Len()) + for i := 0; i < positiveDeltas.Len(); i++ { + h.PositiveBuckets[i] = positiveDeltas.At(i) + } + } + + negativeDeltas, err := src.NegativeDeltas() + if err != nil { + return HistogramSample{}, err + } + if negativeDeltas.Len() > 0 { + h.NegativeBuckets = make([]int64, negativeDeltas.Len()) + for i := 0; i < negativeDeltas.Len(); i++ { + h.NegativeBuckets[i] = negativeDeltas.At(i) + } + } + } else { + fh = &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(src.ResetHint()), + Count: src.Count().CountFloat(), + Sum: src.Sum(), + Schema: src.Schema(), + ZeroThreshold: src.ZeroThreshold(), + ZeroCount: src.ZeroCount().ZeroCountFloat(), + } + fh.PositiveSpans, fh.NegativeSpans, err = createSpans(src) + if err != nil { + return HistogramSample{}, err + } + + positiveCounts, err := src.PositiveCounts() + if err != nil { + return HistogramSample{}, err + } + if positiveCounts.Len() > 0 { + fh.PositiveBuckets = make([]float64, positiveCounts.Len()) + for i := 0; i < positiveCounts.Len(); i++ { + fh.PositiveBuckets[i] = positiveCounts.At(i) + } + } + + negativeCounts, err := src.NegativeCounts() + if err != nil { + return HistogramSample{}, err + } + if negativeCounts.Len() > 0 { + fh.NegativeBuckets = make([]float64, negativeCounts.Len()) + for i := 0; i < negativeCounts.Len(); i++ { + fh.NegativeBuckets[i] = negativeCounts.At(i) + } + } + } + + return HistogramSample{ + Timestamp: src.Timestamp(), + Histogram: h, + FloatHistogram: fh, + }, nil +} + +type spanGetter interface { + PositiveSpans() (BucketSpan_List, error) + NegativeSpans() (BucketSpan_List, error) +} + +func createSpans(src spanGetter) ([]histogram.Span, []histogram.Span, error) { + positiveSpans, err := src.PositiveSpans() + if err != nil { + return nil, nil, err + } + negativeSpans, err := src.NegativeSpans() + if err != nil { + return nil, nil, err + } + return copySpans(positiveSpans), copySpans(negativeSpans), nil +} + +func copySpans(src BucketSpan_List) 
[]histogram.Span { + spans := make([]histogram.Span, src.Len()) + for i := 0; i < src.Len(); i++ { + spans[i].Offset = src.At(i).Offset() + spans[i].Length = src.At(i).Length() + } + return spans +} + +func (s *Request) readExemplar(symbols *[]string, e Exemplar) (exemplar.Exemplar, error) { + ex := exemplar.Exemplar{} + lbls, err := e.Labels() + if err != nil { + return ex, err + } + + builder := labels.ScratchBuilder{} + for i := 0; i < lbls.Len(); i++ { + builder.Add((*symbols)[lbls.At(i).Name()], (*symbols)[lbls.At(i).Value()]) + } + ex.Labels = builder.Labels() + ex.Value = e.Value() + ex.Ts = e.Timestamp() + return ex, nil +} + +func (s *Request) Close() error { + symbolsPool.Put(s.symbols) + return nil +} diff --git a/pkg/receive/writecapnp/write_request_test.go b/pkg/receive/writecapnp/write_request_test.go new file mode 100644 index 0000000000..837c015aa5 --- /dev/null +++ b/pkg/receive/writecapnp/write_request_test.go @@ -0,0 +1,42 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package writecapnp + +import ( + "testing" + + "capnproto.org/go/capnp/v3" + "github.com/stretchr/testify/require" +) + +func TestNewRequest(t *testing.T) { + arena := capnp.SingleSegment(nil) + + _, seg, err := capnp.NewMessage(arena) + require.NoError(t, err) + + wr, err := NewRootWriteRequest(seg) + require.NoError(t, err) + + symbols, err := NewSymbols(seg) + require.NoError(t, err) + + require.NoError(t, symbols.SetData([]byte(`foobar`))) + list, err := capnp.NewUInt32List( + seg, 2, + ) + require.NoError(t, err) + list.Set(0, 3) + list.Set(1, 6) + + require.NoError(t, symbols.SetOffsets(list)) + require.NoError(t, wr.SetSymbols(symbols)) + + req, err := NewRequest(wr) + require.NoError(t, err) + + require.Equal(t, "foo", (*req.symbols)[0]) + require.Equal(t, "bar", (*req.symbols)[1]) + +} diff --git a/pkg/receive/writer.go b/pkg/receive/writer.go index 883db29c56..d17fd6453c 100644 --- a/pkg/receive/writer.go +++ b/pkg/receive/writer.go @@ -72,24 +72,9 @@ func NewWriter(logger log.Logger, multiTSDB TenantStorage, opts *WriterOptions) } } -func (r *Writer) Write(ctx context.Context, tenantID string, wreq *prompb.WriteRequest) error { +func (r *Writer) Write(ctx context.Context, tenantID string, wreq []prompb.TimeSeries) error { tLogger := log.With(r.logger, "tenant", tenantID) - var ( - numLabelsOutOfOrder = 0 - numLabelsDuplicates = 0 - numLabelsEmpty = 0 - - numSamplesOutOfOrder = 0 - numSamplesDuplicates = 0 - numSamplesOutOfBounds = 0 - numSamplesTooOld = 0 - - numExemplarsOutOfOrder = 0 - numExemplarsDuplicate = 0 - numExemplarsLabelLength = 0 - ) - s, err := r.multiTSDB.TenantAppendable(tenantID) if err != nil { return errors.Wrap(err, "get tenant appendable") @@ -104,33 +89,21 @@ func (r *Writer) Write(ctx context.Context, tenantID string, wreq *prompb.WriteR } getRef := app.(storage.GetRef) var ( - ref storage.SeriesRef - errs writeErrors + ref storage.SeriesRef + errorTracker writeErrorTracker ) app = &ReceiveAppender{ tLogger: tLogger, tooFarInFuture: r.opts.TooFarInFutureTimeWindow, Appender: app, } - for _, t := range wreq.Timeseries { + + for _, t := range wreq { // Check if time series labels are valid. If not, skip the time series // and report the error. 
if err := labelpb.ValidateLabels(t.Labels); err != nil { lset := &labelpb.ZLabelSet{Labels: t.Labels} - switch err { - case labelpb.ErrOutOfOrderLabels: - numLabelsOutOfOrder++ - level.Debug(tLogger).Log("msg", "Out of order labels in the label set", "lset", lset.String()) - case labelpb.ErrDuplicateLabels: - numLabelsDuplicates++ - level.Debug(tLogger).Log("msg", "Duplicate labels in the label set", "lset", lset.String()) - case labelpb.ErrEmptyLabels: - numLabelsEmpty++ - level.Debug(tLogger).Log("msg", "Labels with empty name in the label set", "lset", lset.String()) - default: - level.Debug(tLogger).Log("msg", "Error validating labels", "err", err) - } - + errorTracker.addLabelsError(err, lset, tLogger) continue } @@ -148,26 +121,12 @@ func (r *Writer) Write(ctx context.Context, tenantID string, wreq *prompb.WriteR // Append as many valid samples as possible, but keep track of the errors. for _, s := range t.Samples { ref, err = app.Append(ref, lset, s.Timestamp, s.Value) - switch err { - case storage.ErrOutOfOrderSample: - numSamplesOutOfOrder++ - level.Debug(tLogger).Log("msg", "Out of order sample", "lset", lset, "value", s.Value, "timestamp", s.Timestamp) - case storage.ErrDuplicateSampleForTimestamp: - numSamplesDuplicates++ - level.Debug(tLogger).Log("msg", "Duplicate sample for timestamp", "lset", lset, "value", s.Value, "timestamp", s.Timestamp) - case storage.ErrOutOfBounds: - numSamplesOutOfBounds++ - level.Debug(tLogger).Log("msg", "Out of bounds metric", "lset", lset, "value", s.Value, "timestamp", s.Timestamp) - case storage.ErrTooOldSample: - numSamplesTooOld++ - level.Debug(tLogger).Log("msg", "Sample is too old", "lset", lset, "value", s.Value, "timestamp", s.Timestamp) - default: - if err != nil { - level.Debug(tLogger).Log("msg", "Error ingesting sample", "err", err) - } - } + errorTracker.addSampleError(err, tLogger, lset, s.Timestamp, s.Value) } + b := labels.ScratchBuilder{} + b.Labels() + for _, hp := range t.Histograms { var ( h *histogram.Histogram @@ -181,24 +140,7 @@ func (r *Writer) Write(ctx context.Context, tenantID string, wreq *prompb.WriteR } ref, err = app.AppendHistogram(ref, lset, hp.Timestamp, h, fh) - switch err { - case storage.ErrOutOfOrderSample: - numSamplesOutOfOrder++ - level.Debug(tLogger).Log("msg", "Out of order histogram", "lset", lset, "timestamp", hp.Timestamp) - case storage.ErrDuplicateSampleForTimestamp: - numSamplesDuplicates++ - level.Debug(tLogger).Log("msg", "Duplicate histogram for timestamp", "lset", lset, "timestamp", hp.Timestamp) - case storage.ErrOutOfBounds: - numSamplesOutOfBounds++ - level.Debug(tLogger).Log("msg", "Out of bounds metric", "lset", lset, "timestamp", hp.Timestamp) - case storage.ErrTooOldSample: - numSamplesTooOld++ - level.Debug(tLogger).Log("msg", "Histogram is too old", "lset", lset, "timestamp", hp.Timestamp) - default: - if err != nil { - level.Debug(tLogger).Log("msg", "Error ingesting histogram", "err", err) - } - } + errorTracker.addHistogramError(err, tLogger, lset, hp.Timestamp) } // Current implementation of app.AppendExemplar doesn't create a new series, so it must be already present. 
@@ -214,67 +156,13 @@ func (r *Writer) Write(ctx context.Context, tenantID string, wreq *prompb.WriteR Ts: ex.Timestamp, HasTs: true, }); err != nil { - switch err { - case storage.ErrOutOfOrderExemplar: - numExemplarsOutOfOrder++ - level.Debug(exLogger).Log("msg", "Out of order exemplar") - case storage.ErrDuplicateExemplar: - numExemplarsDuplicate++ - level.Debug(exLogger).Log("msg", "Duplicate exemplar") - case storage.ErrExemplarLabelLength: - numExemplarsLabelLength++ - level.Debug(exLogger).Log("msg", "Label length for exemplar exceeds max limit", "limit", exemplar.ExemplarMaxLabelSetLength) - default: - level.Debug(exLogger).Log("msg", "Error ingesting exemplar", "err", err) - } + errorTracker.addExemplarError(err, exLogger) } } } } - if numLabelsOutOfOrder > 0 { - level.Info(tLogger).Log("msg", "Error on series with out-of-order labels", "numDropped", numLabelsOutOfOrder) - errs.Add(errors.Wrapf(labelpb.ErrOutOfOrderLabels, "add %d series", numLabelsOutOfOrder)) - } - if numLabelsDuplicates > 0 { - level.Info(tLogger).Log("msg", "Error on series with duplicate labels", "numDropped", numLabelsDuplicates) - errs.Add(errors.Wrapf(labelpb.ErrDuplicateLabels, "add %d series", numLabelsDuplicates)) - } - if numLabelsEmpty > 0 { - level.Info(tLogger).Log("msg", "Error on series with empty label name or value", "numDropped", numLabelsEmpty) - errs.Add(errors.Wrapf(labelpb.ErrEmptyLabels, "add %d series", numLabelsEmpty)) - } - - if numSamplesOutOfOrder > 0 { - level.Info(tLogger).Log("msg", "Error on ingesting out-of-order samples", "numDropped", numSamplesOutOfOrder) - errs.Add(errors.Wrapf(storage.ErrOutOfOrderSample, "add %d samples", numSamplesOutOfOrder)) - } - if numSamplesDuplicates > 0 { - level.Info(tLogger).Log("msg", "Error on ingesting samples with different value but same timestamp", "numDropped", numSamplesDuplicates) - errs.Add(errors.Wrapf(storage.ErrDuplicateSampleForTimestamp, "add %d samples", numSamplesDuplicates)) - } - if numSamplesOutOfBounds > 0 { - level.Info(tLogger).Log("msg", "Error on ingesting samples that are too old or are too far into the future", "numDropped", numSamplesOutOfBounds) - errs.Add(errors.Wrapf(storage.ErrOutOfBounds, "add %d samples", numSamplesOutOfBounds)) - } - if numSamplesTooOld > 0 { - level.Info(tLogger).Log("msg", "Error on ingesting samples that are outside of the allowed out-of-order time window", "numDropped", numSamplesTooOld) - errs.Add(errors.Wrapf(storage.ErrTooOldSample, "add %d samples", numSamplesTooOld)) - } - - if numExemplarsOutOfOrder > 0 { - level.Info(tLogger).Log("msg", "Error on ingesting out-of-order exemplars", "numDropped", numExemplarsOutOfOrder) - errs.Add(errors.Wrapf(storage.ErrOutOfOrderExemplar, "add %d exemplars", numExemplarsOutOfOrder)) - } - if numExemplarsDuplicate > 0 { - level.Info(tLogger).Log("msg", "Error on ingesting duplicate exemplars", "numDropped", numExemplarsDuplicate) - errs.Add(errors.Wrapf(storage.ErrDuplicateExemplar, "add %d exemplars", numExemplarsDuplicate)) - } - if numExemplarsLabelLength > 0 { - level.Info(tLogger).Log("msg", "Error on ingesting exemplars with label length exceeding maximum limit", "numDropped", numExemplarsLabelLength) - errs.Add(errors.Wrapf(storage.ErrExemplarLabelLength, "add %d exemplars", numExemplarsLabelLength)) - } - + errs := errorTracker.collectErrors(tLogger) if err := app.Commit(); err != nil { errs.Add(errors.Wrap(err, "commit samples")) } diff --git a/pkg/receive/writer_errors.go b/pkg/receive/writer_errors.go new file mode 100644 index 
0000000000..ee807564e6 --- /dev/null +++ b/pkg/receive/writer_errors.go @@ -0,0 +1,161 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package receive + +import ( + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "github.com/thanos-io/thanos/pkg/store/labelpb" +) + +type writeErrorTracker struct { + numLabelsOutOfOrder int + numLabelsDuplicates int + numLabelsEmpty int + + numSamplesOutOfOrder int + numSamplesDuplicates int + numSamplesOutOfBounds int + numSamplesTooOld int + + numExemplarsOutOfOrder int + numExemplarsDuplicate int + numExemplarsLabelLength int +} + +func (a *writeErrorTracker) addLabelsError(err error, lset *labelpb.ZLabelSet, logger log.Logger) { + if err == nil { + return + } + + switch err { + case labelpb.ErrOutOfOrderLabels: + a.numLabelsOutOfOrder++ + level.Debug(logger).Log("msg", "Out of order labels in the label set", "lset", lset.String()) + case labelpb.ErrDuplicateLabels: + a.numLabelsDuplicates++ + level.Debug(logger).Log("msg", "Duplicate labels in the label set", "lset", lset.String()) + case labelpb.ErrEmptyLabels: + a.numLabelsEmpty++ + level.Debug(logger).Log("msg", "Labels with empty name in the label set", "lset", lset.String()) + default: + level.Debug(logger).Log("msg", "Error validating labels", "err", err) + } +} + +func (a *writeErrorTracker) addSampleError(err error, tLogger log.Logger, lset labels.Labels, t int64, v float64) { + if err == nil { + return + } + + switch { + case errors.Is(err, storage.ErrOutOfOrderSample): + a.numSamplesOutOfOrder++ + level.Debug(tLogger).Log("msg", "Out of order sample", "lset", lset, "value", v, "timestamp", t) + case errors.Is(err, storage.ErrDuplicateSampleForTimestamp): + a.numSamplesDuplicates++ + level.Debug(tLogger).Log("msg", "Duplicate sample for timestamp", "lset", lset, "value", v, "timestamp", t) + case errors.Is(err, storage.ErrOutOfBounds): + a.numSamplesOutOfBounds++ + level.Debug(tLogger).Log("msg", "Out of bounds metric", "lset", lset, "value", v, "timestamp", t) + case errors.Is(err, storage.ErrTooOldSample): + a.numSamplesTooOld++ + level.Debug(tLogger).Log("msg", "Sample is too old", "lset", lset, "value", v, "timestamp", t) + default: + level.Debug(tLogger).Log("msg", "Error ingesting sample", "err", err) + } +} + +func (a *writeErrorTracker) addHistogramError(err error, tLogger log.Logger, lset labels.Labels, timestamp int64) { + if err == nil { + return + } + switch { + case errors.Is(err, storage.ErrOutOfOrderSample): + a.numSamplesOutOfOrder++ + level.Debug(tLogger).Log("msg", "Out of order histogram", "lset", lset, "timestamp", timestamp) + case errors.Is(err, storage.ErrDuplicateSampleForTimestamp): + a.numSamplesDuplicates++ + level.Debug(tLogger).Log("msg", "Duplicate histogram for timestamp", "lset", lset, "timestamp", timestamp) + case errors.Is(err, storage.ErrOutOfBounds): + a.numSamplesOutOfBounds++ + level.Debug(tLogger).Log("msg", "Out of bounds metric", "lset", lset, "timestamp", timestamp) + case errors.Is(err, storage.ErrTooOldSample): + a.numSamplesTooOld++ + level.Debug(tLogger).Log("msg", "Histogram is too old", "lset", lset, "timestamp", timestamp) + default: + level.Debug(tLogger).Log("msg", "Error ingesting histogram", "err", err) + } +} + +func (a *writeErrorTracker) addExemplarError(err error, 
exLogger log.Logger) { + if err == nil { + return + } + + switch { + case errors.Is(err, storage.ErrOutOfOrderExemplar): + a.numExemplarsOutOfOrder++ + level.Debug(exLogger).Log("msg", "Out of order exemplar") + case errors.Is(err, storage.ErrDuplicateExemplar): + a.numExemplarsDuplicate++ + level.Debug(exLogger).Log("msg", "Duplicate exemplar") + case errors.Is(err, storage.ErrExemplarLabelLength): + a.numExemplarsLabelLength++ + level.Debug(exLogger).Log("msg", "Label length for exemplar exceeds max limit", "limit", exemplar.ExemplarMaxLabelSetLength) + default: + level.Debug(exLogger).Log("msg", "Error ingesting exemplar", "err", err) + } +} + +func (a *writeErrorTracker) collectErrors(tLogger log.Logger) writeErrors { + var errs writeErrors + if a.numLabelsOutOfOrder > 0 { + level.Warn(tLogger).Log("msg", "Error on series with out-of-order labels", "numDropped", a.numLabelsOutOfOrder) + errs.Add(errors.Wrapf(labelpb.ErrOutOfOrderLabels, "add %d series", a.numLabelsOutOfOrder)) + } + if a.numLabelsDuplicates > 0 { + level.Warn(tLogger).Log("msg", "Error on series with duplicate labels", "numDropped", a.numLabelsDuplicates) + errs.Add(errors.Wrapf(labelpb.ErrDuplicateLabels, "add %d series", a.numLabelsDuplicates)) + } + if a.numLabelsEmpty > 0 { + level.Warn(tLogger).Log("msg", "Error on series with empty label name or value", "numDropped", a.numLabelsEmpty) + errs.Add(errors.Wrapf(labelpb.ErrEmptyLabels, "add %d series", a.numLabelsEmpty)) + } + + if a.numSamplesOutOfOrder > 0 { + level.Warn(tLogger).Log("msg", "Error on ingesting out-of-order samples", "numDropped", a.numSamplesOutOfOrder) + errs.Add(errors.Wrapf(storage.ErrOutOfOrderSample, "add %d samples", a.numSamplesOutOfOrder)) + } + if a.numSamplesDuplicates > 0 { + level.Warn(tLogger).Log("msg", "Error on ingesting samples with different value but same timestamp", "numDropped", a.numSamplesDuplicates) + errs.Add(errors.Wrapf(storage.ErrDuplicateSampleForTimestamp, "add %d samples", a.numSamplesDuplicates)) + } + if a.numSamplesOutOfBounds > 0 { + level.Warn(tLogger).Log("msg", "Error on ingesting samples that are too old or are too far into the future", "numDropped", a.numSamplesOutOfBounds) + errs.Add(errors.Wrapf(storage.ErrOutOfBounds, "add %d samples", a.numSamplesOutOfBounds)) + } + if a.numSamplesTooOld > 0 { + level.Warn(tLogger).Log("msg", "Error on ingesting samples that are outside of the allowed out-of-order time window", "numDropped", a.numSamplesTooOld) + errs.Add(errors.Wrapf(storage.ErrTooOldSample, "add %d samples", a.numSamplesTooOld)) + } + + if a.numExemplarsOutOfOrder > 0 { + level.Warn(tLogger).Log("msg", "Error on ingesting out-of-order exemplars", "numDropped", a.numExemplarsOutOfOrder) + errs.Add(errors.Wrapf(storage.ErrOutOfOrderExemplar, "add %d exemplars", a.numExemplarsOutOfOrder)) + } + if a.numExemplarsDuplicate > 0 { + level.Warn(tLogger).Log("msg", "Error on ingesting duplicate exemplars", "numDropped", a.numExemplarsDuplicate) + errs.Add(errors.Wrapf(storage.ErrDuplicateExemplar, "add %d exemplars", a.numExemplarsDuplicate)) + } + if a.numExemplarsLabelLength > 0 { + level.Warn(tLogger).Log("msg", "Error on ingesting exemplars with label length exceeding maximum limit", "numDropped", a.numExemplarsLabelLength) + errs.Add(errors.Wrapf(storage.ErrExemplarLabelLength, "add %d exemplars", a.numExemplarsLabelLength)) + } + return errs +} diff --git a/pkg/receive/writer_test.go b/pkg/receive/writer_test.go index 34613794b8..1220e23f72 100644 --- a/pkg/receive/writer_test.go +++ 
b/pkg/receive/writer_test.go @@ -10,6 +10,8 @@ import ( "testing" "time" + "github.com/thanos-io/thanos/pkg/receive/writecapnp" + "github.com/efficientgo/core/testutil" "github.com/go-kit/log" "github.com/pkg/errors" @@ -327,69 +329,110 @@ func TestWriter(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - dir := t.TempDir() - logger := log.NewNopLogger() - - m := NewMultiTSDB(dir, logger, prometheus.NewRegistry(), &tsdb.Options{ - MinBlockDuration: (2 * time.Hour).Milliseconds(), - MaxBlockDuration: (2 * time.Hour).Milliseconds(), - RetentionDuration: (6 * time.Hour).Milliseconds(), - NoLockfile: true, - MaxExemplars: testData.maxExemplars, - EnableExemplarStorage: true, - EnableNativeHistograms: true, - }, - labels.FromStrings("replica", "01"), - "tenant_id", - nil, - false, - metadata.NoneFunc, - ) - t.Cleanup(func() { testutil.Ok(t, m.Close()) }) - - testutil.Ok(t, m.Flush()) - testutil.Ok(t, m.Open()) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() + t.Run("proto_writer", func(t *testing.T) { + logger, m, app := setupMultitsdb(t, testData.maxExemplars) + + w := NewWriter(logger, m, testData.opts) + + for idx, req := range testData.reqs { + err := w.Write(context.Background(), tenancy.DefaultTenant, req.Timeseries) + + // We expect no error on any request except the last one + // which may error (and in that case we assert on it). + if testData.expectedErr == nil || idx < len(testData.reqs)-1 { + testutil.Ok(t, err) + } else { + testutil.NotOk(t, err) + testutil.Equals(t, testData.expectedErr.Error(), err.Error()) + } + } - app, err := m.TenantAppendable(tenancy.DefaultTenant) - testutil.Ok(t, err) + assertWrittenData(t, app, testData.expectedIngested) + }) - testutil.Ok(t, runutil.Retry(1*time.Second, ctx.Done(), func() error { - _, err = app.Appender(context.Background()) - return err - })) + t.Run("capnproto_writer", func(t *testing.T) { + logger, m, app := setupMultitsdb(t, testData.maxExemplars) - w := NewWriter(logger, m, testData.opts) + opts := &CapNProtoWriterOptions{} + if testData.opts != nil { + opts.TooFarInFutureTimeWindow = testData.opts.TooFarInFutureTimeWindow + } + w := NewCapNProtoWriter(logger, m, opts) - for idx, req := range testData.reqs { - err = w.Write(context.Background(), tenancy.DefaultTenant, req) + for idx, req := range testData.reqs { + capnpReq, err := writecapnp.Build(tenancy.DefaultTenant, req.Timeseries) + testutil.Ok(t, err) - // We expect no error on any request except the last one - // which may error (and in that case we assert on it). - if testData.expectedErr == nil || idx < len(testData.reqs)-1 { + wr, err := writecapnp.NewRequest(capnpReq) testutil.Ok(t, err) - } else { - testutil.NotOk(t, err) - testutil.Equals(t, testData.expectedErr.Error(), err.Error()) + err = w.Write(context.Background(), tenancy.DefaultTenant, wr) + + // We expect no error on any request except the last one + // which may error (and in that case we assert on it). + if testData.expectedErr == nil || idx < len(testData.reqs)-1 { + testutil.Ok(t, err) + } else { + testutil.NotOk(t, err) + testutil.Equals(t, testData.expectedErr.Error(), err.Error()) + } } - } - - // On each expected series, assert we have a ref available. 
- a, err := app.Appender(context.Background()) - testutil.Ok(t, err) - gr := a.(storage.GetRef) - - for _, ts := range testData.expectedIngested { - l := labelpb.ZLabelsToPromLabels(ts.Labels) - ref, _ := gr.GetRef(l, l.Hash()) - testutil.Assert(t, ref != 0, fmt.Sprintf("appender should have reference to series %v", ts)) - } + + assertWrittenData(t, app, testData.expectedIngested) + }) }) } } +func assertWrittenData(t *testing.T, app Appendable, expectedIngested []prompb.TimeSeries) { + // On each expected series, assert we have a ref available. + a, err := app.Appender(context.Background()) + testutil.Ok(t, err) + gr := a.(storage.GetRef) + + for _, ts := range expectedIngested { + l := labelpb.ZLabelsToPromLabels(ts.Labels) + ref, _ := gr.GetRef(l, l.Hash()) + testutil.Assert(t, ref != 0, fmt.Sprintf("appender should have reference to series %v", ts)) + } +} + +func setupMultitsdb(t *testing.T, maxExemplars int64) (log.Logger, *MultiTSDB, Appendable) { + dir := t.TempDir() + logger := log.NewNopLogger() + + m := NewMultiTSDB(dir, logger, prometheus.NewRegistry(), &tsdb.Options{ + MinBlockDuration: (2 * time.Hour).Milliseconds(), + MaxBlockDuration: (2 * time.Hour).Milliseconds(), + RetentionDuration: (6 * time.Hour).Milliseconds(), + NoLockfile: true, + MaxExemplars: maxExemplars, + EnableExemplarStorage: true, + EnableNativeHistograms: true, + }, + labels.FromStrings("replica", "01"), + "tenant_id", + nil, + false, + metadata.NoneFunc, + ) + t.Cleanup(func() { testutil.Ok(t, m.Close()) }) + + testutil.Ok(t, m.Flush()) + testutil.Ok(t, m.Open()) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + app, err := m.TenantAppendable(tenancy.DefaultTenant) + testutil.Ok(t, err) + + testutil.Ok(t, runutil.Retry(1*time.Second, ctx.Done(), func() error { + _, err = app.Appender(context.Background()) + return err + })) + return logger, m, app +} + func BenchmarkWriterTimeSeriesWithSingleLabel_10(b *testing.B) { benchmarkWriter(b, 1, 10, false) } func BenchmarkWriterTimeSeriesWithSingleLabel_100(b *testing.B) { benchmarkWriter(b, 1, 100, false) } func BenchmarkWriterTimeSeriesWithSingleLabel_1000(b *testing.B) { benchmarkWriter(b, 1, 1000, false) } @@ -466,7 +509,7 @@ func benchmarkWriter(b *testing.B, labelsNum int, seriesNum int, generateHistogr b.ResetTimer() for i := 0; i < b.N; i++ { - testutil.Ok(b, w.Write(ctx, "foo", wreq)) + testutil.Ok(b, w.Write(ctx, "foo", wreq.Timeseries)) } }) @@ -477,7 +520,7 @@ func benchmarkWriter(b *testing.B, labelsNum int, seriesNum int, generateHistogr b.ResetTimer() for i := 0; i < b.N; i++ { - testutil.Ok(b, w.Write(ctx, "foo", wreq)) + testutil.Ok(b, w.Write(ctx, "foo", wreq.Timeseries)) } }) diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index 0ae6dd8d7a..c8a9e7fc62 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -539,6 +539,7 @@ type ReceiveBuilder struct { f e2e.FutureRunnable maxExemplars int + capnp bool ingestion bool limit int tenantsLimits receive.TenantsWriteLimitsConfig @@ -555,7 +556,7 @@ type ReceiveBuilder struct { func NewReceiveBuilder(e e2e.Environment, name string) *ReceiveBuilder { f := e.Runnable(fmt.Sprintf("receive-%v", name)). - WithPorts(map[string]int{"http": 8080, "grpc": 9091, "remote-write": 8081}). + WithPorts(map[string]int{"http": 8080, "grpc": 9091, "remote-write": 8081, "capnp": 19391}). 
Future() return &ReceiveBuilder{ Linkable: f, @@ -586,6 +587,11 @@ func (r *ReceiveBuilder) WithLabel(name, value string) *ReceiveBuilder { return r } +func (r *ReceiveBuilder) UseCapnpReplication() *ReceiveBuilder { + r.capnp = true + return r +} + func (r *ReceiveBuilder) WithRouting(replication int, hashringConfigs ...receive.HashringConfig) *ReceiveBuilder { r.hashringConfigs = hashringConfigs r.replication = replication @@ -646,6 +652,10 @@ func (r *ReceiveBuilder) Init() *e2eobs.Observable { args["--label"] = fmt.Sprintf("%s,%s", args["--label"], strings.Join(r.labels, ",")) } + if r.capnp { + args["--receive.replication-protocol"] = "capnproto" + } + hashring := r.hashringConfigs if len(hashring) > 0 && r.ingestion { args["--receive.local-endpoint"] = r.InternalEndpoint("grpc") diff --git a/test/e2e/receive_test.go b/test/e2e/receive_test.go index 4f70479dfc..c938a4f040 100644 --- a/test/e2e/receive_test.go +++ b/test/e2e/receive_test.go @@ -25,6 +25,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage/remote" "github.com/stretchr/testify/require" @@ -1152,3 +1153,65 @@ func TestReceiveExtractsTenant(t *testing.T) { }) } + +func TestReceiveCpnp(t *testing.T) { + e, err := e2e.NewDockerEnvironment("receive-cpnp") + testutil.Ok(t, err) + t.Cleanup(e2ethanos.CleanScenario(t, e)) + + i := e2ethanos.NewReceiveBuilder(e, "ingestor").WithIngestionEnabled().Init() + testutil.Ok(t, e2e.StartAndWaitReady(i)) + + h := receive.HashringConfig{ + TenantMatcherType: "glob", + Tenants: []string{ + "default*", + }, + Endpoints: []receive.Endpoint{ + {Address: i.InternalEndpoint("grpc"), CapNProtoAddress: i.InternalEndpoint("capnp")}, + }, + } + + r := e2ethanos.NewReceiveBuilder(e, "router").UseCapnpReplication().WithRouting(1, h).Init() + testutil.Ok(t, e2e.StartAndWaitReady(r)) + + ts := time.Now() + + require.NoError(t, runutil.RetryWithLog(logkit.NewLogfmtLogger(os.Stdout), 1*time.Second, make(<-chan struct{}), func() error { + return storeWriteRequest(context.Background(), "http://"+r.Endpoint("remote-write")+"/api/v1/receive", &prompb.WriteRequest{ + Timeseries: []prompb.TimeSeries{ + { + Labels: []prompb.Label{ + {Name: model.MetricNameLabel, Value: "myself"}, + }, + Samples: []prompb.Sample{ + {Value: 1, Timestamp: timestamp.FromTime(ts)}, + }, + }, + }, + }) + })) + + testutil.Ok(t, i.WaitSumMetricsWithOptions(e2emon.Equals(0), []string{"prometheus_tsdb_blocks_loaded"}, e2emon.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "tenant", "default-tenant")), e2emon.WaitMissingMetrics())) + + q := e2ethanos.NewQuerierBuilder(e, "1", i.InternalEndpoint("grpc")).Init() + testutil.Ok(t, e2e.StartAndWaitReady(q)) + + v := instantQuery(t, context.Background(), q.Endpoint("http"), func() string { return "myself" }, func() time.Time { return ts }, promclient.QueryOptions{ + Deduplicate: false, + }, 1) + + v[0].Timestamp = 0 + + require.Equal(t, model.Vector{ + { + Metric: model.Metric{ + model.MetricNameLabel: "myself", + "receive": "receive-ingestor", + "tenant_id": "default-tenant", + }, + Value: 1, + }, + }, v) + +}
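
A minimal consumer-side sketch of how the writecapnp.Request iterator added in pkg/receive/writecapnp/write_request.go is driven end to end. writecapnp.Build is not shown in this excerpt, so its signature (a tenant string plus []prompb.TimeSeries returning a WriteRequest) is an assumption inferred from its call sites in writer_test.go; everything else uses only NewRequest, Next, At, Series and Close as defined above.

// Minimal sketch of consuming a Cap'n Proto write request.
// Assumption: writecapnp.Build(tenant string, ts []prompb.TimeSeries) (writecapnp.WriteRequest, error),
// inferred from its call sites in writer_test.go; it is not defined in this excerpt.
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/prometheus/prompb"

	"github.com/thanos-io/thanos/pkg/receive/writecapnp"
)

func main() {
	ts := []prompb.TimeSeries{{
		Labels:  []prompb.Label{{Name: "__name__", Value: "myself"}},
		Samples: []prompb.Sample{{Value: 1, Timestamp: 1700000000000}},
	}}

	// Encode into the Cap'n Proto write request (assumed signature, see above).
	wr, err := writecapnp.Build("default-tenant", ts)
	if err != nil {
		log.Fatal(err)
	}

	// Decode through the shared symbol table and walk the series lazily.
	req, err := writecapnp.NewRequest(wr)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = req.Close() }() // Close returns the symbol slice to the pool.

	var s writecapnp.Series
	for req.Next() {
		if err := req.At(&s); err != nil {
			log.Fatal(err)
		}
		for _, sample := range s.Samples {
			fmt.Println(s.Labels.String(), sample.Timestamp, sample.Value)
		}
	}
}

Reusing a single Series value across iterations follows how At is written above: it truncates and re-appends to the existing Samples, Histograms and Exemplars slices, so one reused value avoids per-series allocations.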