diff --git a/CHANGELOG.md b/CHANGELOG.md
index c5ec98ff41..7833f8f67a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,29 @@ project adheres to [Semantic Versioning](http://semver.org/).
 
 ## Unreleased
 
+### Request Body Size Limits
+
+OPA now rejects requests with request bodies larger than a preset maximum size. To control this behavior, two new configuration keys are available: `server.decoding.max_length` and `server.decoding.gzip.max_length`. They set the maximum size in bytes allowed for an incoming request payload, and the maximum size in bytes allowed for a decompressed gzip request payload, respectively.
+
+Here's an example OPA configuration using the new keys:
+
+```yaml
+# Set max request size to 64 MB and max gzip size (decompressed) to 128 MB.
+server:
+  decoding:
+    max_length: 67108864
+    gzip:
+      max_length: 134217728
+```
+
+These changes improve memory usage in the OPA HTTP server, and help OPA deployments avoid some accidental out-of-memory situations.
+
+### Breaking Changes
+
+OPA now automatically rejects very large requests. Requests with a `Content-Length` larger than 256 MB, and gzipped requests with payloads that decompress to larger than 512 MB, will be rejected as part of hardening OPA against denial-of-service attacks. Previously, a large enough request could cause an OPA instance to run out of memory in low-memory sidecar deployment scenarios, just from attempting to read the request body into memory.
+
+For most users, no changes will be needed to continue using OPA. However, for those who need to override the default limits, the new `server.decoding.max_length` and `server.decoding.gzip.max_length` configuration fields allow setting higher request size limits.
+
 ## 0.66.0
 
 This release contains a mix of features, performance improvements, and bugfixes.
diff --git a/config/config.go b/config/config.go
index 2e3fa10b77..87ab109113 100644
--- a/config/config.go
+++ b/config/config.go
@@ -39,6 +39,7 @@ type Config struct {
 	DistributedTracing json.RawMessage `json:"distributed_tracing,omitempty"`
 	Server             *struct {
 		Encoding json.RawMessage `json:"encoding,omitempty"`
+		Decoding json.RawMessage `json:"decoding,omitempty"`
 		Metrics  json.RawMessage `json:"metrics,omitempty"`
 	} `json:"server,omitempty"`
 	Storage *struct {
diff --git a/config/config_test.go b/config/config_test.go
index 3922749be7..ebe655263e 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -197,6 +197,12 @@ func TestActiveConfig(t *testing.T) {
 				"some-plugin": {}
 			},
 			"server": {
+				"decoding": {
+					"max_length": 134217728,
+					"gzip": {
+						"max_length": 268435456
+					}
+				},
 				"encoding": {
 					"gzip": {
 						"min_length": 1024,
@@ -265,6 +271,12 @@ func TestActiveConfig(t *testing.T) {
 				"some-plugin": {}
 			},
 			"server": {
+				"decoding": {
+					"max_length": 134217728,
+					"gzip": {
+						"max_length": 268435456
+					}
+				},
 				"encoding": {
 					"gzip": {
 						"min_length": 1024,
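For context on why the gzip limit is tracked separately from the raw limit: a tiny compressed body can expand enormously on decode, so `Content-Length` alone says little about memory cost. A standalone illustration (not part of the patch):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

func main() {
	// 1 MB of a single repeated byte compresses extremely well, which is
	// exactly why Content-Length says little about decompressed size.
	raw := bytes.Repeat([]byte("a"), 1<<20)

	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(raw); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// Prints roughly: raw=1048576 compressed=~1KB, a ~1000x expansion on
	// decode. server.decoding.max_length bounds only the bytes on the wire;
	// server.decoding.gzip.max_length bounds what they inflate into.
	fmt.Printf("raw=%d compressed=%d\n", len(raw), buf.Len())
}
```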
diff --git a/docs/content/configuration.md b/docs/content/configuration.md
index c92fd1ae1b..63816913a6 100644
--- a/docs/content/configuration.md
+++ b/docs/content/configuration.md
@@ -78,9 +78,13 @@ distributed_tracing:
   encryption: "off"
 
 server:
+  decoding:
+    max_length: 134217728
+    gzip:
+      max_length: 268435456
   encoding:
     gzip:
-      min_length: 1024,
+      min_length: 1024
       compression_level: 9
 ```
 
@@ -886,14 +890,19 @@ See [the docs on disk storage](../storage/) for details about the settings.
 
 ## Server
 
 The `server` configuration sets:
-- the gzip compression settings for `/v0/data`, `/v1/data` and `/v1/compile` HTTP `POST` endpoints
+- for all incoming requests:
+  - maximum allowed request size
+  - maximum decompressed gzip payload size
+- the gzip compression settings for responses from the `/v0/data`, `/v1/data` and `/v1/compile` HTTP `POST` endpoints
 The gzip compression settings are used when the client sends `Accept-Encoding: gzip`
 - buckets for `http_request_duration_seconds` histogram
 
-| Field | Type | Required | Description |
-|-------------------------------------------------------------|-------------|---------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `server.encoding.gzip.min_length` | `int` | No, (default: 1024) | Specifies the minimum length of the response to compress |
-| `server.encoding.gzip.compression_level` | `int` | No, (default: 9) | Specifies the compression level. Accepted values: a value of either 0 (no compression), 1 (best speed, lowest compression) or 9 (slowest, best compression). See https://pkg.go.dev/compress/flate#pkg-constants |
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `server.decoding.max_length` | `int` | No, (default: 268435456) | Specifies the maximum allowed number of bytes to read from a request body. |
+| `server.decoding.gzip.max_length` | `int` | No, (default: 536870912) | Specifies the maximum allowed number of bytes to read from the gzip decompressor for gzip-encoded requests. |
+| `server.encoding.gzip.min_length` | `int` | No, (default: 1024) | Specifies the minimum length of the response to compress. |
+| `server.encoding.gzip.compression_level` | `int` | No, (default: 9) | Specifies the compression level. Accepted values: 0 (no compression), 1 (best speed, lowest compression), or 9 (slowest, best compression). See https://pkg.go.dev/compress/flate#pkg-constants |
 | `server.metrics.prom.http_request_duration_seconds.buckets` | `[]float64` | No, (default: [1e-6, 5e-6, 1e-5, 5e-5, 1e-4, 5e-4, 1e-3, 0.01, 0.1, 1 ]) | Specifies the buckets for the `http_request_duration_seconds` metric. Each value is a float, it is expressed in seconds and subdivisions of it. E.g `1e-6` is 1 microsecond, `1e-3` 1 millisecond, `0.01` 10 milliseconds |
 
 ## Miscellaneous
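To make the two `server.decoding` fields concrete: conceptually, the server applies them in two stages, first capping the bytes read off the wire, then capping what a gzip body may inflate to. A minimal sketch under assumed names (`limits`, `readLimitedBody` are hypothetical); OPA's actual enforcement, shown later in this patch, uses the gzip size trailer rather than copy-and-count:

```go
package limits

import (
	"bytes"
	"compress/gzip"
	"errors"
	"io"
	"net/http"
	"strings"
)

// readLimitedBody shows the two checks in sequence. maxLen plays the role of
// server.decoding.max_length, gzipMaxLen of server.decoding.gzip.max_length.
func readLimitedBody(w http.ResponseWriter, r *http.Request, maxLen, gzipMaxLen int64) ([]byte, error) {
	// Stage 1: cap the raw bytes read from the request body.
	r.Body = http.MaxBytesReader(w, r.Body, maxLen)
	raw, err := io.ReadAll(r.Body)
	if err != nil {
		return nil, err // e.g. "http: request body too large"
	}
	if !strings.Contains(r.Header.Get("Content-Encoding"), "gzip") {
		return raw, nil
	}

	// Stage 2: cap the decompressed size. Reading gzipMaxLen+1 bytes lets us
	// distinguish "exactly at the limit" from "over the limit".
	zr, err := gzip.NewReader(bytes.NewReader(raw))
	if err != nil {
		return nil, err
	}
	defer zr.Close()

	var out bytes.Buffer
	n, err := io.CopyN(&out, zr, gzipMaxLen+1)
	if err != nil && err != io.EOF {
		return nil, err
	}
	if n > gzipMaxLen {
		return nil, errors.New("gzip payload too large")
	}
	return out.Bytes(), nil
}
```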
diff --git a/plugins/server/decoding/config.go b/plugins/server/decoding/config.go
new file mode 100644
index 0000000000..5cd009637d
--- /dev/null
+++ b/plugins/server/decoding/config.go
@@ -0,0 +1,102 @@
+// Package decoding implements the configuration side of the upgraded gzip
+// decompression framework. The original work only enabled gzip decoding for
+// a few endpoints-- here we enable it for all of OPA. Additionally, we provide
+// some new defensive configuration options: max_length, and gzip.max_length.
+// These allow rejecting requests that indicate their contents are larger than
+// the size limits.
+//
+// The request handling pipeline now looks roughly like this:
+//
+// Request -> MaxBytesReader(Config.MaxLength) -> io.CopyN(dest, req, Gzip.MaxLength)
+//
+// The intent behind this design is to improve how OPA handles large and/or
+// malicious requests, compressed or otherwise. The benefit of being a little
+// more strict in what we allow is that we can now use "riskier" but
+// dramatically more performant techniques, like preallocating content buffers
+// for gzipped data. This should also help OPAs in limited-memory situations.
+package decoding
+
+import (
+	"fmt"
+
+	"github.com/open-policy-agent/opa/util"
+)
+
+var (
+	defaultMaxRequestLength     = int64(268435456) // 256 MB
+	defaultGzipMaxContentLength = int64(536870912) // 512 MB
+)
+
+// Config represents the configuration for the Server.Decoding settings
+type Config struct {
+	MaxLength *int64 `json:"max_length,omitempty"` // maximum request size that will be read, regardless of compression.
+	Gzip      *Gzip  `json:"gzip,omitempty"`
+}
+
+// Gzip represents the configuration for the Server.Decoding.Gzip settings
+type Gzip struct {
+	MaxLength *int64 `json:"max_length,omitempty"` // Max number of bytes allowed to be read from the decompressor.
+}
+
+// ConfigBuilder assists in the construction of the plugin configuration.
+type ConfigBuilder struct {
+	raw []byte
+}
+
+// NewConfigBuilder returns a new ConfigBuilder to build and parse the server config
+func NewConfigBuilder() *ConfigBuilder {
+	return &ConfigBuilder{}
+}
+
+// WithBytes sets the raw server config
+func (b *ConfigBuilder) WithBytes(config []byte) *ConfigBuilder {
+	b.raw = config
+	return b
+}
+
+// Parse returns a valid Config object with defaults injected.
+func (b *ConfigBuilder) Parse() (*Config, error) {
+	if b.raw == nil {
+		defaultConfig := &Config{
+			MaxLength: &defaultMaxRequestLength,
+			Gzip: &Gzip{
+				MaxLength: &defaultGzipMaxContentLength,
+			},
+		}
+		return defaultConfig, nil
+	}
+
+	var result Config
+
+	if err := util.Unmarshal(b.raw, &result); err != nil {
+		return nil, err
+	}
+
+	return &result, result.validateAndInjectDefaults()
+}
+
+// validateAndInjectDefaults populates defaults if the fields are nil, then
+// validates the config values.
+func (c *Config) validateAndInjectDefaults() error {
+	if c.MaxLength == nil {
+		c.MaxLength = &defaultMaxRequestLength
+	}
+
+	if c.Gzip == nil {
+		c.Gzip = &Gzip{
+			MaxLength: &defaultGzipMaxContentLength,
+		}
+	}
+	if c.Gzip.MaxLength == nil {
+		c.Gzip.MaxLength = &defaultGzipMaxContentLength
+	}
+
+	if *c.MaxLength <= 0 {
+		return fmt.Errorf("invalid value for server.decoding.max_length field, should be a positive number")
+	}
+	if *c.Gzip.MaxLength <= 0 {
+		return fmt.Errorf("invalid value for server.decoding.gzip.max_length field, should be a positive number")
+	}
+
+	return nil
+}
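For reference, the builder above can be exercised directly; fields the raw config omits pick up the defaults injected by `validateAndInjectDefaults`. A usage sketch, assuming only the import path this patch introduces:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/plugins/server/decoding"
)

func main() {
	// Only max_length is set; gzip.max_length should get the 512 MB default.
	cfg, err := decoding.NewConfigBuilder().
		WithBytes([]byte(`{"max_length": 67108864}`)).
		Parse()
	if err != nil {
		panic(err)
	}
	fmt.Println(*cfg.MaxLength)      // 67108864 (from the raw config)
	fmt.Println(*cfg.Gzip.MaxLength) // 536870912 (injected default)
}
```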
diff --git a/plugins/server/decoding/config_test.go b/plugins/server/decoding/config_test.go
new file mode 100644
index 0000000000..f8751092f3
--- /dev/null
+++ b/plugins/server/decoding/config_test.go
@@ -0,0 +1,108 @@
+package decoding
+
+import (
+	"fmt"
+	"testing"
+)
+
+func TestConfigValidation(t *testing.T) {
+	tests := []struct {
+		input   string
+		wantErr bool
+	}{
+		{
+			input:   `{}`,
+			wantErr: false,
+		},
+		{
+			input:   `{"gzip": {"max_length": "not-a-number"}}`,
+			wantErr: true,
+		},
+		{
+			input:   `{"gzip": {"max_length": 42}}`,
+			wantErr: false,
+		},
+		{
+			input:   `{"gzip":{"max_length": "42"}}`,
+			wantErr: true,
+		},
+		{
+			input:   `{"gzip":{"max_length": 0}}`,
+			wantErr: true,
+		},
+		{
+			input:   `{"gzip":{"max_length": -10}}`,
+			wantErr: true,
+		},
+		{
+			input:   `{"gzip":{"random_key": 0}}`,
+			wantErr: false,
+		},
+		{
+			input:   `{"gzip": {"max_length": -10}}`,
+			wantErr: true,
+		},
+		{
+			input:   `{"max_length": "not-a-number"}`,
+			wantErr: true,
+		},
+		{
+			input:   `{"gzip":{}}`,
+			wantErr: false,
+		},
+		{
+			input:   `{"max_length": "not-a-number", "gzip":{}}`,
+			wantErr: true,
+		},
+		{
+			input:   `{"max_length": 42, "gzip":{"max_length": 42}}`,
+			wantErr: false,
+		},
+	}
+
+	for i, test := range tests {
+		t.Run(fmt.Sprintf("TestConfigValidation_case_%d", i), func(t *testing.T) {
+			_, err := NewConfigBuilder().WithBytes([]byte(test.input)).Parse()
+			if err != nil && !test.wantErr {
+				t.Fatalf("Unexpected error: %s", err.Error())
+			}
+			if err == nil && test.wantErr {
+				t.Fail()
+			}
+		})
+	}
+}
+
+func TestConfigValue(t *testing.T) {
+	tests := []struct {
+		input                      string
+		maxLengthExpectedValue     int64
+		gzipMaxLengthExpectedValue int64
+	}{
+		{
+			input:                      `{}`,
+			maxLengthExpectedValue:     268435456,
+			gzipMaxLengthExpectedValue: 536870912,
+		},
+		{
+			input:                      `{"max_length": 5, "gzip":{"max_length": 42}}`,
+			maxLengthExpectedValue:     5,
+			gzipMaxLengthExpectedValue: 42,
+		},
+	}
+
+	for i, test := range tests {
+		t.Run(fmt.Sprintf("TestConfigValue_case_%d", i), func(t *testing.T) {
+			config, err := NewConfigBuilder().WithBytes([]byte(test.input)).Parse()
+			if err != nil {
+				t.Fatalf("Error building configuration: %s", err.Error())
+			}
+			if *config.MaxLength != test.maxLengthExpectedValue {
+				t.Fatalf("Unexpected config value for max_length (exp/actual): %d, %d", test.maxLengthExpectedValue, *config.MaxLength)
+			}
+			if *config.Gzip.MaxLength != test.gzipMaxLengthExpectedValue {
+				t.Fatalf("Unexpected config value for gzip.max_length (exp/actual): %d, %d", test.gzipMaxLengthExpectedValue, *config.Gzip.MaxLength)
+			}
+		})
+	}
+}
diff --git a/server/authorizer/authorizer.go b/server/authorizer/authorizer.go
index 10d65ecd88..8dcc3c3394 100644
--- a/server/authorizer/authorizer.go
+++ b/server/authorizer/authorizer.go
@@ -7,7 +7,6 @@ package authorizer
 
 import (
 	"context"
-	"io"
 	"net/http"
 	"net/url"
 	"strings"
@@ -163,11 +162,7 @@ func makeInput(r *http.Request) (*http.Request, interface{}, error) {
 
 	if expectBody(r.Method, path) {
 		var err error
-		plaintextBody, err := util.ReadMaybeCompressedBody(r)
-		if err != nil {
-			return r, nil, err
-		}
-		rawBody, err = io.ReadAll(plaintextBody)
+		rawBody, err = util.ReadMaybeCompressedBody(r)
 		if err != nil {
 			return r, nil, err
 		}
diff --git a/server/handlers/decoding.go b/server/handlers/decoding.go
new file mode 100644
index 0000000000..696d111c1c
--- /dev/null
+++ b/server/handlers/decoding.go
@@ -0,0 +1,40 @@
+package handlers
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/open-policy-agent/opa/server/types"
+	"github.com/open-policy-agent/opa/server/writer"
+	util_decoding "github.com/open-policy-agent/opa/util/decoding"
+)
+
+// This handler provides hard limits on the size of the request body, both for
+// the raw body content and for the decompressed size when gzip compression is
+// used.
+//
+// The Content-Length restriction happens here in the handler, but the
+// decompressed size limit is enforced later, in `util.ReadMaybeCompressedBody`.
+// The handler passes the gzip size limits down to that function through the
+// request context whenever gzip encoding is present.
+func DecodingLimitsHandler(handler http.Handler, maxLength, gzipMaxLength int64) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Reject too-large requests before doing any further processing.
+		// Note(philipc): This likely does nothing in the case of "chunked"
+		// requests, since those should report a ContentLength of -1.
+		if r.ContentLength > maxLength {
+			writer.Error(w, http.StatusBadRequest, types.NewErrorV1(types.CodeInvalidParameter, types.MsgDecodingLimitError))
+			return
+		}
+		// Pass server.decoding.gzip.max_length down, using the request context.
+		if strings.Contains(r.Header.Get("Content-Encoding"), "gzip") {
+			ctx := util_decoding.AddServerDecodingGzipMaxLen(r.Context(), gzipMaxLength)
+			r = r.WithContext(ctx)
+		}
+
+		// Copied over from the net/http package; enforces max body read limits.
+		r2 := *r
+		r2.Body = http.MaxBytesReader(w, r.Body, maxLength)
+		handler.ServeHTTP(w, &r2)
+	})
+}
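`DecodingLimitsHandler` can be exercised on its own, independent of the rest of the server. A small sketch with deliberately tiny limits (the echo handler and the limit values are illustrative only); the wiring into OPA's handler chain follows in server.go below:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"

	"github.com/open-policy-agent/opa/server/handlers"
)

func main() {
	// A trivial inner handler that accepts anything that reaches it.
	ok := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	// Tiny limits for demonstration: 16-byte bodies, 32 bytes decompressed.
	h := handlers.DecodingLimitsHandler(ok, 16, 32)

	// A 64-byte body trips the Content-Length check before any body read.
	req := httptest.NewRequest(http.MethodPost, "/v1/data", strings.NewReader(strings.Repeat("x", 64)))
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)
	fmt.Println(rec.Code) // 400: "request body too large"
}
```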
diff --git a/server/server.go b/server/server.go
index 398126fac7..d3ef6686f6 100644
--- a/server/server.go
+++ b/server/server.go
@@ -24,6 +24,7 @@ import (
 	"sync"
 	"time"
 
+	serverDecodingPlugin "github.com/open-policy-agent/opa/plugins/server/decoding"
 	serverEncodingPlugin "github.com/open-policy-agent/opa/plugins/server/encoding"
 
 	"github.com/gorilla/mux"
@@ -215,6 +216,11 @@ func (s *Server) Init(ctx context.Context) (*Server, error) {
 	}
 	s.DiagnosticHandler = s.initHandlerAuthn(s.DiagnosticHandler)
 
+	s.Handler, err = s.initHandlerDecodingLimits(s.Handler)
+	if err != nil {
+		return nil, err
+	}
+
 	return s, s.store.Commit(ctx, txn)
 }
 
@@ -752,6 +758,24 @@ func (s *Server) initHandlerAuthz(handler http.Handler) http.Handler {
 	return handler
 }
 
+// Enforces request body size limits on incoming requests. For gzipped requests,
+// it passes the size limit down to the body-reading method via the request
+// context.
+func (s *Server) initHandlerDecodingLimits(handler http.Handler) (http.Handler, error) {
+	var decodingRawConfig json.RawMessage
+	serverConfig := s.manager.Config.Server
+	if serverConfig != nil {
+		decodingRawConfig = serverConfig.Decoding
+	}
+	decodingConfig, err := serverDecodingPlugin.NewConfigBuilder().WithBytes(decodingRawConfig).Parse()
+	if err != nil {
+		return nil, err
+	}
+	decodingHandler := handlers.DecodingLimitsHandler(handler, *decodingConfig.MaxLength, *decodingConfig.Gzip.MaxLength)
+
+	return decodingHandler, nil
+}
+
 func (s *Server) initHandlerCompression(handler http.Handler) (http.Handler, error) {
 	var encodingRawConfig json.RawMessage
 	serverConfig := s.manager.Config.Server
@@ -2731,7 +2755,7 @@ func readInputV0(r *http.Request) (ast.Value, *interface{}, error) {
 	}
 
 	// decompress the input if sent as zip
-	body, err := util.ReadMaybeCompressedBody(r)
+	bodyBytes, err := util.ReadMaybeCompressedBody(r)
 	if err != nil {
 		return nil, nil, fmt.Errorf("could not decompress the body: %w", err)
 	}
@@ -2739,17 +2763,13 @@ func readInputV0(r *http.Request) (ast.Value, *interface{}, error) {
 
 	var x interface{}
 
 	if strings.Contains(r.Header.Get("Content-Type"), "yaml") {
-		bs, err := io.ReadAll(body)
-		if err != nil {
-			return nil, nil, err
-		}
-		if len(bs) > 0 {
-			if err = util.Unmarshal(bs, &x); err != nil {
+		if len(bodyBytes) > 0 {
+			if err = util.Unmarshal(bodyBytes, &x); err != nil {
 				return nil, nil, fmt.Errorf("body contains malformed input document: %w", err)
 			}
 		}
 	} else {
-		dec := util.NewJSONDecoder(body)
+		dec := util.NewJSONDecoder(bytes.NewBuffer(bodyBytes))
 		if err := dec.Decode(&x); err != nil && err != io.EOF {
 			return nil, nil, fmt.Errorf("body contains malformed input document: %w", err)
 		}
@@ -2784,7 +2804,7 @@ func readInputPostV1(r *http.Request) (ast.Value, *interface{}, error) {
 	var request types.DataRequestV1
 
 	// decompress the input if sent as zip
-	body, err := util.ReadMaybeCompressedBody(r)
+	bodyBytes, err := util.ReadMaybeCompressedBody(r)
 	if err != nil {
 		return nil, nil, fmt.Errorf("could not decompress the body: %w", err)
 	}
 
@@ -2793,17 +2813,13 @@ func readInputPostV1(r *http.Request) (ast.Value, *interface{}, error) {
 	// There is no standard for yaml mime-type so we just look for
 	// anything related
 	if strings.Contains(ct, "yaml") {
-		bs, err := io.ReadAll(body)
-		if err != nil {
-			return nil, nil, err
-		}
-		if len(bs) > 0 {
-			if err = util.Unmarshal(bs, &request); err != nil {
+		if len(bodyBytes) > 0 {
+			if err = util.Unmarshal(bodyBytes, &request); err != nil {
 				return nil, nil, fmt.Errorf("body contains malformed input document: %w", err)
 			}
 		}
 	} else {
-		dec := util.NewJSONDecoder(body)
+		dec := util.NewJSONDecoder(bytes.NewBuffer(bodyBytes))
 		if err := dec.Decode(&request); err != nil && err != io.EOF {
 			return nil, nil, fmt.Errorf("body contains malformed input document: %w", err)
 		}
@@ -2828,11 +2844,10 @@ type compileRequestOptions struct {
 	DisableInlining []string
 }
 
-func readInputCompilePostV1(r io.ReadCloser) (*compileRequest, *types.ErrorV1) {
-
+func readInputCompilePostV1(reqBytes []byte) (*compileRequest, *types.ErrorV1) {
 	var request types.CompileRequestV1
 
-	err := util.NewJSONDecoder(r).Decode(&request)
+	err := util.NewJSONDecoder(bytes.NewBuffer(reqBytes)).Decode(&request)
 	if err != nil {
 		return nil, types.NewErrorV1(types.CodeInvalidParameter, "error(s) occurred while decoding request: %v", err.Error())
 	}
b/server/server_test.go @@ -32,6 +32,7 @@ import ( "path/filepath" "reflect" "sort" + "strconv" "strings" "sync/atomic" "testing" @@ -1823,6 +1824,160 @@ allow if { } } +// Tests to ensure the body size limits work, for compressed requests. +func TestDataPostV1CompressedDecodingLimits(t *testing.T) { + defaultMaxLen := int64(1024) + defaultGzipMaxLen := int64(1024) + + tests := []struct { + note string + wantGzip bool + payload []byte + forceContentLen int64 // Size to manually set the Content-Length header to. + forcePayloadSizeField uint32 // Size to manually set the payload field for the gzip blob. + expRespHTTPStatus int + expErrorMsg string + maxLen int64 + gzipMaxLen int64 + }{ + { + note: "empty message", + payload: []byte{}, + expRespHTTPStatus: 200, + }, + { + note: "empty message, gzip", + wantGzip: true, + payload: mustGZIPPayload([]byte{}), + expRespHTTPStatus: 200, + }, + { + note: "empty message, malicious Content-Length", + payload: []byte{}, + forceContentLen: 2048, // Server should ignore this header entirely. + expRespHTTPStatus: 200, + }, + { + note: "empty message, gzip, malicious Content-Length", + wantGzip: true, + payload: mustGZIPPayload([]byte{}), + forceContentLen: 2048, // Server should ignore this header entirely. + expRespHTTPStatus: 200, + }, + { + note: "basic - malicious size field, expect reject on gzip payload length", + wantGzip: true, + payload: mustGZIPPayload([]byte(`{"user": "alice"}`)), + expRespHTTPStatus: 400, + forcePayloadSizeField: 134217728, // 128 MB + expErrorMsg: "gzip payload too large", + gzipMaxLen: 1024, + }, + { + note: "basic, large payload", + payload: util.MustMarshalJSON(generateJSONBenchmarkData(100, 100)), + expRespHTTPStatus: 200, + maxLen: 134217728, + }, + { + note: "basic, large payload, expect reject on Content-Length", + payload: util.MustMarshalJSON(generateJSONBenchmarkData(100, 100)), + expRespHTTPStatus: 400, + maxLen: 512, + expErrorMsg: "request body too large", + }, + { + note: "basic, gzip, large payload", + wantGzip: true, + payload: mustGZIPPayload(util.MustMarshalJSON(generateJSONBenchmarkData(100, 100))), + expRespHTTPStatus: 200, + maxLen: 1024, + gzipMaxLen: 134217728, + }, + { + note: "basic, gzip, large payload, expect reject on gzip payload length", + wantGzip: true, + payload: mustGZIPPayload(util.MustMarshalJSON(generateJSONBenchmarkData(100, 100))), + expRespHTTPStatus: 400, + maxLen: 1024, + gzipMaxLen: 10, + expErrorMsg: "gzip payload too large", + }, + } + + for _, test := range tests { + t.Run(test.note, func(t *testing.T) { + ctx := context.Background() + store := inmem.New() + txn := storage.NewTransactionOrDie(ctx, store, storage.WriteParams) + examplePolicy := `package example.authz + +import rego.v1 + +default allow := false # Reject requests by default. + +allow if { + # Logic to authorize request goes here. + input.body.user == "alice" +} +` + + if err := store.UpsertPolicy(ctx, txn, "test", []byte(examplePolicy)); err != nil { + panic(err) + } + + if err := store.Commit(ctx, txn); err != nil { + panic(err) + } + + opts := [](func(*Server)){ + func(s *Server) { + s.WithStore(store) + }, + } + + // Set defaults for max_length configs, if not specified in the test case. + if test.maxLen == 0 { + test.maxLen = defaultMaxLen + } + if test.gzipMaxLen == 0 { + test.gzipMaxLen = defaultGzipMaxLen + } + + f := newFixtureWithConfig(t, fmt.Sprintf(`{"server":{"decision_logs": %t, "decoding":{"max_length": %d, "gzip": {"max_length": %d}}}}`, true, test.maxLen, test.gzipMaxLen), opts...) 
diff --git a/server/types/types.go b/server/types/types.go
index 5f71481ddc..37917913d4 100644
--- a/server/types/types.go
+++ b/server/types/types.go
@@ -81,6 +81,8 @@ const (
 	MsgMissingError           = "document missing"
 	MsgFoundUndefinedError    = "document undefined"
 	MsgPluginConfigError      = "error(s) occurred while configuring plugin(s)"
+	MsgDecodingLimitError     = "request body too large"
+	MsgDecodingGzipLimitError = "compressed request body too large"
 )
 
 // PatchV1 models a single patch operation against a document.
diff --git a/util/decoding/context.go b/util/decoding/context.go
new file mode 100644
index 0000000000..cba5e40ed5
--- /dev/null
+++ b/util/decoding/context.go
@@ -0,0 +1,21 @@
+package decoding
+
+import "context"
+
+type requestContextKey string
+
+// Note(philipc): We can add functions later to add the max request body length
+// to contexts, if we ever need to.
+const (
+	reqCtxKeyMaxLen     = requestContextKey("server-decoding-plugin-context-max-length")
+	reqCtxKeyGzipMaxLen = requestContextKey("server-decoding-plugin-context-gzip-max-length")
+)
+
+func AddServerDecodingGzipMaxLen(ctx context.Context, maxLen int64) context.Context {
+	return context.WithValue(ctx, reqCtxKeyGzipMaxLen, maxLen)
+}
+
+func GetServerDecodingGzipMaxLen(ctx context.Context) (int64, bool) {
+	gzipMaxLength, ok := ctx.Value(reqCtxKeyGzipMaxLen).(int64)
+	return gzipMaxLength, ok
+}
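These two helpers are the only channel between the limits handler and the body reader. A quick round-trip sketch using just the API above:

```go
package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/util/decoding"
)

func main() {
	// The handler stores the configured gzip limit on the request context...
	ctx := decoding.AddServerDecodingGzipMaxLen(context.Background(), 536870912)

	// ...and the body reader retrieves it later, far from the config plumbing.
	if maxLen, ok := decoding.GetServerDecodingGzipMaxLen(ctx); ok {
		fmt.Println(maxLen) // 536870912
	}

	// On a context where the handler never set the limit (e.g. no gzip
	// Content-Encoding), the lookup reports ok == false, and callers must
	// not trust the zero value.
	_, ok := decoding.GetServerDecodingGzipMaxLen(context.Background())
	fmt.Println(ok) // false
}
```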
diff --git a/util/read_gzip_body.go b/util/read_gzip_body.go
index 2e33cae5fa..c6a1098a44 100644
--- a/util/read_gzip_body.go
+++ b/util/read_gzip_body.go
@@ -3,24 +3,68 @@ package util
 import (
 	"bytes"
 	"compress/gzip"
+	"encoding/binary"
+	"fmt"
 	"io"
 	"net/http"
 	"strings"
+	"sync"
+
+	"github.com/open-policy-agent/opa/util/decoding"
 )
 
+var gzipReaderPool = sync.Pool{
+	New: func() interface{} {
+		reader := new(gzip.Reader)
+		return reader
+	},
+}
+
 // Note(philipc): Originally taken from server/server.go
-func ReadMaybeCompressedBody(r *http.Request) (io.ReadCloser, error) {
+// The DecodingLimitsHandler handles validating that the gzip payload is within
+// the allowed max size limit. Thus, in the event of a forged payload size
+// trailer, the worst that can happen is that we waste memory up to the allowed
+// max gzip payload size, but not an unbounded amount of memory, as was
+// potentially possible before.
+func ReadMaybeCompressedBody(r *http.Request) ([]byte, error) {
+	if r.ContentLength <= 0 {
+		return []byte{}, nil
+	}
+	// Read content from the request body into a buffer of known size.
+	content := bytes.NewBuffer(make([]byte, 0, r.ContentLength))
+	if _, err := io.CopyN(content, r.Body, r.ContentLength); err != nil {
+		return content.Bytes(), err
+	}
+
+	// Decompress gzip content by reading from the buffer.
 	if strings.Contains(r.Header.Get("Content-Encoding"), "gzip") {
-		gzReader, err := gzip.NewReader(r.Body)
-		if err != nil {
+		// Fetch gzip payload size limit from request context.
+		gzipMaxLength, _ := decoding.GetServerDecodingGzipMaxLen(r.Context())
+
+		// Note(philipc): The last 4 bytes of a well-formed gzip blob will
+		// always be a little-endian uint32, representing the decompressed
+		// content size, modulo 2^32. We validate that the size is safe,
+		// earlier in DecodingLimitsHandler.
+		sizeTrailerField := binary.LittleEndian.Uint32(content.Bytes()[content.Len()-4:])
+		if sizeTrailerField > uint32(gzipMaxLength) {
+			return content.Bytes(), fmt.Errorf("gzip payload too large")
+		}
+		// Pull a gzip decompressor from the pool, and assign it to the current
+		// buffer, using Reset(). Later, return it back to the pool for another
+		// request to use.
+		gzReader := gzipReaderPool.Get().(*gzip.Reader)
+		if err := gzReader.Reset(content); err != nil {
 			return nil, err
 		}
 		defer gzReader.Close()
-		bytesBody, err := io.ReadAll(gzReader)
-		if err != nil {
-			return nil, err
+		defer gzipReaderPool.Put(gzReader)
+		decompressedContent := bytes.NewBuffer(make([]byte, 0, sizeTrailerField))
+		if _, err := io.CopyN(decompressedContent, gzReader, int64(sizeTrailerField)); err != nil {
+			return decompressedContent.Bytes(), err
 		}
-		return io.NopCloser(bytes.NewReader(bytesBody)), err
+		return decompressedContent.Bytes(), nil
 	}
-	return r.Body, nil
+
+	// Request was not compressed; return the content bytes.
+	return content.Bytes(), nil
 }
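A note on the `sync.Pool` usage above: `gzip.Reader` supports `Reset`, so one decompressor can serve many requests without re-allocating its internal state on each call. A minimal standalone sketch of the same pattern (the `pool` and `gzipBytes` names are illustrative, not part of the patch):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"sync"
)

// Same shape as gzipReaderPool above: zero-value readers, initialized lazily.
var pool = sync.Pool{New: func() interface{} { return new(gzip.Reader) }}

// gzipBytes is a small helper to produce compressed test payloads.
func gzipBytes(s string) *bytes.Buffer {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte(s))
	zw.Close()
	return &buf
}

func main() {
	// One pooled reader services multiple payloads via Reset, mirroring how
	// ReadMaybeCompressedBody avoids allocating a decompressor per request.
	for _, msg := range []string{"hello", "world"} {
		zr := pool.Get().(*gzip.Reader)
		if err := zr.Reset(gzipBytes(msg)); err != nil {
			panic(err)
		}
		out, _ := io.ReadAll(zr)
		zr.Close()
		pool.Put(zr)
		fmt.Println(string(out))
	}
}
```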