diff --git a/go.mod b/go.mod index 90e519dee8..ada4f7dee5 100644 --- a/go.mod +++ b/go.mod @@ -12,6 +12,7 @@ require ( github.com/google/go-intervals v0.0.2 github.com/hashicorp/go-multierror v1.1.1 github.com/json-iterator/go v1.1.12 + github.com/klauspost/compress v1.13.6 github.com/klauspost/pgzip v1.2.5 github.com/mattn/go-shellwords v1.0.12 github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible @@ -26,7 +27,6 @@ require ( github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 github.com/tchap/go-patricia v2.3.0+incompatible github.com/ulikunitz/xz v0.5.10 - github.com/valyala/gozstd v1.15.1 github.com/vbatts/tar-split v0.11.2 golang.org/x/net v0.0.0-20210825183410-e898025ed96a golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 diff --git a/go.sum b/go.sum index 98abff9a36..f0eaf86ccb 100644 --- a/go.sum +++ b/go.sum @@ -635,8 +635,6 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/valyala/gozstd v1.15.1 h1:hpmJYWOpNs0rjDOMdbwWZplfNlfh1tOy0v7XiR9+iGQ= -github.com/valyala/gozstd v1.15.1/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ= github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= diff --git a/pkg/archive/archive_zstd_cgo.go b/pkg/archive/archive_zstd.go similarity index 66% rename from pkg/archive/archive_zstd_cgo.go rename to pkg/archive/archive_zstd.go index cfe0c14857..36b7118aa8 100644 --- a/pkg/archive/archive_zstd_cgo.go +++ b/pkg/archive/archive_zstd.go @@ -1,24 +1,22 @@ -// +build linux,cgo - package archive import ( "io" - zstd "github.com/valyala/gozstd" + "github.com/klauspost/compress/zstd" ) type wrapperZstdDecoder struct { - decoder *zstd.Reader + decoder *zstd.Decoder } func (w *wrapperZstdDecoder) Close() error { - w.decoder.Release() + w.decoder.Close() return nil } func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) { - return zstd.Decompress(dst, input) + return w.decoder.DecodeAll(input, dst) } func (w *wrapperZstdDecoder) Read(p []byte) (int, error) { @@ -26,8 +24,7 @@ func (w *wrapperZstdDecoder) Read(p []byte) (int, error) { } func (w *wrapperZstdDecoder) Reset(r io.Reader) error { - w.decoder.Reset(r, nil) - return nil + return w.decoder.Reset(r) } func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) { @@ -35,10 +32,10 @@ func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) { } func zstdReader(buf io.Reader) (io.ReadCloser, error) { - decoder := zstd.NewReader(buf) - return &wrapperZstdDecoder{decoder: decoder}, nil + decoder, err := zstd.NewReader(buf) + return &wrapperZstdDecoder{decoder: decoder}, err } func zstdWriter(dest io.Writer) (io.WriteCloser, error) { - return zstd.NewWriter(dest), nil + return zstd.NewWriter(dest) } diff --git a/pkg/archive/archive_zstd_nocgo.go b/pkg/archive/archive_zstd_nocgo.go deleted file mode 100644 index 
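The hunks above swap the cgo-backed gozstd wrapper for the pure-Go github.com/klauspost/compress/zstd package: NewReader and NewWriter now return an error alongside the stream object, the stream types become *zstd.Decoder / *zstd.Encoder, and Close replaces gozstd's Release. A minimal round-trip sketch of that API follows; the roundTrip helper and sample data are illustrative only and not part of this change.

// A minimal sketch, assuming a throwaway roundTrip helper: compress and
// decompress a buffer with the pure-Go package that replaces gozstd.
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func roundTrip(data []byte) ([]byte, error) {
	var compressed bytes.Buffer

	// NewWriter returns (*zstd.Encoder, error); Close finalizes the frame.
	enc, err := zstd.NewWriter(&compressed)
	if err != nil {
		return nil, err
	}
	if _, err := enc.Write(data); err != nil {
		return nil, err
	}
	if err := enc.Close(); err != nil {
		return nil, err
	}

	// NewReader also returns an error; Close() replaces gozstd's Release().
	dec, err := zstd.NewReader(&compressed)
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	return io.ReadAll(dec)
}

func main() {
	out, err := roundTrip([]byte("hello zstd"))
	fmt.Println(string(out), err)
}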
117f2bbba1..0000000000 --- a/pkg/archive/archive_zstd_nocgo.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build !linux !cgo - -package archive - -import ( - "fmt" - "io" -) - -type wrapperZstdDecoder struct { -} - -func (w *wrapperZstdDecoder) Close() error { - return fmt.Errorf("zstd not supported without cgo") -} - -func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) { - return nil, fmt.Errorf("zstd not supported without cgo") -} - -func (w *wrapperZstdDecoder) Read(p []byte) (int, error) { - return -1, fmt.Errorf("zstd not supported without cgo") -} - -func (w *wrapperZstdDecoder) Reset(r io.Reader) error { - return fmt.Errorf("zstd not supported without cgo") -} - -func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) { - return -1, fmt.Errorf("zstd not supported without cgo") -} - -func zstdReader(buf io.Reader) (io.ReadCloser, error) { - return nil, fmt.Errorf("zstd not supported without cgo") -} - -func zstdWriter(dest io.Writer) (io.WriteCloser, error) { - return nil, fmt.Errorf("zstd not supported without cgo") -} diff --git a/pkg/chunked/compression.go b/pkg/chunked/compression.go index ae7fa763af..96254bc4e5 100644 --- a/pkg/chunked/compression.go +++ b/pkg/chunked/compression.go @@ -11,10 +11,10 @@ import ( "github.com/containerd/stargz-snapshotter/estargz" "github.com/containers/storage/pkg/chunked/compressor" "github.com/containers/storage/pkg/chunked/internal" + "github.com/klauspost/compress/zstd" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" - zstd "github.com/valyala/gozstd" "github.com/vbatts/tar-split/archive/tar" ) @@ -256,8 +256,14 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann return nil, 0, errors.New("invalid manifest checksum") } + decoder, err := zstd.NewReader(nil) + if err != nil { + return nil, 0, err + } + defer decoder.Close() + b := make([]byte, 0, lengthUncompressed) - if decoded, err := zstd.Decompress(b, manifest); err == nil { + if decoded, err := decoder.DecodeAll(manifest, b); err == nil { return decoded, int64(offset), nil } diff --git a/pkg/chunked/compressor/compressor.go b/pkg/chunked/compressor/compressor.go index 7893cf9aab..aeb7cfd4f0 100644 --- a/pkg/chunked/compressor/compressor.go +++ b/pkg/chunked/compressor/compressor.go @@ -215,7 +215,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r defer func() { if zstdWriter != nil { zstdWriter.Close() - zstdWriter.Release() + zstdWriter.Flush() } }() @@ -225,8 +225,11 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r if err := zstdWriter.Close(); err != nil { return 0, err } + if err := zstdWriter.Flush(); err != nil { + return 0, err + } offset = dest.Count - zstdWriter.Reset(dest, nil, level) + zstdWriter.Reset(dest) } return offset, nil } diff --git a/pkg/chunked/internal/compression.go b/pkg/chunked/internal/compression.go index 1fce3e6f93..3bb5286d92 100644 --- a/pkg/chunked/internal/compression.go +++ b/pkg/chunked/internal/compression.go @@ -13,8 +13,8 @@ import ( "time" jsoniter "github.com/json-iterator/go" + "github.com/klauspost/compress/zstd" "github.com/opencontainers/go-digest" - zstd "github.com/valyala/gozstd" ) type TOC struct { @@ -178,6 +178,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off return appendZstdSkippableFrame(dest, manifestDataLE) } -func 
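The readZstdChunkedManifest hunk above replaces the package-level gozstd.Decompress(dst, manifest) call with a *zstd.Decoder created once and reused via DecodeAll; note the argument order is (input, dst) and the result is appended to dst. A minimal sketch of that one-shot pattern follows; the decompressBuffer helper and sample payload are illustrative only.

// A minimal sketch, assuming a decompressBuffer helper: one-shot
// decompression via a reusable *zstd.Decoder, in the same style as the
// readZstdChunkedManifest change above.
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func decompressBuffer(compressed []byte, expectedSize int) ([]byte, error) {
	// A nil reader is fine when the decoder is only used through DecodeAll.
	decoder, err := zstd.NewReader(nil)
	if err != nil {
		return nil, err
	}
	defer decoder.Close()

	// DecodeAll takes (input, dst) and appends to dst; pre-sizing dst's
	// capacity avoids reallocations when the uncompressed size is known.
	dst := make([]byte, 0, expectedSize)
	return decoder.DecodeAll(compressed, dst)
}

func main() {
	// Produce a small zstd frame first so the example is self-contained.
	enc, err := zstd.NewWriter(nil) // a nil writer is allowed for EncodeAll-only use
	if err != nil {
		panic(err)
	}
	blob := enc.EncodeAll([]byte("zstd:chunked manifest payload"), nil)

	out, err := decompressBuffer(blob, 64)
	fmt.Println(string(out), err)
}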
ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Writer, error) { - return zstd.NewWriterLevel(dest, level), nil +func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) { + el := zstd.EncoderLevelFromZstd(level) + return zstd.NewWriter(dest, zstd.WithEncoderLevel(el)) } diff --git a/pkg/chunked/storage_linux.go b/pkg/chunked/storage_linux.go index 43a97ad46f..92b15c2bfd 100644 --- a/pkg/chunked/storage_linux.go +++ b/pkg/chunked/storage_linux.go @@ -28,11 +28,11 @@ import ( "github.com/containers/storage/pkg/system" "github.com/containers/storage/types" securejoin "github.com/cyphar/filepath-securejoin" + "github.com/klauspost/compress/zstd" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" - zstd "github.com/valyala/gozstd" "github.com/vbatts/tar-split/archive/tar" "golang.org/x/sys/unix" ) @@ -63,8 +63,7 @@ type chunkedDiffer struct { copyBuffer []byte gzipReader *pgzip.Reader - - zstdReader *zstd.Reader + zstdReader *zstd.Decoder rawReader io.Reader } @@ -740,10 +739,15 @@ func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressed case partCompression == fileTypeZstdChunked: c.rawReader = io.LimitReader(from, mf.CompressedSize) if c.zstdReader == nil { - r := zstd.NewReader(c.rawReader) + r, err := zstd.NewReader(c.rawReader) + if err != nil { + return partCompression, err + } c.zstdReader = r } else { - c.zstdReader.Reset(c.rawReader, nil) + if err := c.zstdReader.Reset(c.rawReader); err != nil { + return partCompression, err + } } case partCompression == fileTypeEstargz: c.rawReader = io.LimitReader(from, mf.CompressedSize) @@ -804,7 +808,7 @@ func appendHole(fd int, size int64) error { func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileType, destFile *destinationFile, size int64) error { switch compression { case fileTypeZstdChunked: - defer c.zstdReader.Reset(nil, nil) + defer c.zstdReader.Reset(nil) if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.zstdReader, size), c.copyBuffer); err != nil { return err } @@ -1341,7 +1345,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra defer c.layersCache.release() defer func() { if c.zstdReader != nil { - c.zstdReader.Release() + c.zstdReader.Close() } }() diff --git a/vendor/github.com/valyala/gozstd/.gitignore b/vendor/github.com/valyala/gozstd/.gitignore deleted file mode 100644 index 6e92f57d46..0000000000 --- a/vendor/github.com/valyala/gozstd/.gitignore +++ /dev/null @@ -1 +0,0 @@ -tags diff --git a/vendor/github.com/valyala/gozstd/.travis.yml b/vendor/github.com/valyala/gozstd/.travis.yml deleted file mode 100644 index b1d0fa5d61..0000000000 --- a/vendor/github.com/valyala/gozstd/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go - -os: - - linux - - osx - - freebsd - -go: - - 1.14 - -script: - # build test for supported platforms - - GOARCH=amd64 go build - - # run tests on a standard platform - - go test -v -coverprofile=coverage.txt -covermode=atomic - - go test -v -race - -after_success: - # Upload coverage results to codecov.io - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/valyala/gozstd/LICENSE b/vendor/github.com/valyala/gozstd/LICENSE deleted file mode 100644 index 6f665f3e29..0000000000 --- a/vendor/github.com/valyala/gozstd/LICENSE +++ /dev/null @@ 
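ZstdWriterWithLevel above keeps its integer level parameter by mapping it through zstd.EncoderLevelFromZstd onto one of the library's named levels (SpeedFastest, SpeedDefault, SpeedBetterCompression, SpeedBestCompression) and passing that via zstd.WithEncoderLevel, and the writeZstdChunkedStream and storage_linux.go hunks reuse encoders and decoders with Close/Reset instead of gozstd's Release. A minimal sketch of the level mapping plus the Close-then-Reset reuse pattern follows; the newLeveledWriter helper and sample data are illustrative only.

// A minimal sketch, assuming a newLeveledWriter helper: map an integer zstd
// level to a named encoder level, then reuse the encoder across frames with
// Close followed by Reset, in the same style as writeZstdChunkedStream above.
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func newLeveledWriter(dest io.Writer, level int) (*zstd.Encoder, error) {
	el := zstd.EncoderLevelFromZstd(level) // nearest named level for the numeric zstd level
	return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
}

func main() {
	var buf bytes.Buffer
	enc, err := newLeveledWriter(&buf, 9)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("chunked layer data")); err != nil {
		panic(err)
	}
	// Close finalizes the current frame; Reset(w) then prepares the same
	// encoder for the next frame instead of allocating a new one.
	if err := enc.Close(); err != nil {
		panic(err)
	}
	enc.Reset(&buf)
	fmt.Println("compressed bytes so far:", buf.Len())
}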
-1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2018 Aliaksandr Valialkin - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/valyala/gozstd/Makefile b/vendor/github.com/valyala/gozstd/Makefile deleted file mode 100644 index c97c0ae326..0000000000 --- a/vendor/github.com/valyala/gozstd/Makefile +++ /dev/null @@ -1,68 +0,0 @@ -GOOS ?= $(shell go env GOOS) -GOARCH ?= $(shell go env GOARCH) -GOOS_GOARCH := $(GOOS)_$(GOARCH) -GOOS_GOARCH_NATIVE := $(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH) -LIBZSTD_NAME := libzstd_$(GOOS_GOARCH).a -ZSTD_VERSION ?= v1.5.1 -MUSL_BUILDER_IMAGE=golang:1.17.6-alpine -BUILDER_IMAGE := local/builder_musl:2.0.0-$(shell echo $(MUSL_BUILDER_IMAGE) | tr : _) - -.PHONY: libzstd.a - -libzstd.a: $(LIBZSTD_NAME) - -$(LIBZSTD_NAME): -ifeq ($(GOOS_GOARCH),$(GOOS_GOARCH_NATIVE)) - cd zstd/lib && ZSTD_LEGACY_SUPPORT=0 MOREFLAGS=$(MOREFLAGS) $(MAKE) clean libzstd.a - mv zstd/lib/libzstd.a $(LIBZSTD_NAME) -else -ifeq ($(GOOS_GOARCH),linux_arm) - cd zstd/lib && CC=arm-linux-gnueabi-gcc ZSTD_LEGACY_SUPPORT=0 MOREFLAGS=$(MOREFLAGS) $(MAKE) clean libzstd.a - mv zstd/lib/libzstd.a libzstd_linux_arm.a -endif -ifeq ($(GOOS_GOARCH),linux_arm64) - cd zstd/lib && CC=aarch64-linux-gnu-gcc ZSTD_LEGACY_SUPPORT=0 MOREFLAGS=$(MOREFLAGS) $(MAKE) clean libzstd.a - mv zstd/lib/libzstd.a libzstd_linux_arm64.a -endif -ifeq ($(GOOS_GOARCH),linux_musl_amd64) - cd zstd/lib && ZSTD_LEGACY_SUPPORT=0 MOREFLAGS=$(MOREFLAGS) $(MAKE) clean libzstd.a - mv zstd/lib/libzstd.a libzstd_linux_musl_amd64.a -endif -endif - -package-builder: - (docker image ls --format '{{.Repository}}:{{.Tag}}' | grep -q '$(BUILDER_IMAGE)$$') \ - || docker build \ - --build-arg builder_image=$(MUSL_BUILDER_IMAGE) \ - --tag $(BUILDER_IMAGE) \ - builder - -package-musl: package-builder - docker run --rm \ - --user $(shell id -u):$(shell id -g) \ - --mount type=bind,src="$(shell pwd)",dst=/zstd \ - -w /zstd \ - $(DOCKER_OPTS) \ - $(BUILDER_IMAGE) \ - sh -c "GOOS=linux_musl make clean libzstd.a" - -clean: - rm -f $(LIBZSTD_NAME) - cd zstd && $(MAKE) clean - -update-zstd: - rm -rf zstd-tmp - git clone --branch $(ZSTD_VERSION) --depth 1 https://github.com/Facebook/zstd zstd-tmp - rm -rf zstd-tmp/.git - rm -rf zstd - mv zstd-tmp zstd - $(MAKE) clean libzstd.a - cp zstd/lib/zstd.h . - cp zstd/lib/zdict.h . - cp zstd/lib/zstd_errors.h . - -test: - CGO_ENABLED=1 GODEBUG=cgocheck=2 go test -v - -bench: - CGO_ENABLED=1 go test -bench=. 
diff --git a/vendor/github.com/valyala/gozstd/README.md b/vendor/github.com/valyala/gozstd/README.md deleted file mode 100644 index 7264357608..0000000000 --- a/vendor/github.com/valyala/gozstd/README.md +++ /dev/null @@ -1,96 +0,0 @@ -[![Build Status](https://travis-ci.org/valyala/gozstd.svg)](https://travis-ci.org/valyala/gozstd) -[![GoDoc](https://godoc.org/github.com/valyala/gozstd?status.svg)](http://godoc.org/github.com/valyala/gozstd) -[![Go Report](https://goreportcard.com/badge/github.com/valyala/gozstd)](https://goreportcard.com/report/github.com/valyala/gozstd) -[![codecov](https://codecov.io/gh/valyala/gozstd/branch/master/graph/badge.svg)](https://codecov.io/gh/valyala/gozstd) - -# gozstd - go wrapper for [zstd](http://facebook.github.io/zstd/) - - -## Features - - * Vendors upstream [zstd](https://github.com/facebook/zstd) without any modifications. - * [Simple API](https://godoc.org/github.com/valyala/gozstd). - * Optimized for speed. The API may be easily used in zero allocations mode. - * `Compress*` and `Decompress*` functions are optimized for high concurrency. - * Proper [Writer.Flush](https://godoc.org/github.com/valyala/gozstd#Writer.Flush) - for network apps. - * Supports the following features from upstream [zstd](https://facebook.github.io/zstd/): - * Block / stream compression / decompression with all the supported compression levels - and with dictionary support. - * [Dictionary](https://github.com/facebook/zstd#the-case-for-small-data-compression) - building from a sample set. The created dictionary may be saved to persistent storage / - transfered over the network. - * Dictionary loading for compression / decompression. - - Pull requests for missing upstream `zstd` features are welcome. - -## Quick start - - -### How to install `gozstd`? - -``` -go get -u github.com/valyala/gozstd -``` - -### How to compress data? - -The easiest way is just to use [Compress](https://godoc.org/github.com/valyala/gozstd#Compress): - -```go - compressedData := Compress(nil, data) -``` - -There is also [StreamCompress](https://godoc.org/github.com/valyala/gozstd#StreamCompress) -and [Writer](https://godoc.org/github.com/valyala/gozstd#Writer) for stream compression. - -### How to decompress data? - -The easiest way is just to use [Decompress](https://godoc.org/github.com/valyala/gozstd#Decompress): - -```go - data, err := Decompress(nil, compressedData) -``` - -There is also [StreamDecompress](https://godoc.org/github.com/valyala/gozstd#StreamDecompress) -and [Reader](https://godoc.org/github.com/valyala/gozstd#Reader) for stream decompression. - -### How to cross-compile gozstd? - -If you're cross-compiling some code that uses gozstd and you stumble upon the following error: -``` -# github.com/valyala/gozstd -/go/pkg/mod/github.com/valyala/gozstd@v1.6.2/stream.go:31:59: undefined: CDict -/go/pkg/mod/github.com/valyala/gozstd@v1.6.2/stream.go:35:64: undefined: CDict -/go/pkg/mod/github.com/valyala/gozstd@v1.6.2/stream.go:47:20: undefined: Writer -``` - -You can easily fix it by enabling [CGO](https://golang.org/cmd/cgo/) and using a cross-compiler (e.g. `arm-linux-gnueabi-gcc`): -```bash -env CC=arm-linux-gnueabi-gcc GOOS=linux GOARCH=arm CGO_ENABLED=1 go build ./main.go -``` - -**NOTE**: Check [#21](https://github.com/valyala/gozstd/issues/21) for more info. - -### Who uses gozstd? 
- -* [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - - -## FAQ - - * Q: _Which go version is supported?_ - A: `go1.10` and newer. Pull requests for older go versions are accepted. - - * Q: _Which platforms/architectures are supported?_ - A: `linux/amd64`, `linux/arm`, `linux/arm64`, `freebsd/amd64`, `darwin/amd64`, `darwin/arm64`, `windows/amd64`. Pull requests for other platforms/architectures - are accepted. - - * Q: _I don't trust `libzstd*.a` binary files from the repo or these files dont't work on my OS/ARCH. How to rebuild them?_ - A: Just run `make clean libzstd.a` if your OS/ARCH is supported. - - * Q: _How do I specify custom build flags when recompiling `libzstd*.a`?_ - A: You can specify MOREFLAGS=... variable when running `make` like this: `MOREFLAGS=-fPIC make clean libzstd.a`. - - * Q: _Why the repo contains `libzstd*.a` binary files?_ - A: This simplifies package installation to usual `go get` without additional steps for building the `libzstd*.a` diff --git a/vendor/github.com/valyala/gozstd/dict.go b/vendor/github.com/valyala/gozstd/dict.go deleted file mode 100644 index af759a5475..0000000000 --- a/vendor/github.com/valyala/gozstd/dict.go +++ /dev/null @@ -1,196 +0,0 @@ -package gozstd - -/* -#cgo CFLAGS: -O3 - -#define ZSTD_STATIC_LINKING_ONLY -#include "zstd.h" - -#define ZDICT_STATIC_LINKING_ONLY -#include "zdict.h" - -#include // for uintptr_t - -// The following *_wrapper functions allow avoiding memory allocations -// durting calls from Go. -// See https://github.com/golang/go/issues/24450 . - -static ZSTD_CDict* ZSTD_createCDict_wrapper(uintptr_t dictBuffer, size_t dictSize, int compressionLevel) { - return ZSTD_createCDict((const void *)dictBuffer, dictSize, compressionLevel); -} - -static ZSTD_DDict* ZSTD_createDDict_wrapper(uintptr_t dictBuffer, size_t dictSize) { - return ZSTD_createDDict((const void *)dictBuffer, dictSize); -} - -*/ -import "C" - -import ( - "fmt" - "runtime" - "sync" - "unsafe" -) - -const minDictLen = C.ZDICT_DICTSIZE_MIN - -// BuildDict returns dictionary built from the given samples. -// -// The resulting dictionary size will be close to desiredDictLen. -// -// The returned dictionary may be passed to NewCDict* and NewDDict. -func BuildDict(samples [][]byte, desiredDictLen int) []byte { - if desiredDictLen < minDictLen { - desiredDictLen = minDictLen - } - dict := make([]byte, desiredDictLen) - - // Calculate the total samples size. - samplesBufLen := 0 - for _, sample := range samples { - if len(sample) == 0 { - // Skip empty samples. - continue - } - samplesBufLen += len(sample) - } - - // Construct flat samplesBuf and samplesSizes. - samplesBuf := make([]byte, 0, samplesBufLen) - samplesSizes := make([]C.size_t, 0, len(samples)) - for _, sample := range samples { - samplesBuf = append(samplesBuf, sample...) - samplesSizes = append(samplesSizes, C.size_t(len(sample))) - } - - // Add fake samples if the original samples are too small. - minSamplesBufLen := int(C.ZDICT_CONTENTSIZE_MIN) - if minSamplesBufLen < minDictLen { - minSamplesBufLen = minDictLen - } - for samplesBufLen < minSamplesBufLen { - fakeSample := []byte(fmt.Sprintf("this is a fake sample %d", samplesBufLen)) - samplesBuf = append(samplesBuf, fakeSample...) - samplesSizes = append(samplesSizes, C.size_t(len(fakeSample))) - samplesBufLen += len(fakeSample) - } - - // Run ZDICT_trainFromBuffer under lock, since it looks like it - // is unsafe for concurrent usage (it just randomly crashes). 
- // TODO: remove this restriction. - - buildDictLock.Lock() - result := C.ZDICT_trainFromBuffer( - unsafe.Pointer(&dict[0]), - C.size_t(len(dict)), - unsafe.Pointer(&samplesBuf[0]), - &samplesSizes[0], - C.unsigned(len(samplesSizes))) - buildDictLock.Unlock() - if C.ZDICT_isError(result) != 0 { - // Return empty dictionary, since the original samples are too small. - return nil - } - - dictLen := int(result) - return dict[:dictLen] -} - -var buildDictLock sync.Mutex - -// CDict is a dictionary used for compression. -// -// A single CDict may be re-used in concurrently running goroutines. -type CDict struct { - p *C.ZSTD_CDict - compressionLevel int -} - -// NewCDict creates new CDict from the given dict. -// -// Call Release when the returned dict is no longer used. -func NewCDict(dict []byte) (*CDict, error) { - return NewCDictLevel(dict, DefaultCompressionLevel) -} - -// NewCDictLevel creates new CDict from the given dict -// using the given compressionLevel. -// -// Call Release when the returned dict is no longer used. -func NewCDictLevel(dict []byte, compressionLevel int) (*CDict, error) { - if len(dict) == 0 { - return nil, fmt.Errorf("dict cannot be empty") - } - - cd := &CDict{ - p: C.ZSTD_createCDict_wrapper( - C.uintptr_t(uintptr(unsafe.Pointer(&dict[0]))), - C.size_t(len(dict)), - C.int(compressionLevel)), - compressionLevel: compressionLevel, - } - // Prevent from GC'ing of dict during CGO call above. - runtime.KeepAlive(dict) - runtime.SetFinalizer(cd, freeCDict) - return cd, nil -} - -// Release releases resources occupied by cd. -// -// cd cannot be used after the release. -func (cd *CDict) Release() { - if cd.p == nil { - return - } - result := C.ZSTD_freeCDict(cd.p) - ensureNoError("ZSTD_freeCDict", result) - cd.p = nil -} - -func freeCDict(v interface{}) { - v.(*CDict).Release() -} - -// DDict is a dictionary used for decompression. -// -// A single DDict may be re-used in concurrently running goroutines. -type DDict struct { - p *C.ZSTD_DDict -} - -// NewDDict creates new DDict from the given dict. -// -// Call Release when the returned dict is no longer needed. -func NewDDict(dict []byte) (*DDict, error) { - if len(dict) == 0 { - return nil, fmt.Errorf("dict cannot be empty") - } - - dd := &DDict{ - p: C.ZSTD_createDDict_wrapper( - C.uintptr_t(uintptr(unsafe.Pointer(&dict[0]))), - C.size_t(len(dict))), - } - // Prevent from GC'ing of dict during CGO call above. - runtime.KeepAlive(dict) - runtime.SetFinalizer(dd, freeDDict) - return dd, nil -} - -// Release releases resources occupied by dd. -// -// dd cannot be used after the release. -func (dd *DDict) Release() { - if dd.p == nil { - return - } - - result := C.ZSTD_freeDDict(dd.p) - ensureNoError("ZSTD_freeDDict", result) - dd.p = nil -} - -func freeDDict(v interface{}) { - v.(*DDict).Release() -} diff --git a/vendor/github.com/valyala/gozstd/doc.go b/vendor/github.com/valyala/gozstd/doc.go deleted file mode 100644 index 322f4f1319..0000000000 --- a/vendor/github.com/valyala/gozstd/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package gozstd is Go wrapper for zstd. -// -// Gozstd is used in https://github.com/VictoriaMetrics/VictoriaMetrics . 
-package gozstd diff --git a/vendor/github.com/valyala/gozstd/go.mod b/vendor/github.com/valyala/gozstd/go.mod deleted file mode 100644 index f422410665..0000000000 --- a/vendor/github.com/valyala/gozstd/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/valyala/gozstd - -go 1.12 diff --git a/vendor/github.com/valyala/gozstd/gozstd.go b/vendor/github.com/valyala/gozstd/gozstd.go deleted file mode 100644 index cc9ffa4672..0000000000 --- a/vendor/github.com/valyala/gozstd/gozstd.go +++ /dev/null @@ -1,385 +0,0 @@ -package gozstd - -/* -#cgo CFLAGS: -O3 - -#define ZSTD_STATIC_LINKING_ONLY -#include "zstd.h" -#include "zstd_errors.h" - -#include // for uintptr_t - -// The following *_wrapper functions allow avoiding memory allocations -// durting calls from Go. -// See https://github.com/golang/go/issues/24450 . - -static size_t ZSTD_compressCCtx_wrapper(uintptr_t ctx, uintptr_t dst, size_t dstCapacity, uintptr_t src, size_t srcSize, int compressionLevel) { - return ZSTD_compressCCtx((ZSTD_CCtx*)ctx, (void*)dst, dstCapacity, (const void*)src, srcSize, compressionLevel); -} - -static size_t ZSTD_compress_usingCDict_wrapper(uintptr_t ctx, uintptr_t dst, size_t dstCapacity, uintptr_t src, size_t srcSize, uintptr_t cdict) { - return ZSTD_compress_usingCDict((ZSTD_CCtx*)ctx, (void*)dst, dstCapacity, (const void*)src, srcSize, (const ZSTD_CDict*)cdict); -} - -static size_t ZSTD_decompressDCtx_wrapper(uintptr_t ctx, uintptr_t dst, size_t dstCapacity, uintptr_t src, size_t srcSize) { - return ZSTD_decompressDCtx((ZSTD_DCtx*)ctx, (void*)dst, dstCapacity, (const void*)src, srcSize); -} - -static size_t ZSTD_decompress_usingDDict_wrapper(uintptr_t ctx, uintptr_t dst, size_t dstCapacity, uintptr_t src, size_t srcSize, uintptr_t ddict) { - return ZSTD_decompress_usingDDict((ZSTD_DCtx*)ctx, (void*)dst, dstCapacity, (const void*)src, srcSize, (const ZSTD_DDict*)ddict); -} - -static unsigned long long ZSTD_getFrameContentSize_wrapper(uintptr_t src, size_t srcSize) { - return ZSTD_getFrameContentSize((const void*)src, srcSize); -} -*/ -import "C" - -import ( - "fmt" - "io" - "runtime" - "sync" - "unsafe" -) - -// DefaultCompressionLevel is the default compression level. -const DefaultCompressionLevel = 3 // Obtained from ZSTD_CLEVEL_DEFAULT. - -// Compress appends compressed src to dst and returns the result. -func Compress(dst, src []byte) []byte { - return compressDictLevel(dst, src, nil, DefaultCompressionLevel) -} - -// CompressLevel appends compressed src to dst and returns the result. -// -// The given compressionLevel is used for the compression. -func CompressLevel(dst, src []byte, compressionLevel int) []byte { - return compressDictLevel(dst, src, nil, compressionLevel) -} - -// CompressDict appends compressed src to dst and returns the result. -// -// The given dictionary is used for the compression. 
-func CompressDict(dst, src []byte, cd *CDict) []byte { - return compressDictLevel(dst, src, cd, 0) -} - -func compressDictLevel(dst, src []byte, cd *CDict, compressionLevel int) []byte { - var cctx, cctxDict *cctxWrapper - if cd == nil { - cctx = cctxPool.Get().(*cctxWrapper) - } else { - cctxDict = cctxDictPool.Get().(*cctxWrapper) - } - - dst = compress(cctx, cctxDict, dst, src, cd, compressionLevel) - - if cd == nil { - cctxPool.Put(cctx) - } else { - cctxDictPool.Put(cctxDict) - } - return dst -} - -var cctxPool = &sync.Pool{ - New: newCCtx, -} - -var cctxDictPool = &sync.Pool{ - New: newCCtx, -} - -func newCCtx() interface{} { - cctx := C.ZSTD_createCCtx() - cw := &cctxWrapper{ - cctx: cctx, - } - runtime.SetFinalizer(cw, freeCCtx) - return cw -} - -func freeCCtx(cw *cctxWrapper) { - C.ZSTD_freeCCtx(cw.cctx) - cw.cctx = nil -} - -type cctxWrapper struct { - cctx *C.ZSTD_CCtx -} - -func compress(cctx, cctxDict *cctxWrapper, dst, src []byte, cd *CDict, compressionLevel int) []byte { - if len(src) == 0 { - return dst - } - - dstLen := len(dst) - if cap(dst) > dstLen { - // Fast path - try compressing without dst resize. - result := compressInternal(cctx, cctxDict, dst[dstLen:cap(dst)], src, cd, compressionLevel, false) - compressedSize := int(result) - if compressedSize >= 0 { - // All OK. - return dst[:dstLen+compressedSize] - } - if C.ZSTD_getErrorCode(result) != C.ZSTD_error_dstSize_tooSmall { - // Unexpected error. - panic(fmt.Errorf("BUG: unexpected error during compression with cd=%p: %s", cd, errStr(result))) - } - } - - // Slow path - resize dst to fit compressed data. - compressBound := int(C.ZSTD_compressBound(C.size_t(len(src)))) + 1 - if n := dstLen + compressBound - cap(dst) + dstLen; n > 0 { - // This should be optimized since go 1.11 - see https://golang.org/doc/go1.11#performance-compiler. - dst = append(dst[:cap(dst)], make([]byte, n)...) - } - - result := compressInternal(cctx, cctxDict, dst[dstLen:dstLen+compressBound], src, cd, compressionLevel, true) - compressedSize := int(result) - dst = dst[:dstLen+compressedSize] - if cap(dst)-len(dst) > 4096 { - // Re-allocate dst in order to remove superflouos capacity and reduce memory usage. - dst = append([]byte{}, dst...) - } - return dst -} - -func compressInternal(cctx, cctxDict *cctxWrapper, dst, src []byte, cd *CDict, compressionLevel int, mustSucceed bool) C.size_t { - if cd != nil { - result := C.ZSTD_compress_usingCDict_wrapper( - C.uintptr_t(uintptr(unsafe.Pointer(cctxDict.cctx))), - C.uintptr_t(uintptr(unsafe.Pointer(&dst[0]))), - C.size_t(cap(dst)), - C.uintptr_t(uintptr(unsafe.Pointer(&src[0]))), - C.size_t(len(src)), - C.uintptr_t(uintptr(unsafe.Pointer(cd.p)))) - // Prevent from GC'ing of dst and src during CGO call above. - runtime.KeepAlive(dst) - runtime.KeepAlive(src) - if mustSucceed { - ensureNoError("ZSTD_compress_usingCDict_wrapper", result) - } - return result - } - result := C.ZSTD_compressCCtx_wrapper( - C.uintptr_t(uintptr(unsafe.Pointer(cctx.cctx))), - C.uintptr_t(uintptr(unsafe.Pointer(&dst[0]))), - C.size_t(cap(dst)), - C.uintptr_t(uintptr(unsafe.Pointer(&src[0]))), - C.size_t(len(src)), - C.int(compressionLevel)) - // Prevent from GC'ing of dst and src during CGO call above. - runtime.KeepAlive(dst) - runtime.KeepAlive(src) - if mustSucceed { - ensureNoError("ZSTD_compressCCtx_wrapper", result) - } - return result -} - -// Decompress appends decompressed src to dst and returns the result. 
-func Decompress(dst, src []byte) ([]byte, error) { - return DecompressDict(dst, src, nil) -} - -// DecompressDict appends decompressed src to dst and returns the result. -// -// The given dictionary dd is used for the decompression. -func DecompressDict(dst, src []byte, dd *DDict) ([]byte, error) { - var dctx, dctxDict *dctxWrapper - if dd == nil { - dctx = dctxPool.Get().(*dctxWrapper) - } else { - dctxDict = dctxDictPool.Get().(*dctxWrapper) - } - - var err error - dst, err = decompress(dctx, dctxDict, dst, src, dd) - - if dd == nil { - dctxPool.Put(dctx) - } else { - dctxDictPool.Put(dctxDict) - } - return dst, err -} - -var dctxPool = &sync.Pool{ - New: newDCtx, -} - -var dctxDictPool = &sync.Pool{ - New: newDCtx, -} - -func newDCtx() interface{} { - dctx := C.ZSTD_createDCtx() - dw := &dctxWrapper{ - dctx: dctx, - } - runtime.SetFinalizer(dw, freeDCtx) - return dw -} - -func freeDCtx(dw *dctxWrapper) { - C.ZSTD_freeDCtx(dw.dctx) - dw.dctx = nil -} - -type dctxWrapper struct { - dctx *C.ZSTD_DCtx -} - -func decompress(dctx, dctxDict *dctxWrapper, dst, src []byte, dd *DDict) ([]byte, error) { - if len(src) == 0 { - return dst, nil - } - - dstLen := len(dst) - if cap(dst) > dstLen { - // Fast path - try decompressing without dst resize. - result := decompressInternal(dctx, dctxDict, dst[dstLen:cap(dst)], src, dd) - decompressedSize := int(result) - if decompressedSize >= 0 { - // All OK. - return dst[:dstLen+decompressedSize], nil - } - - if C.ZSTD_getErrorCode(result) != C.ZSTD_error_dstSize_tooSmall { - // Error during decompression. - return dst[:dstLen], fmt.Errorf("decompression error: %s", errStr(result)) - } - } - - // Slow path - resize dst to fit decompressed data. - decompressBound := int(C.ZSTD_getFrameContentSize_wrapper( - C.uintptr_t(uintptr(unsafe.Pointer(&src[0]))), C.size_t(len(src)))) - // Prevent from GC'ing of src during CGO call above. - runtime.KeepAlive(src) - switch uint64(decompressBound) { - case uint64(C.ZSTD_CONTENTSIZE_UNKNOWN): - return streamDecompress(dst, src, dd) - case uint64(C.ZSTD_CONTENTSIZE_ERROR): - return dst, fmt.Errorf("cannot decompress invalid src") - } - decompressBound++ - - if n := dstLen + decompressBound - cap(dst); n > 0 { - // This should be optimized since go 1.11 - see https://golang.org/doc/go1.11#performance-compiler. - dst = append(dst[:cap(dst)], make([]byte, n)...) - } - - result := decompressInternal(dctx, dctxDict, dst[dstLen:dstLen+decompressBound], src, dd) - decompressedSize := int(result) - if decompressedSize >= 0 { - dst = dst[:dstLen+decompressedSize] - if cap(dst)-len(dst) > 4096 { - // Re-allocate dst in order to remove superflouos capacity and reduce memory usage. - dst = append([]byte{}, dst...) - } - return dst, nil - } - - // Error during decompression. 
- return dst[:dstLen], fmt.Errorf("decompression error: %s", errStr(result)) -} - -func decompressInternal(dctx, dctxDict *dctxWrapper, dst, src []byte, dd *DDict) C.size_t { - var n C.size_t - if dd != nil { - n = C.ZSTD_decompress_usingDDict_wrapper( - C.uintptr_t(uintptr(unsafe.Pointer(dctxDict.dctx))), - C.uintptr_t(uintptr(unsafe.Pointer(&dst[0]))), - C.size_t(cap(dst)), - C.uintptr_t(uintptr(unsafe.Pointer(&src[0]))), - C.size_t(len(src)), - C.uintptr_t(uintptr(unsafe.Pointer(dd.p)))) - } else { - n = C.ZSTD_decompressDCtx_wrapper( - C.uintptr_t(uintptr(unsafe.Pointer(dctx.dctx))), - C.uintptr_t(uintptr(unsafe.Pointer(&dst[0]))), - C.size_t(cap(dst)), - C.uintptr_t(uintptr(unsafe.Pointer(&src[0]))), - C.size_t(len(src))) - } - // Prevent from GC'ing of dst and src during CGO calls above. - runtime.KeepAlive(dst) - runtime.KeepAlive(src) - return n -} - -func errStr(result C.size_t) string { - errCode := C.ZSTD_getErrorCode(result) - errCStr := C.ZSTD_getErrorString(errCode) - return C.GoString(errCStr) -} - -func ensureNoError(funcName string, result C.size_t) { - if int(result) >= 0 { - // Fast path - avoid calling C function. - return - } - if C.ZSTD_getErrorCode(result) != 0 { - panic(fmt.Errorf("BUG: unexpected error in %s: %s", funcName, errStr(result))) - } -} - -func streamDecompress(dst, src []byte, dd *DDict) ([]byte, error) { - sd := getStreamDecompressor(dd) - sd.dst = dst - sd.src = src - _, err := sd.zr.WriteTo(sd) - dst = sd.dst - putStreamDecompressor(sd) - return dst, err -} - -type streamDecompressor struct { - dst []byte - src []byte - srcOffset int - - zr *Reader -} - -type srcReader streamDecompressor - -func (sr *srcReader) Read(p []byte) (int, error) { - sd := (*streamDecompressor)(sr) - n := copy(p, sd.src[sd.srcOffset:]) - sd.srcOffset += n - if n < len(p) { - return n, io.EOF - } - return n, nil -} - -func (sd *streamDecompressor) Write(p []byte) (int, error) { - sd.dst = append(sd.dst, p...) 
- return len(p), nil -} - -func getStreamDecompressor(dd *DDict) *streamDecompressor { - v := streamDecompressorPool.Get() - if v == nil { - sd := &streamDecompressor{ - zr: NewReader(nil), - } - v = sd - } - sd := v.(*streamDecompressor) - sd.zr.Reset((*srcReader)(sd), dd) - return sd -} - -func putStreamDecompressor(sd *streamDecompressor) { - sd.dst = nil - sd.src = nil - sd.srcOffset = 0 - sd.zr.Reset(nil, nil) - streamDecompressorPool.Put(sd) -} - -var streamDecompressorPool sync.Pool diff --git a/vendor/github.com/valyala/gozstd/libzstd_darwin_amd64.a b/vendor/github.com/valyala/gozstd/libzstd_darwin_amd64.a deleted file mode 100644 index 25b7a6cf50..0000000000 Binary files a/vendor/github.com/valyala/gozstd/libzstd_darwin_amd64.a and /dev/null differ diff --git a/vendor/github.com/valyala/gozstd/libzstd_darwin_amd64.go b/vendor/github.com/valyala/gozstd/libzstd_darwin_amd64.go deleted file mode 100644 index 087a47b2d3..0000000000 --- a/vendor/github.com/valyala/gozstd/libzstd_darwin_amd64.go +++ /dev/null @@ -1,6 +0,0 @@ -package gozstd - -/* -#cgo LDFLAGS: ${SRCDIR}/libzstd_darwin_amd64.a -*/ -import "C" diff --git a/vendor/github.com/valyala/gozstd/libzstd_darwin_arm64.a b/vendor/github.com/valyala/gozstd/libzstd_darwin_arm64.a deleted file mode 100644 index c049c24d1e..0000000000 Binary files a/vendor/github.com/valyala/gozstd/libzstd_darwin_arm64.a and /dev/null differ diff --git a/vendor/github.com/valyala/gozstd/libzstd_darwin_arm64.go b/vendor/github.com/valyala/gozstd/libzstd_darwin_arm64.go deleted file mode 100644 index 8236da242c..0000000000 --- a/vendor/github.com/valyala/gozstd/libzstd_darwin_arm64.go +++ /dev/null @@ -1,6 +0,0 @@ -package gozstd - -/* -#cgo LDFLAGS: ${SRCDIR}/libzstd_darwin_arm64.a -*/ -import "C" diff --git a/vendor/github.com/valyala/gozstd/libzstd_freebsd_amd64.a b/vendor/github.com/valyala/gozstd/libzstd_freebsd_amd64.a deleted file mode 100644 index 394a31a318..0000000000 Binary files a/vendor/github.com/valyala/gozstd/libzstd_freebsd_amd64.a and /dev/null differ diff --git a/vendor/github.com/valyala/gozstd/libzstd_freebsd_amd64.go b/vendor/github.com/valyala/gozstd/libzstd_freebsd_amd64.go deleted file mode 100644 index eb5bca0f8b..0000000000 --- a/vendor/github.com/valyala/gozstd/libzstd_freebsd_amd64.go +++ /dev/null @@ -1,6 +0,0 @@ -package gozstd - -/* -#cgo LDFLAGS: ${SRCDIR}/libzstd_freebsd_amd64.a -*/ -import "C" diff --git a/vendor/github.com/valyala/gozstd/libzstd_linux_amd64.a b/vendor/github.com/valyala/gozstd/libzstd_linux_amd64.a deleted file mode 100644 index cf8f8d87d1..0000000000 Binary files a/vendor/github.com/valyala/gozstd/libzstd_linux_amd64.a and /dev/null differ diff --git a/vendor/github.com/valyala/gozstd/libzstd_linux_amd64.go b/vendor/github.com/valyala/gozstd/libzstd_linux_amd64.go deleted file mode 100644 index 82413af2ea..0000000000 --- a/vendor/github.com/valyala/gozstd/libzstd_linux_amd64.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !musl - -package gozstd - -/* -#cgo LDFLAGS: ${SRCDIR}/libzstd_linux_amd64.a -*/ -import "C" diff --git a/vendor/github.com/valyala/gozstd/libzstd_linux_arm.a b/vendor/github.com/valyala/gozstd/libzstd_linux_arm.a deleted file mode 100644 index 208fa2155a..0000000000 Binary files a/vendor/github.com/valyala/gozstd/libzstd_linux_arm.a and /dev/null differ diff --git 
a/vendor/github.com/valyala/gozstd/libzstd_linux_arm.go b/vendor/github.com/valyala/gozstd/libzstd_linux_arm.go deleted file mode 100644 index b93bb9047e..0000000000 --- a/vendor/github.com/valyala/gozstd/libzstd_linux_arm.go +++ /dev/null @@ -1,6 +0,0 @@ -package gozstd - -/* -#cgo LDFLAGS: ${SRCDIR}/libzstd_linux_arm.a -*/ -import "C" diff --git a/vendor/github.com/valyala/gozstd/libzstd_linux_arm64.a b/vendor/github.com/valyala/gozstd/libzstd_linux_arm64.a deleted file mode 100644 index caaeb262d2..0000000000 Binary files a/vendor/github.com/valyala/gozstd/libzstd_linux_arm64.a and /dev/null differ diff --git a/vendor/github.com/valyala/gozstd/libzstd_linux_arm64.go b/vendor/github.com/valyala/gozstd/libzstd_linux_arm64.go deleted file mode 100644 index e03ba9eca8..0000000000 --- a/vendor/github.com/valyala/gozstd/libzstd_linux_arm64.go +++ /dev/null @@ -1,6 +0,0 @@ -package gozstd - -/* -#cgo LDFLAGS: ${SRCDIR}/libzstd_linux_arm64.a -*/ -import "C" diff --git a/vendor/github.com/valyala/gozstd/libzstd_linux_musl_amd64.a b/vendor/github.com/valyala/gozstd/libzstd_linux_musl_amd64.a deleted file mode 100644 index 15de36d018..0000000000 Binary files a/vendor/github.com/valyala/gozstd/libzstd_linux_musl_amd64.a and /dev/null differ diff --git a/vendor/github.com/valyala/gozstd/libzstd_linux_musl_amd64.go b/vendor/github.com/valyala/gozstd/libzstd_linux_musl_amd64.go deleted file mode 100644 index 67f6dd28f9..0000000000 --- a/vendor/github.com/valyala/gozstd/libzstd_linux_musl_amd64.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build musl - -package gozstd - -/* -#cgo LDFLAGS: ${SRCDIR}/libzstd_linux_musl_amd64.a -*/ -import "C" diff --git a/vendor/github.com/valyala/gozstd/libzstd_windows_amd64.a b/vendor/github.com/valyala/gozstd/libzstd_windows_amd64.a deleted file mode 100644 index 81a185fcdf..0000000000 Binary files a/vendor/github.com/valyala/gozstd/libzstd_windows_amd64.a and /dev/null differ diff --git a/vendor/github.com/valyala/gozstd/libzstd_windows_amd64.go b/vendor/github.com/valyala/gozstd/libzstd_windows_amd64.go deleted file mode 100644 index 06107ed974..0000000000 --- a/vendor/github.com/valyala/gozstd/libzstd_windows_amd64.go +++ /dev/null @@ -1,6 +0,0 @@ -package gozstd - -/* -#cgo LDFLAGS: ${SRCDIR}/libzstd_windows_amd64.a -*/ -import "C" diff --git a/vendor/github.com/valyala/gozstd/reader.go b/vendor/github.com/valyala/gozstd/reader.go deleted file mode 100644 index eac8816348..0000000000 --- a/vendor/github.com/valyala/gozstd/reader.go +++ /dev/null @@ -1,264 +0,0 @@ -package gozstd - -/* -#cgo CFLAGS: -O3 - -#define ZSTD_STATIC_LINKING_ONLY -#include "zstd.h" -#include "zstd_errors.h" - -#include // for malloc/free -#include // for uintptr_t - -// The following *_wrapper functions allow avoiding memory allocations -// durting calls from Go. -// See https://github.com/golang/go/issues/24450 . 
- -static size_t ZSTD_initDStream_usingDDict_wrapper(uintptr_t ds, uintptr_t dict) { - return ZSTD_initDStream_usingDDict((ZSTD_DStream*)ds, (ZSTD_DDict*)dict); -} - -static size_t ZSTD_freeDStream_wrapper(uintptr_t ds) { - return ZSTD_freeDStream((ZSTD_DStream*)ds); -} - -static size_t ZSTD_decompressStream_wrapper(uintptr_t ds, uintptr_t output, uintptr_t input) { - return ZSTD_decompressStream((ZSTD_DStream*)ds, (ZSTD_outBuffer*)output, (ZSTD_inBuffer*)input); -} -*/ -import "C" - -import ( - "fmt" - "io" - "runtime" - "unsafe" -) - -var ( - dstreamInBufSize = C.ZSTD_DStreamInSize() - dstreamOutBufSize = C.ZSTD_DStreamOutSize() -) - -// Reader implements zstd reader. -type Reader struct { - r io.Reader - ds *C.ZSTD_DStream - dd *DDict - - inBuf *C.ZSTD_inBuffer - outBuf *C.ZSTD_outBuffer - - inBufGo cMemPtr - outBufGo cMemPtr -} - -// NewReader returns new zstd reader reading compressed data from r. -// -// Call Release when the Reader is no longer needed. -func NewReader(r io.Reader) *Reader { - return NewReaderDict(r, nil) -} - -// NewReaderDict returns new zstd reader reading compressed data from r -// using the given DDict. -// -// Call Release when the Reader is no longer needed. -func NewReaderDict(r io.Reader, dd *DDict) *Reader { - ds := C.ZSTD_createDStream() - initDStream(ds, dd) - - inBuf := (*C.ZSTD_inBuffer)(C.malloc(C.sizeof_ZSTD_inBuffer)) - inBuf.src = C.malloc(dstreamInBufSize) - inBuf.size = 0 - inBuf.pos = 0 - - outBuf := (*C.ZSTD_outBuffer)(C.malloc(C.sizeof_ZSTD_outBuffer)) - outBuf.dst = C.malloc(dstreamOutBufSize) - outBuf.size = 0 - outBuf.pos = 0 - - zr := &Reader{ - r: r, - ds: ds, - dd: dd, - inBuf: inBuf, - outBuf: outBuf, - } - - zr.inBufGo = cMemPtr(zr.inBuf.src) - zr.outBufGo = cMemPtr(zr.outBuf.dst) - - runtime.SetFinalizer(zr, freeDStream) - return zr -} - -// Reset resets zr to read from r using the given dictionary dd. -func (zr *Reader) Reset(r io.Reader, dd *DDict) { - zr.inBuf.size = 0 - zr.inBuf.pos = 0 - zr.outBuf.size = 0 - zr.outBuf.pos = 0 - - zr.dd = dd - initDStream(zr.ds, zr.dd) - - zr.r = r -} - -func initDStream(ds *C.ZSTD_DStream, dd *DDict) { - var ddict *C.ZSTD_DDict - if dd != nil { - ddict = dd.p - } - result := C.ZSTD_initDStream_usingDDict_wrapper( - C.uintptr_t(uintptr(unsafe.Pointer(ds))), - C.uintptr_t(uintptr(unsafe.Pointer(ddict)))) - ensureNoError("ZSTD_initDStream_usingDDict", result) -} - -func freeDStream(v interface{}) { - v.(*Reader).Release() -} - -// Release releases all the resources occupied by zr. -// -// zr cannot be used after the release. -func (zr *Reader) Release() { - if zr.ds == nil { - return - } - - result := C.ZSTD_freeDStream_wrapper( - C.uintptr_t(uintptr(unsafe.Pointer(zr.ds)))) - ensureNoError("ZSTD_freeDStream", result) - zr.ds = nil - - C.free(zr.inBuf.src) - C.free(unsafe.Pointer(zr.inBuf)) - zr.inBuf = nil - - C.free(zr.outBuf.dst) - C.free(unsafe.Pointer(zr.outBuf)) - zr.outBuf = nil - - zr.r = nil - zr.dd = nil -} - -// WriteTo writes all the data from zr to w. -// -// It returns the number of bytes written to w. -func (zr *Reader) WriteTo(w io.Writer) (int64, error) { - nn := int64(0) - for { - if zr.outBuf.pos == zr.outBuf.size { - if err := zr.fillOutBuf(); err != nil { - if err == io.EOF { - return nn, nil - } - return nn, err - } - } - n, err := w.Write(zr.outBufGo[zr.outBuf.pos:zr.outBuf.size]) - zr.outBuf.pos += C.size_t(n) - nn += int64(n) - if err != nil { - return nn, err - } - } -} - -// Read reads up to len(p) bytes from zr to p. 
-func (zr *Reader) Read(p []byte) (int, error) { - if len(p) == 0 { - return 0, nil - } - - if zr.outBuf.pos == zr.outBuf.size { - if err := zr.fillOutBuf(); err != nil { - return 0, err - } - } - - n := copy(p, zr.outBufGo[zr.outBuf.pos:zr.outBuf.size]) - zr.outBuf.pos += C.size_t(n) - return n, nil -} - -func (zr *Reader) fillOutBuf() error { - if zr.inBuf.pos == zr.inBuf.size && zr.outBuf.size < dstreamOutBufSize { - // inBuf is empty and the previously decompressed data size - // is smaller than the maximum possible zr.outBuf.size. - // This means that the internal buffer in zr.ds doesn't contain - // more data to decompress, so read new data into inBuf. - if err := zr.fillInBuf(); err != nil { - return err - } - } - -tryDecompressAgain: - // Try decompressing inBuf into outBuf. - zr.outBuf.size = dstreamOutBufSize - zr.outBuf.pos = 0 - prevInBufPos := zr.inBuf.pos - result := C.ZSTD_decompressStream_wrapper( - C.uintptr_t(uintptr(unsafe.Pointer(zr.ds))), - C.uintptr_t(uintptr(unsafe.Pointer(zr.outBuf))), - C.uintptr_t(uintptr(unsafe.Pointer(zr.inBuf)))) - zr.outBuf.size = zr.outBuf.pos - zr.outBuf.pos = 0 - - if C.ZSTD_getErrorCode(result) != 0 { - return fmt.Errorf("cannot decompress data: %s", errStr(result)) - } - - if zr.outBuf.size > 0 { - // Something has been decompressed to outBuf. Return it. - return nil - } - - // Nothing has been decompressed from inBuf. - if zr.inBuf.pos != prevInBufPos && zr.inBuf.pos < zr.inBuf.size { - // Data has been consumed from inBuf, but decompressed - // into nothing. There is more data in inBuf, so try - // decompressing it again. - goto tryDecompressAgain - } - - // Either nothing has been consumed from inBuf or it has been - // decompressed into nothing and inBuf became empty. - // Read more data into inBuf and try decompressing again. - if err := zr.fillInBuf(); err != nil { - return err - } - goto tryDecompressAgain -} - -func (zr *Reader) fillInBuf() error { - // Copy the remaining data to the start of inBuf. - copy(zr.inBufGo[:dstreamInBufSize], zr.inBufGo[zr.inBuf.pos:zr.inBuf.size]) - zr.inBuf.size -= zr.inBuf.pos - zr.inBuf.pos = 0 - -readAgain: - // Read more data into inBuf. - n, err := zr.r.Read(zr.inBufGo[zr.inBuf.size:dstreamInBufSize]) - zr.inBuf.size += C.size_t(n) - if err == nil { - if n == 0 { - // Nothing has been read. Try reading data again. - goto readAgain - } - return nil - } - if n > 0 { - // Do not return error if at least a single byte read, i.e. forward progress is made. - return nil - } - if err == io.EOF { - // Do not wrap io.EOF, so the caller may notify the end of stream. - return err - } - return fmt.Errorf("cannot read data from the underlying reader: %s", err) -} diff --git a/vendor/github.com/valyala/gozstd/stream.go b/vendor/github.com/valyala/gozstd/stream.go deleted file mode 100644 index 939b1c8e6c..0000000000 --- a/vendor/github.com/valyala/gozstd/stream.go +++ /dev/null @@ -1,128 +0,0 @@ -package gozstd - -import ( - "io" - "sync" -) - -// StreamCompress compresses src into dst. -// -// This function doesn't work with interactive network streams, since data read -// from src may be buffered before passing to dst for performance reasons. -// Use Writer.Flush for interactive network streams. -func StreamCompress(dst io.Writer, src io.Reader) error { - return streamCompressDictLevel(dst, src, nil, DefaultCompressionLevel) -} - -// StreamCompressLevel compresses src into dst using the given compressionLevel. 
-// -// This function doesn't work with interactive network streams, since data read -// from src may be buffered before passing to dst for performance reasons. -// Use Writer.Flush for interactive network streams. -func StreamCompressLevel(dst io.Writer, src io.Reader, compressionLevel int) error { - return streamCompressDictLevel(dst, src, nil, compressionLevel) -} - -// StreamCompressDict compresses src into dst using the given dict cd. -// -// This function doesn't work with interactive network streams, since data read -// from src may be buffered before passing to dst for performance reasons. -// Use Writer.Flush for interactive network streams. -func StreamCompressDict(dst io.Writer, src io.Reader, cd *CDict) error { - return streamCompressDictLevel(dst, src, cd, 0) -} - -func streamCompressDictLevel(dst io.Writer, src io.Reader, cd *CDict, compressionLevel int) error { - sc := getSCompressor(compressionLevel) - sc.zw.Reset(dst, cd, compressionLevel) - _, err := sc.zw.ReadFrom(src) - if err == nil { - err = sc.zw.Close() - } - putSCompressor(sc) - return err -} - -type sCompressor struct { - zw *Writer - compressionLevel int -} - -func getSCompressor(compressionLevel int) *sCompressor { - p := getSCompressorPool(compressionLevel) - v := p.Get() - if v == nil { - return &sCompressor{ - zw: NewWriterLevel(nil, compressionLevel), - compressionLevel: compressionLevel, - } - } - return v.(*sCompressor) -} - -func putSCompressor(sc *sCompressor) { - sc.zw.Reset(nil, nil, sc.compressionLevel) - p := getSCompressorPool(sc.compressionLevel) - p.Put(sc) -} - -func getSCompressorPool(compressionLevel int) *sync.Pool { - // Use per-level compressor pools, since Writer.Reset is expensive - // between distinct compression levels. - sCompressorPoolLock.Lock() - p := sCompressorPool[compressionLevel] - if p == nil { - p = &sync.Pool{} - sCompressorPool[compressionLevel] = p - } - sCompressorPoolLock.Unlock() - return p -} - -var ( - sCompressorPoolLock sync.Mutex - sCompressorPool = make(map[int]*sync.Pool) -) - -// StreamDecompress decompresses src into dst. -// -// This function doesn't work with interactive network streams, since data read -// from src may be buffered before passing to dst for performance reasons. -// Use Reader for interactive network streams. -func StreamDecompress(dst io.Writer, src io.Reader) error { - return StreamDecompressDict(dst, src, nil) -} - -// StreamDecompressDict decompresses src into dst using the given dictionary dd. -// -// This function doesn't work with interactive network streams, since data read -// from src may be buffered before passing to dst for performance reasons. -// Use Reader for interactive network streams. 
-func StreamDecompressDict(dst io.Writer, src io.Reader, dd *DDict) error { - sd := getSDecompressor() - sd.zr.Reset(src, dd) - _, err := sd.zr.WriteTo(dst) - putSDecompressor(sd) - return err -} - -type sDecompressor struct { - zr *Reader -} - -func getSDecompressor() *sDecompressor { - v := sDecompressorPool.Get() - if v == nil { - return &sDecompressor{ - zr: NewReader(nil), - } - } - return v.(*sDecompressor) -} - -func putSDecompressor(sd *sDecompressor) { - sd.zr.Reset(nil, nil) - sDecompressorPool.Put(sd) -} - -var sDecompressorPool sync.Pool diff --git a/vendor/github.com/valyala/gozstd/writer.go b/vendor/github.com/valyala/gozstd/writer.go deleted file mode 100644 index ddfb85abc4..0000000000 --- a/vendor/github.com/valyala/gozstd/writer.go +++ /dev/null @@ -1,414 +0,0 @@ -package gozstd - -/* -#cgo CFLAGS: -O3 - -#define ZSTD_STATIC_LINKING_ONLY -#include "zstd.h" -#include "zstd_errors.h" - -#include // for malloc/free -#include // for uintptr_t - -// The following *_wrapper functions allow avoiding memory allocations -// durting calls from Go. -// See https://github.com/golang/go/issues/24450 . - -static size_t ZSTD_CCtx_setParameter_wrapper(uintptr_t cs, ZSTD_cParameter param, int value) { - return ZSTD_CCtx_setParameter((ZSTD_CStream*)cs, param, value); -} - -static size_t ZSTD_initCStream_wrapper(uintptr_t cs, int compressionLevel) { - return ZSTD_initCStream((ZSTD_CStream*)cs, compressionLevel); -} - -static size_t ZSTD_CCtx_refCDict_wrapper(uintptr_t cc, uintptr_t dict) { - return ZSTD_CCtx_refCDict((ZSTD_CCtx*)cc, (ZSTD_CDict*)dict); -} - -static size_t ZSTD_freeCStream_wrapper(uintptr_t cs) { - return ZSTD_freeCStream((ZSTD_CStream*)cs); -} - -static size_t ZSTD_compressStream_wrapper(uintptr_t cs, uintptr_t output, uintptr_t input) { - return ZSTD_compressStream((ZSTD_CStream*)cs, (ZSTD_outBuffer*)output, (ZSTD_inBuffer*)input); -} - -static size_t ZSTD_flushStream_wrapper(uintptr_t cs, uintptr_t output) { - return ZSTD_flushStream((ZSTD_CStream*)cs, (ZSTD_outBuffer*)output); -} - -static size_t ZSTD_endStream_wrapper(uintptr_t cs, uintptr_t output) { - return ZSTD_endStream((ZSTD_CStream*)cs, (ZSTD_outBuffer*)output); -} - -*/ -import "C" - -import ( - "fmt" - "io" - "runtime" - "unsafe" -) - -var ( - cstreamInBufSize = C.ZSTD_CStreamInSize() - cstreamOutBufSize = C.ZSTD_CStreamOutSize() -) - -type cMemPtr *[1 << 30]byte - -// Writer implements zstd writer. -type Writer struct { - w io.Writer - compressionLevel int - wlog int - cs *C.ZSTD_CStream - cd *CDict - - inBuf *C.ZSTD_inBuffer - outBuf *C.ZSTD_outBuffer - - inBufGo cMemPtr - outBufGo cMemPtr -} - -// NewWriter returns new zstd writer writing compressed data to w. -// -// The returned writer must be closed with Close call in order -// to finalize the compressed stream. -// -// Call Release when the Writer is no longer needed. -func NewWriter(w io.Writer) *Writer { - return NewWriterParams(w, nil) -} - -// NewWriterLevel returns new zstd writer writing compressed data to w -// at the given compression level. -// -// The returned writer must be closed with Close call in order -// to finalize the compressed stream. -// -// Call Release when the Writer is no longer needed. -func NewWriterLevel(w io.Writer, compressionLevel int) *Writer { - params := &WriterParams{ - CompressionLevel: compressionLevel, - } - return NewWriterParams(w, params) -} - -// NewWriterDict returns new zstd writer writing compressed data to w -// using the given cd. 
-// -// The returned writer must be closed with Close call in order -// to finalize the compressed stream. -// -// Call Release when the Writer is no longer needed. -func NewWriterDict(w io.Writer, cd *CDict) *Writer { - params := &WriterParams{ - Dict: cd, - } - return NewWriterParams(w, params) -} - -const ( - // WindowLogMin is the minimum value of the windowLog parameter. - WindowLogMin = 10 // from zstd.h - // WindowLogMax32 is the maximum value of the windowLog parameter on 32-bit architectures. - WindowLogMax32 = 30 // from zstd.h - // WindowLogMax64 is the maximum value of the windowLog parameter on 64-bit architectures. - WindowLogMax64 = 31 // from zstd.h - - // DefaultWindowLog is the default value of the windowLog parameter. - DefaultWindowLog = 0 -) - -// A WriterParams allows users to specify compression parameters by calling -// NewWriterParams. -// -// Calling NewWriterParams with a nil WriterParams is equivalent to calling -// NewWriter. -type WriterParams struct { - // Compression level. Special value 0 means 'default compression level'. - CompressionLevel int - - // WindowLog. Must be clamped between WindowLogMin and WindowLogMin32/64. - // Special value 0 means 'use default windowLog'. - // - // Note: enabling log distance matching increases memory usage for both - // compressor and decompressor. When set to a value greater than 27, the - // decompressor requires special treatment. - WindowLog int - - // Dict is optional dictionary used for compression. - Dict *CDict -} - -// NewWriterParams returns new zstd writer writing compressed data to w -// using the given set of parameters. -// -// The returned writer must be closed with Close call in order -// to finalize the compressed stream. -// -// Call Release when the Writer is no longer needed. -func NewWriterParams(w io.Writer, params *WriterParams) *Writer { - if params == nil { - params = &WriterParams{} - } - - cs := C.ZSTD_createCStream() - initCStream(cs, *params) - - inBuf := (*C.ZSTD_inBuffer)(C.malloc(C.sizeof_ZSTD_inBuffer)) - inBuf.src = C.malloc(cstreamInBufSize) - inBuf.size = 0 - inBuf.pos = 0 - - outBuf := (*C.ZSTD_outBuffer)(C.malloc(C.sizeof_ZSTD_outBuffer)) - outBuf.dst = C.malloc(cstreamOutBufSize) - outBuf.size = cstreamOutBufSize - outBuf.pos = 0 - - zw := &Writer{ - w: w, - compressionLevel: params.CompressionLevel, - wlog: params.WindowLog, - cs: cs, - cd: params.Dict, - inBuf: inBuf, - outBuf: outBuf, - } - - zw.inBufGo = cMemPtr(zw.inBuf.src) - zw.outBufGo = cMemPtr(zw.outBuf.dst) - - runtime.SetFinalizer(zw, freeCStream) - return zw -} - -// Reset resets zw to write to w using the given dictionary cd and the given -// compressionLevel. Use ResetWriterParams if you wish to change other -// parameters that were set via WriterParams. -func (zw *Writer) Reset(w io.Writer, cd *CDict, compressionLevel int) { - params := WriterParams{ - CompressionLevel: compressionLevel, - WindowLog: zw.wlog, - Dict: cd, - } - zw.ResetWriterParams(w, ¶ms) -} - -// ResetWriterParams resets zw to write to w using the given set of parameters. 
-func (zw *Writer) ResetWriterParams(w io.Writer, params *WriterParams) { - zw.inBuf.size = 0 - zw.inBuf.pos = 0 - zw.outBuf.size = cstreamOutBufSize - zw.outBuf.pos = 0 - - zw.cd = params.Dict - initCStream(zw.cs, *params) - - zw.w = w -} - -func initCStream(cs *C.ZSTD_CStream, params WriterParams) { - if params.Dict != nil { - result := C.ZSTD_CCtx_refCDict_wrapper( - C.uintptr_t(uintptr(unsafe.Pointer(cs))), - C.uintptr_t(uintptr(unsafe.Pointer(params.Dict.p)))) - ensureNoError("ZSTD_CCtx_refCDict", result) - } else { - result := C.ZSTD_initCStream_wrapper( - C.uintptr_t(uintptr(unsafe.Pointer(cs))), - C.int(params.CompressionLevel)) - ensureNoError("ZSTD_initCStream", result) - } - - result := C.ZSTD_CCtx_setParameter_wrapper( - C.uintptr_t(uintptr(unsafe.Pointer(cs))), - C.ZSTD_cParameter(C.ZSTD_c_windowLog), - C.int(params.WindowLog)) - ensureNoError("ZSTD_CCtx_setParameter", result) -} - -func freeCStream(v interface{}) { - v.(*Writer).Release() -} - -// Release releases all the resources occupied by zw. -// -// zw cannot be used after the release. -func (zw *Writer) Release() { - if zw.cs == nil { - return - } - - result := C.ZSTD_freeCStream_wrapper( - C.uintptr_t(uintptr(unsafe.Pointer(zw.cs)))) - ensureNoError("ZSTD_freeCStream", result) - zw.cs = nil - - C.free(unsafe.Pointer(zw.inBuf.src)) - C.free(unsafe.Pointer(zw.inBuf)) - zw.inBuf = nil - - C.free(unsafe.Pointer(zw.outBuf.dst)) - C.free(unsafe.Pointer(zw.outBuf)) - zw.outBuf = nil - - zw.w = nil - zw.cd = nil -} - -// ReadFrom reads all the data from r and writes it to zw. -// -// Returns the number of bytes read from r. -// -// ReadFrom may not flush the compressed data to the underlying writer -// due to performance reasons. -// Call Flush or Close when the compressed data must propagate -// to the underlying writer. -func (zw *Writer) ReadFrom(r io.Reader) (int64, error) { - nn := int64(0) - for { - // Fill the inBuf. - for zw.inBuf.size < cstreamInBufSize { - n, err := r.Read(zw.inBufGo[zw.inBuf.size:cstreamInBufSize]) - - // Sometimes n > 0 even when Read() returns an error. - // This is true especially if the error is io.EOF. - zw.inBuf.size += C.size_t(n) - nn += int64(n) - - if err != nil { - if err == io.EOF { - return nn, nil - } - return nn, err - } - } - - // Flush the inBuf. - if err := zw.flushInBuf(); err != nil { - return nn, err - } - } -} - -// Write writes p to zw. -// -// Write doesn't flush the compressed data to the underlying writer -// due to performance reasons. -// Call Flush or Close when the compressed data must propagate -// to the underlying writer. -func (zw *Writer) Write(p []byte) (int, error) { - pLen := len(p) - if pLen == 0 { - return 0, nil - } - - for { - n := copy(zw.inBufGo[zw.inBuf.size:cstreamInBufSize], p) - zw.inBuf.size += C.size_t(n) - p = p[n:] - if len(p) == 0 { - // Fast path - just copy the data to input buffer. - return pLen, nil - } - if err := zw.flushInBuf(); err != nil { - return 0, err - } - } -} - -func (zw *Writer) flushInBuf() error { - prevInBufPos := zw.inBuf.pos - result := C.ZSTD_compressStream_wrapper( - C.uintptr_t(uintptr(unsafe.Pointer(zw.cs))), - C.uintptr_t(uintptr(unsafe.Pointer(zw.outBuf))), - C.uintptr_t(uintptr(unsafe.Pointer(zw.inBuf)))) - ensureNoError("ZSTD_compressStream", result) - - // Move the remaining data to the start of inBuf. 
- copy(zw.inBufGo[:cstreamInBufSize], zw.inBufGo[zw.inBuf.pos:zw.inBuf.size]) - zw.inBuf.size -= zw.inBuf.pos - zw.inBuf.pos = 0 - - if zw.outBuf.size-zw.outBuf.pos > zw.outBuf.pos && prevInBufPos != zw.inBuf.pos { - // There is enough space in outBuf and the last compression - // succeeded, so don't flush outBuf yet. - return nil - } - - // Flush outBuf, since there is low space in it or the last compression - // attempt was unsuccessful. - return zw.flushOutBuf() -} - -func (zw *Writer) flushOutBuf() error { - if zw.outBuf.pos == 0 { - // Nothing to flush. - return nil - } - - outBuf := zw.outBufGo[:zw.outBuf.pos] - n, err := zw.w.Write(outBuf) - zw.outBuf.pos = 0 - if err != nil { - return fmt.Errorf("cannot flush internal buffer to the underlying writer: %s", err) - } - if n != len(outBuf) { - panic(fmt.Errorf("BUG: the underlying writer violated io.Writer contract and didn't return error after writing incomplete data; written %d bytes; want %d bytes", - n, len(outBuf))) - } - return nil -} - -// Flush flushes the remaining data from zw to the underlying writer. -func (zw *Writer) Flush() error { - // Flush inBuf. - for zw.inBuf.size > 0 { - if err := zw.flushInBuf(); err != nil { - return err - } - } - - // Flush the internal buffer to outBuf. - for { - result := C.ZSTD_flushStream_wrapper( - C.uintptr_t(uintptr(unsafe.Pointer(zw.cs))), - C.uintptr_t(uintptr(unsafe.Pointer(zw.outBuf)))) - ensureNoError("ZSTD_flushStream", result) - if err := zw.flushOutBuf(); err != nil { - return err - } - if result == 0 { - // No more data left in the internal buffer. - return nil - } - } -} - -// Close finalizes the compressed stream and flushes all the compressed data -// to the underlying writer. -// -// It doesn't close the underlying writer passed to New* functions. -func (zw *Writer) Close() error { - if err := zw.Flush(); err != nil { - return err - } - - for { - result := C.ZSTD_endStream_wrapper( - C.uintptr_t(uintptr(unsafe.Pointer(zw.cs))), - C.uintptr_t(uintptr(unsafe.Pointer(zw.outBuf)))) - ensureNoError("ZSTD_endStream", result) - if err := zw.flushOutBuf(); err != nil { - return err - } - if result == 0 { - return nil - } - } -} diff --git a/vendor/github.com/valyala/gozstd/zdict.h b/vendor/github.com/valyala/gozstd/zdict.h deleted file mode 100644 index f1e139a40d..0000000000 --- a/vendor/github.com/valyala/gozstd/zdict.h +++ /dev/null @@ -1,452 +0,0 @@ -/* - * Copyright (c) Yann Collet, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. 
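The deleted ReadFrom/Write/Flush/Close machinery above hand-rolls the in/out buffer shuffling around ZSTD_compressStream. With the pure-Go encoder the same behaviour falls out of the standard io interfaces. A small sketch (a bytes.Buffer stands in for a real destination) showing that output may stay buffered until Flush, and that Close writes the frame epilogue without closing the underlying writer:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var out bytes.Buffer
	enc, err := zstd.NewWriter(&out)
	if err != nil {
		panic(err)
	}

	// io.Copy uses the encoder's ReadFrom; like the deleted gozstd code,
	// compressed output may sit in internal buffers until Flush or Close.
	if _, err := io.Copy(enc, strings.NewReader(strings.Repeat("part one ", 512))); err != nil {
		panic(err)
	}
	if err := enc.Flush(); err != nil { // everything written so far is now decodable
		panic(err)
	}
	fmt.Println("after Flush:", out.Len(), "bytes")

	if _, err := enc.Write([]byte("part two")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil { // writes the epilogue, leaves `out` untouched
		panic(err)
	}
	fmt.Println("after Close:", out.Len(), "bytes")
}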
- */ - -#ifndef DICTBUILDER_H_001 -#define DICTBUILDER_H_001 - -#if defined (__cplusplus) -extern "C" { -#endif - - -/*====== Dependencies ======*/ -#include /* size_t */ - - -/* ===== ZDICTLIB_API : control library symbols visibility ===== */ -#ifndef ZDICTLIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default"))) -# else -# define ZDICTLIB_VISIBILITY -# endif -#endif -#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) -# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY -#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) -# define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ -#else -# define ZDICTLIB_API ZDICTLIB_VISIBILITY -#endif - -/******************************************************************************* - * Zstd dictionary builder - * - * FAQ - * === - * Why should I use a dictionary? - * ------------------------------ - * - * Zstd can use dictionaries to improve compression ratio of small data. - * Traditionally small files don't compress well because there is very little - * repetition in a single sample, since it is small. But, if you are compressing - * many similar files, like a bunch of JSON records that share the same - * structure, you can train a dictionary on ahead of time on some samples of - * these files. Then, zstd can use the dictionary to find repetitions that are - * present across samples. This can vastly improve compression ratio. - * - * When is a dictionary useful? - * ---------------------------- - * - * Dictionaries are useful when compressing many small files that are similar. - * The larger a file is, the less benefit a dictionary will have. Generally, - * we don't expect dictionary compression to be effective past 100KB. And the - * smaller a file is, the more we would expect the dictionary to help. - * - * How do I use a dictionary? - * -------------------------- - * - * Simply pass the dictionary to the zstd compressor with - * `ZSTD_CCtx_loadDictionary()`. The same dictionary must then be passed to - * the decompressor, using `ZSTD_DCtx_loadDictionary()`. There are other - * more advanced functions that allow selecting some options, see zstd.h for - * complete documentation. - * - * What is a zstd dictionary? - * -------------------------- - * - * A zstd dictionary has two pieces: Its header, and its content. The header - * contains a magic number, the dictionary ID, and entropy tables. These - * entropy tables allow zstd to save on header costs in the compressed file, - * which really matters for small data. The content is just bytes, which are - * repeated content that is common across many samples. - * - * What is a raw content dictionary? - * --------------------------------- - * - * A raw content dictionary is just bytes. It doesn't have a zstd dictionary - * header, a dictionary ID, or entropy tables. Any buffer is a valid raw - * content dictionary. - * - * How do I train a dictionary? - * ---------------------------- - * - * Gather samples from your use case. These samples should be similar to each - * other. If you have several use cases, you could try to train one dictionary - * per use case. - * - * Pass those samples to `ZDICT_trainFromBuffer()` and that will train your - * dictionary. There are a few advanced versions of this function, but this - * is a great starting point. 
If you want to further tune your dictionary - * you could try `ZDICT_optimizeTrainFromBuffer_cover()`. If that is too slow - * you can try `ZDICT_optimizeTrainFromBuffer_fastCover()`. - * - * If the dictionary training function fails, that is likely because you - * either passed too few samples, or a dictionary would not be effective - * for your data. Look at the messages that the dictionary trainer printed, - * if it doesn't say too few samples, then a dictionary would not be effective. - * - * How large should my dictionary be? - * ---------------------------------- - * - * A reasonable dictionary size, the `dictBufferCapacity`, is about 100KB. - * The zstd CLI defaults to a 110KB dictionary. You likely don't need a - * dictionary larger than that. But, most use cases can get away with a - * smaller dictionary. The advanced dictionary builders can automatically - * shrink the dictionary for you, and select a the smallest size that - * doesn't hurt compression ratio too much. See the `shrinkDict` parameter. - * A smaller dictionary can save memory, and potentially speed up - * compression. - * - * How many samples should I provide to the dictionary builder? - * ------------------------------------------------------------ - * - * We generally recommend passing ~100x the size of the dictionary - * in samples. A few thousand should suffice. Having too few samples - * can hurt the dictionaries effectiveness. Having more samples will - * only improve the dictionaries effectiveness. But having too many - * samples can slow down the dictionary builder. - * - * How do I determine if a dictionary will be effective? - * ----------------------------------------------------- - * - * Simply train a dictionary and try it out. You can use zstd's built in - * benchmarking tool to test the dictionary effectiveness. - * - * # Benchmark levels 1-3 without a dictionary - * zstd -b1e3 -r /path/to/my/files - * # Benchmark levels 1-3 with a dictionary - * zstd -b1e3 -r /path/to/my/files -D /path/to/my/dictionary - * - * When should I retrain a dictionary? - * ----------------------------------- - * - * You should retrain a dictionary when its effectiveness drops. Dictionary - * effectiveness drops as the data you are compressing changes. Generally, we do - * expect dictionaries to "decay" over time, as your data changes, but the rate - * at which they decay depends on your use case. Internally, we regularly - * retrain dictionaries, and if the new dictionary performs significantly - * better than the old dictionary, we will ship the new dictionary. - * - * I have a raw content dictionary, how do I turn it into a zstd dictionary? - * ------------------------------------------------------------------------- - * - * If you have a raw content dictionary, e.g. by manually constructing it, or - * using a third-party dictionary builder, you can turn it into a zstd - * dictionary by using `ZDICT_finalizeDictionary()`. You'll also have to - * provide some samples of the data. It will add the zstd header to the - * raw content, which contains a dictionary ID and entropy tables, which - * will improve compression ratio, and allow zstd to write the dictionary ID - * into the frame, if you so choose. - * - * Do I have to use zstd's dictionary builder? - * ------------------------------------------- - * - * No! You can construct dictionary content however you please, it is just - * bytes. It will always be valid as a raw content dictionary. 
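The FAQ above is about dictionary training, which the pure-Go library does not perform; a dictionary still has to be built elsewhere (for example with the zstd CLI's --train mode). Assuming the vendored version exposes the WithEncoderDict and WithDecoderDicts options (my reading of the library, not something this change exercises), using an existing dictionary blob looks roughly like this; the my.dict filename is made up for the sketch:

package main

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// A dictionary produced elsewhere, e.g. `zstd --train samples/* -o my.dict`.
	dict, err := os.ReadFile("my.dict")
	if err != nil {
		panic(err)
	}

	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
	if err != nil {
		panic(err)
	}
	compressed := enc.EncodeAll([]byte(`{"id":1,"name":"sample"}`), nil)
	enc.Close()

	// The same dictionary must be offered to the decoder.
	dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dict))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	plain, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plain)
}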
If you want - * a zstd dictionary, which can improve compression ratio, use - * `ZDICT_finalizeDictionary()`. - * - * What is the attack surface of a zstd dictionary? - * ------------------------------------------------ - * - * Zstd is heavily fuzz tested, including loading fuzzed dictionaries, so - * zstd should never crash, or access out-of-bounds memory no matter what - * the dictionary is. However, if an attacker can control the dictionary - * during decompression, they can cause zstd to generate arbitrary bytes, - * just like if they controlled the compressed data. - * - ******************************************************************************/ - - -/*! ZDICT_trainFromBuffer(): - * Train a dictionary from an array of samples. - * Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4, - * f=20, and accel=1. - * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, - * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. - * The resulting dictionary will be saved into `dictBuffer`. - * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - * or an error code, which can be tested with ZDICT_isError(). - * Note: Dictionary training will fail if there are not enough samples to construct a - * dictionary, or if most of the samples are too small (< 8 bytes being the lower limit). - * If dictionary training fails, you should use zstd without a dictionary, as the dictionary - * would've been ineffective anyways. If you believe your samples would benefit from a dictionary - * please open an issue with details, and we can look into it. - * Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB. - * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. - * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. - * In general, it's recommended to provide a few thousands samples, though this can vary a lot. - * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. - */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity, - const void* samplesBuffer, - const size_t* samplesSizes, unsigned nbSamples); - -typedef struct { - int compressionLevel; /*< optimize for a specific zstd compression level; 0 means default */ - unsigned notificationLevel; /*< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ - unsigned dictID; /*< force dictID value; 0 means auto mode (32-bits random value) - * NOTE: The zstd format reserves some dictionary IDs for future use. - * You may use them in private settings, but be warned that they - * may be used by zstd in a public dictionary registry in the future. - * These dictionary IDs are: - * - low range : <= 32767 - * - high range : >= (2^31) - */ -} ZDICT_params_t; - -/*! ZDICT_finalizeDictionary(): - * Given a custom content as a basis for dictionary, and a set of samples, - * finalize dictionary by adding headers and statistics according to the zstd - * dictionary format. - * - * Samples must be stored concatenated in a flat buffer `samplesBuffer`, - * supplied with an array of sizes `samplesSizes`, providing the size of each - * sample in order. The samples are used to construct the statistics, so they - * should be representative of what you will compress with this dictionary. - * - * The compression level can be set in `parameters`. 
You should pass the - * compression level you expect to use in production. The statistics for each - * compression level differ, so tuning the dictionary for the compression level - * can help quite a bit. - * - * You can set an explicit dictionary ID in `parameters`, or allow us to pick - * a random dictionary ID for you, but we can't guarantee no collisions. - * - * The dstDictBuffer and the dictContent may overlap, and the content will be - * appended to the end of the header. If the header + the content doesn't fit in - * maxDictSize the beginning of the content is truncated to make room, since it - * is presumed that the most profitable content is at the end of the dictionary, - * since that is the cheapest to reference. - * - * `maxDictSize` must be >= max(dictContentSize, ZSTD_DICTSIZE_MIN). - * - * @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`), - * or an error code, which can be tested by ZDICT_isError(). - * Note: ZDICT_finalizeDictionary() will push notifications into stderr if - * instructed to, using notificationLevel>0. - * NOTE: This function currently may fail in several edge cases including: - * * Not enough samples - * * Samples are uncompressible - * * Samples are all exactly the same - */ -ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dstDictBuffer, size_t maxDictSize, - const void* dictContent, size_t dictContentSize, - const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, - ZDICT_params_t parameters); - - -/*====== Helper functions ======*/ -ZDICTLIB_API unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize); /**< extracts dictID; @return zero if error (not a valid dictionary) */ -ZDICTLIB_API size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize); /* returns dict header size; returns a ZSTD error code on failure */ -ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode); -ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode); - - - -#ifdef ZDICT_STATIC_LINKING_ONLY - -/* ==================================================================================== - * The definitions in this section are considered experimental. - * They should never be used with a dynamic library, as they may change in the future. - * They are provided for advanced usages. - * Use them only in association with static linking. - * ==================================================================================== */ - -#define ZDICT_DICTSIZE_MIN 256 -/* Deprecated: Remove in v1.6.0 */ -#define ZDICT_CONTENTSIZE_MIN 128 - -/*! ZDICT_cover_params_t: - * k and d are the only required parameters. - * For others, value 0 means default. 
- */ -typedef struct { - unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ - unsigned d; /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ - unsigned steps; /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ - unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ - double splitPoint; /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */ - unsigned shrinkDict; /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */ - unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */ - ZDICT_params_t zParams; -} ZDICT_cover_params_t; - -typedef struct { - unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ - unsigned d; /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ - unsigned f; /* log of size of frequency array : constraint: 0 < f <= 31 : 1 means default(20)*/ - unsigned steps; /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ - unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ - double splitPoint; /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (0.75), 1.0 when all samples are used for both training and testing */ - unsigned accel; /* Acceleration level: constraint: 0 < accel <= 10, higher means faster and less accurate, 0 means default(1) */ - unsigned shrinkDict; /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */ - unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */ - - ZDICT_params_t zParams; -} ZDICT_fastCover_params_t; - -/*! ZDICT_trainFromBuffer_cover(): - * Train a dictionary from an array of samples using the COVER algorithm. - * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, - * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. - * The resulting dictionary will be saved into `dictBuffer`. - * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - * or an error code, which can be tested with ZDICT_isError(). - * See ZDICT_trainFromBuffer() for details on failure modes. - * Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte. - * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. 
- * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. - * In general, it's recommended to provide a few thousands samples, though this can vary a lot. - * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. - */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( - void *dictBuffer, size_t dictBufferCapacity, - const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, - ZDICT_cover_params_t parameters); - -/*! ZDICT_optimizeTrainFromBuffer_cover(): - * The same requirements as above hold for all the parameters except `parameters`. - * This function tries many parameter combinations and picks the best parameters. - * `*parameters` is filled with the best parameters found, - * dictionary constructed with those parameters is stored in `dictBuffer`. - * - * All of the parameters d, k, steps are optional. - * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}. - * if steps is zero it defaults to its default value. - * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000]. - * - * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - * or an error code, which can be tested with ZDICT_isError(). - * On success `*parameters` contains the parameters selected. - * See ZDICT_trainFromBuffer() for details on failure modes. - * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread. - */ -ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( - void* dictBuffer, size_t dictBufferCapacity, - const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, - ZDICT_cover_params_t* parameters); - -/*! ZDICT_trainFromBuffer_fastCover(): - * Train a dictionary from an array of samples using a modified version of COVER algorithm. - * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, - * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. - * d and k are required. - * All other parameters are optional, will use default values if not provided - * The resulting dictionary will be saved into `dictBuffer`. - * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - * or an error code, which can be tested with ZDICT_isError(). - * See ZDICT_trainFromBuffer() for details on failure modes. - * Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory. - * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. - * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. - * In general, it's recommended to provide a few thousands samples, though this can vary a lot. - * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. - */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer, - size_t dictBufferCapacity, const void *samplesBuffer, - const size_t *samplesSizes, unsigned nbSamples, - ZDICT_fastCover_params_t parameters); - -/*! ZDICT_optimizeTrainFromBuffer_fastCover(): - * The same requirements as above hold for all the parameters except `parameters`. - * This function tries many parameter combinations (specifically, k and d combinations) - * and picks the best parameters. 
`*parameters` is filled with the best parameters found, - * dictionary constructed with those parameters is stored in `dictBuffer`. - * All of the parameters d, k, steps, f, and accel are optional. - * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}. - * if steps is zero it defaults to its default value. - * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000]. - * If f is zero, default value of 20 is used. - * If accel is zero, default value of 1 is used. - * - * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - * or an error code, which can be tested with ZDICT_isError(). - * On success `*parameters` contains the parameters selected. - * See ZDICT_trainFromBuffer() for details on failure modes. - * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread. - */ -ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, - size_t dictBufferCapacity, const void* samplesBuffer, - const size_t* samplesSizes, unsigned nbSamples, - ZDICT_fastCover_params_t* parameters); - -typedef struct { - unsigned selectivityLevel; /* 0 means default; larger => select more => larger dictionary */ - ZDICT_params_t zParams; -} ZDICT_legacy_params_t; - -/*! ZDICT_trainFromBuffer_legacy(): - * Train a dictionary from an array of samples. - * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, - * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. - * The resulting dictionary will be saved into `dictBuffer`. - * `parameters` is optional and can be provided with values set to 0 to mean "default". - * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) - * or an error code, which can be tested with ZDICT_isError(). - * See ZDICT_trainFromBuffer() for details on failure modes. - * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. - * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. - * In general, it's recommended to provide a few thousands samples, though this can vary a lot. - * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. - * Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0. - */ -ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy( - void* dictBuffer, size_t dictBufferCapacity, - const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, - ZDICT_legacy_params_t parameters); - - -/* Deprecation warnings */ -/* It is generally possible to disable deprecation warnings from compiler, - for example with -Wno-deprecated-declarations for gcc - or _CRT_SECURE_NO_WARNINGS in Visual. 
- Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */ -#ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS -# define ZDICT_DEPRECATED(message) ZDICTLIB_API /* disable deprecation warnings */ -#else -# define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) -# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ -# define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API -# elif defined(__clang__) || (ZDICT_GCC_VERSION >= 405) -# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message))) -# elif (ZDICT_GCC_VERSION >= 301) -# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated)) -# elif defined(_MSC_VER) -# define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message)) -# else -# pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler") -# define ZDICT_DEPRECATED(message) ZDICTLIB_API -# endif -#endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */ - -ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead") -size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, - const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples); - - -#endif /* ZDICT_STATIC_LINKING_ONLY */ - -#if defined (__cplusplus) -} -#endif - -#endif /* DICTBUILDER_H_001 */ diff --git a/vendor/github.com/valyala/gozstd/zstd.h b/vendor/github.com/valyala/gozstd/zstd.h deleted file mode 100644 index 66ec125d00..0000000000 --- a/vendor/github.com/valyala/gozstd/zstd.h +++ /dev/null @@ -1,2575 +0,0 @@ -/* - * Copyright (c) Yann Collet, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ -#if defined (__cplusplus) -extern "C" { -#endif - -#ifndef ZSTD_H_235446 -#define ZSTD_H_235446 - -/* ====== Dependency ======*/ -#include /* INT_MAX */ -#include /* size_t */ - - -/* ===== ZSTDLIB_API : control library symbols visibility ===== */ -#ifndef ZSTDLIB_VISIBLE -# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) -# define ZSTDLIB_VISIBLE __attribute__ ((visibility ("default"))) -# define ZSTDLIB_HIDDEN __attribute__ ((visibility ("hidden"))) -# else -# define ZSTDLIB_VISIBLE -# define ZSTDLIB_HIDDEN -# endif -#endif -#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) -# define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBLE -#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) -# define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBLE /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ -#else -# define ZSTDLIB_API ZSTDLIB_VISIBLE -#endif - - -/******************************************************************************* - Introduction - - zstd, short for Zstandard, is a fast lossless compression algorithm, targeting - real-time compression scenarios at zlib-level and better compression ratios. - The zstd compression library provides in-memory compression and decompression - functions. - - The library supports regular compression levels from 1 up to ZSTD_maxCLevel(), - which is currently 22. Levels >= 20, labeled `--ultra`, should be used with - caution, as they require more memory. 
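The introduction above describes libzstd's level range 1..22 plus negative levels. The pure-Go encoder replacing it exposes four presets instead; EncoderLevelFromZstd maps a numeric level onto the nearest preset, as this small sketch (not from this change) illustrates:

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Numeric zstd levels collapse onto four speed/ratio presets.
	for _, lvl := range []int{1, 3, 7, 19} {
		fmt.Printf("zstd level %2d -> %v\n", lvl, zstd.EncoderLevelFromZstd(lvl))
	}
	fmt.Println("presets:", zstd.SpeedFastest, zstd.SpeedDefault,
		zstd.SpeedBetterCompression, zstd.SpeedBestCompression)
}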
The library also offers negative - compression levels, which extend the range of speed vs. ratio preferences. - The lower the level, the faster the speed (at the cost of compression). - - Compression can be done in: - - a single step (described as Simple API) - - a single step, reusing a context (described as Explicit context) - - unbounded multiple steps (described as Streaming compression) - - The compression ratio achievable on small data can be highly improved using - a dictionary. Dictionary compression can be performed in: - - a single step (described as Simple dictionary API) - - a single step, reusing a dictionary (described as Bulk-processing - dictionary API) - - Advanced experimental functions can be accessed using - `#define ZSTD_STATIC_LINKING_ONLY` before including zstd.h. - - Advanced experimental APIs should never be used with a dynamically-linked - library. They are not "stable"; their definitions or signatures may change in - the future. Only static linking is allowed. -*******************************************************************************/ - -/*------ Version ------*/ -#define ZSTD_VERSION_MAJOR 1 -#define ZSTD_VERSION_MINOR 5 -#define ZSTD_VERSION_RELEASE 1 -#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) - -/*! ZSTD_versionNumber() : - * Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE). */ -ZSTDLIB_API unsigned ZSTD_versionNumber(void); - -#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE -#define ZSTD_QUOTE(str) #str -#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str) -#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION) - -/*! ZSTD_versionString() : - * Return runtime library version, like "1.4.5". Requires v1.3.0+. */ -ZSTDLIB_API const char* ZSTD_versionString(void); - -/* ************************************* - * Default constant - ***************************************/ -#ifndef ZSTD_CLEVEL_DEFAULT -# define ZSTD_CLEVEL_DEFAULT 3 -#endif - -/* ************************************* - * Constants - ***************************************/ - -/* All magic numbers are supposed read/written to/from files/memory using little-endian convention */ -#define ZSTD_MAGICNUMBER 0xFD2FB528 /* valid since v0.8.0 */ -#define ZSTD_MAGIC_DICTIONARY 0xEC30A437 /* valid since v0.7.0 */ -#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50 /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */ -#define ZSTD_MAGIC_SKIPPABLE_MASK 0xFFFFFFF0 - -#define ZSTD_BLOCKSIZELOG_MAX 17 -#define ZSTD_BLOCKSIZE_MAX (1<= `ZSTD_compressBound(srcSize)`. - * @return : compressed size written into `dst` (<= `dstCapacity), - * or an error code if it fails (which can be tested using ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - int compressionLevel); - -/*! ZSTD_decompress() : - * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. - * `dstCapacity` is an upper bound of originalSize to regenerate. - * If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data. - * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), - * or an errorCode if it fails (which can be tested using ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity, - const void* src, size_t compressedSize); - -/*! 
ZSTD_getFrameContentSize() : requires v1.3.0+ - * `src` should point to the start of a ZSTD encoded frame. - * `srcSize` must be at least as large as the frame header. - * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough. - * @return : - decompressed size of `src` frame content, if known - * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined - * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) - * note 1 : a 0 return value means the frame is valid but "empty". - * note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode. - * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size. - * In which case, it's necessary to use streaming mode to decompress data. - * Optionally, application can rely on some implicit limit, - * as ZSTD_decompress() only needs an upper bound of decompressed size. - * (For example, data could be necessarily cut into blocks <= 16 KB). - * note 3 : decompressed size is always present when compression is completed using single-pass functions, - * such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict(). - * note 4 : decompressed size can be very large (64-bits value), - * potentially larger than what local system can handle as a single memory segment. - * In which case, it's necessary to use streaming mode to decompress data. - * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified. - * Always ensure return value fits within application's authorized limits. - * Each application can set its own limits. - * note 6 : This function replaces ZSTD_getDecompressedSize() */ -#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1) -#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2) -ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize); - -/*! ZSTD_getDecompressedSize() : - * NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize(). - * Both functions work the same way, but ZSTD_getDecompressedSize() blends - * "empty", "unknown" and "error" results to the same return value (0), - * while ZSTD_getFrameContentSize() gives them separate return values. - * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */ -ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize); - -/*! ZSTD_findFrameCompressedSize() : Requires v1.4.0+ - * `src` should point to the start of a ZSTD frame or skippable frame. - * `srcSize` must be >= first frame size - * @return : the compressed size of the first frame starting at `src`, - * suitable to pass as `srcSize` to `ZSTD_decompress` or similar, - * or an error code if input is invalid */ -ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize); - - -/*====== Helper functions ======*/ -#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? 
(((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */ -ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */ -ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */ -ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */ -ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */ -ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */ -ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */ - - -/*************************************** -* Explicit context -***************************************/ -/*= Compression context - * When compressing many times, - * it is recommended to allocate a context just once, - * and re-use it for each successive compression operation. - * This will make workload friendlier for system's memory. - * Note : re-using context is just a speed / resource optimization. - * It doesn't change the compression ratio, which remains identical. - * Note 2 : In multi-threaded environments, - * use one different context per thread for parallel execution. - */ -typedef struct ZSTD_CCtx_s ZSTD_CCtx; -ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void); -ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* accept NULL pointer */ - -/*! ZSTD_compressCCtx() : - * Same as ZSTD_compress(), using an explicit ZSTD_CCtx. - * Important : in order to behave similarly to `ZSTD_compress()`, - * this function compresses at requested compression level, - * __ignoring any other parameter__ . - * If any advanced parameter was set using the advanced API, - * they will all be reset. Only `compressionLevel` remains. - */ -ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - int compressionLevel); - -/*= Decompression context - * When decompressing many times, - * it is recommended to allocate a context only once, - * and re-use it for each successive compression operation. - * This will make workload friendlier for system's memory. - * Use one context per thread for parallel execution. */ -typedef struct ZSTD_DCtx_s ZSTD_DCtx; -ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void); -ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); /* accept NULL pointer */ - -/*! ZSTD_decompressDCtx() : - * Same as ZSTD_decompress(), - * requires an allocated ZSTD_DCtx. - * Compatible with sticky parameters. - */ -ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize); - - -/********************************************* -* Advanced compression API (Requires v1.4.0+) -**********************************************/ - -/* API design : - * Parameters are pushed one by one into an existing context, - * using ZSTD_CCtx_set*() functions. - * Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame. - * "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` ! - * __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ . - * - * It's possible to reset all parameters to "default" using ZSTD_CCtx_reset(). 
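The one-shot ZSTD_compress/ZSTD_decompress calls and the reusable ZSTD_CCtx/ZSTD_DCtx contexts described above map onto EncodeAll/DecodeAll on long-lived Encoder/Decoder values in the replacement library; a sketch of that reuse pattern (not code from this change):

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

// A long-lived Encoder/Decoder pair plays the role of a reused
// ZSTD_CCtx/ZSTD_DCtx: buffers are allocated once and amortized across calls.
// The errors are ignored here only because no options are passed.
var (
	enc, _ = zstd.NewWriter(nil)
	dec, _ = zstd.NewReader(nil)
)

func compress(src []byte) []byte {
	return enc.EncodeAll(src, nil)
}

func decompress(src []byte) ([]byte, error) {
	return dec.DecodeAll(src, nil)
}

func main() {
	c := compress([]byte("one-shot round trip"))
	p, err := decompress(c)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d compressed bytes -> %q\n", len(c), p)
}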
- * - * This API supersedes all other "advanced" API entry points in the experimental section. - * In the future, we expect to remove from experimental API entry points which are redundant with this API. - */ - - -/* Compression strategies, listed from fastest to strongest */ -typedef enum { ZSTD_fast=1, - ZSTD_dfast=2, - ZSTD_greedy=3, - ZSTD_lazy=4, - ZSTD_lazy2=5, - ZSTD_btlazy2=6, - ZSTD_btopt=7, - ZSTD_btultra=8, - ZSTD_btultra2=9 - /* note : new strategies _might_ be added in the future. - Only the order (from fast to strong) is guaranteed */ -} ZSTD_strategy; - -typedef enum { - - /* compression parameters - * Note: When compressing with a ZSTD_CDict these parameters are superseded - * by the parameters used to construct the ZSTD_CDict. - * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */ - ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table. - * Note that exact compression parameters are dynamically determined, - * depending on both compression level and srcSize (when known). - * Default level is ZSTD_CLEVEL_DEFAULT==3. - * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT. - * Note 1 : it's possible to pass a negative compression level. - * Note 2 : setting a level does not automatically set all other compression parameters - * to default. Setting this will however eventually dynamically impact the compression - * parameters which have not been manually set. The manually set - * ones will 'stick'. */ - /* Advanced compression parameters : - * It's possible to pin down compression parameters to some specific values. - * In which case, these values are no longer dynamically selected by the compressor */ - ZSTD_c_windowLog=101, /* Maximum allowed back-reference distance, expressed as power of 2. - * This will set a memory budget for streaming decompression, - * with larger values requiring more memory - * and typically compressing more. - * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX. - * Special: value 0 means "use default windowLog". - * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT - * requires explicitly allowing such size at streaming decompression stage. */ - ZSTD_c_hashLog=102, /* Size of the initial probe table, as a power of 2. - * Resulting memory usage is (1 << (hashLog+2)). - * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX. - * Larger tables improve compression ratio of strategies <= dFast, - * and improve speed of strategies > dFast. - * Special: value 0 means "use default hashLog". */ - ZSTD_c_chainLog=103, /* Size of the multi-probe search table, as a power of 2. - * Resulting memory usage is (1 << (chainLog+2)). - * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX. - * Larger tables result in better and slower compression. - * This parameter is useless for "fast" strategy. - * It's still useful when using "dfast" strategy, - * in which case it defines a secondary probe table. - * Special: value 0 means "use default chainLog". */ - ZSTD_c_searchLog=104, /* Number of search attempts, as a power of 2. - * More attempts result in better and slower compression. - * This parameter is useless for "fast" and "dFast" strategies. - * Special: value 0 means "use default searchLog". */ - ZSTD_c_minMatch=105, /* Minimum size of searched matches. - * Note that Zstandard can still find matches of smaller size, - * it just tweaks its search algorithm to look for this size and larger. 
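Of the ZSTD_c_* parameters listed here, only a handful have counterparts in the pure-Go encoder; hashLog, chainLog, searchLog, minMatch and strategy are folded into its level presets and cannot be set individually. A hedged sketch of the options I believe do map:

package main

import (
	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil,
		zstd.WithEncoderLevel(zstd.SpeedBetterCompression), // ~ ZSTD_c_compressionLevel
		zstd.WithWindowSize(4<<20),                         // ~ ZSTD_c_windowLog, in bytes
		zstd.WithEncoderConcurrency(2),                     // ~ ZSTD_c_nbWorkers
	)
	if err != nil {
		panic(err)
	}
	_ = enc.Close()
}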
- * Larger values increase compression and decompression speed, but decrease ratio. - * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX. - * Note that currently, for all strategies < btopt, effective minimum is 4. - * , for all strategies > fast, effective maximum is 6. - * Special: value 0 means "use default minMatchLength". */ - ZSTD_c_targetLength=106, /* Impact of this field depends on strategy. - * For strategies btopt, btultra & btultra2: - * Length of Match considered "good enough" to stop search. - * Larger values make compression stronger, and slower. - * For strategy fast: - * Distance between match sampling. - * Larger values make compression faster, and weaker. - * Special: value 0 means "use default targetLength". */ - ZSTD_c_strategy=107, /* See ZSTD_strategy enum definition. - * The higher the value of selected strategy, the more complex it is, - * resulting in stronger and slower compression. - * Special: value 0 means "use default strategy". */ - /* LDM mode parameters */ - ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching. - * This parameter is designed to improve compression ratio - * for large inputs, by finding large matches at long distance. - * It increases memory usage and window size. - * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB - * except when expressly set to a different value. - * Note: will be enabled by default if ZSTD_c_windowLog >= 128 MB and - * compression strategy >= ZSTD_btopt (== compression level 16+) */ - ZSTD_c_ldmHashLog=161, /* Size of the table for long distance matching, as a power of 2. - * Larger values increase memory usage and compression ratio, - * but decrease compression speed. - * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX - * default: windowlog - 7. - * Special: value 0 means "automatically determine hashlog". */ - ZSTD_c_ldmMinMatch=162, /* Minimum match size for long distance matcher. - * Larger/too small values usually decrease compression ratio. - * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX. - * Special: value 0 means "use default value" (default: 64). */ - ZSTD_c_ldmBucketSizeLog=163, /* Log size of each bucket in the LDM hash table for collision resolution. - * Larger values improve collision resolution but decrease compression speed. - * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX. - * Special: value 0 means "use default value" (default: 3). */ - ZSTD_c_ldmHashRateLog=164, /* Frequency of inserting/looking up entries into the LDM hash table. - * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN). - * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage. - * Larger values improve compression speed. - * Deviating far from default value will likely result in a compression ratio decrease. - * Special: value 0 means "automatically determine hashRateLog". */ - - /* frame parameters */ - ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1) - * Content size must be known at the beginning of compression. 
- * This is automatically the case when using ZSTD_compress2(), - * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */ - ZSTD_c_checksumFlag=201, /* A 32-bits checksum of content is written at end of frame (default:0) */ - ZSTD_c_dictIDFlag=202, /* When applicable, dictionary's ID is written into frame header (default:1) */ - - /* multi-threading parameters */ - /* These parameters are only active if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD). - * Otherwise, trying to set any other value than default (0) will be a no-op and return an error. - * In a situation where it's unknown if the linked library supports multi-threading or not, - * setting ZSTD_c_nbWorkers to any value >= 1 and consulting the return value provides a quick way to check this property. - */ - ZSTD_c_nbWorkers=400, /* Select how many threads will be spawned to compress in parallel. - * When nbWorkers >= 1, triggers asynchronous mode when invoking ZSTD_compressStream*() : - * ZSTD_compressStream*() consumes input and flush output if possible, but immediately gives back control to caller, - * while compression is performed in parallel, within worker thread(s). - * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end : - * in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call). - * More workers improve speed, but also increase memory usage. - * Default value is `0`, aka "single-threaded mode" : no worker is spawned, - * compression is performed inside Caller's thread, and all invocations are blocking */ - ZSTD_c_jobSize=401, /* Size of a compression job. This value is enforced only when nbWorkers >= 1. - * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads. - * 0 means default, which is dynamically determined based on compression parameters. - * Job size must be a minimum of overlap size, or ZSTDMT_JOBSIZE_MIN (= 512 KB), whichever is largest. - * The minimum size is automatically and transparently enforced. */ - ZSTD_c_overlapLog=402, /* Control the overlap size, as a fraction of window size. - * The overlap size is an amount of data reloaded from previous job at the beginning of a new job. - * It helps preserve compression ratio, while each job is compressed in parallel. - * This value is enforced only when nbWorkers >= 1. - * Larger values increase compression ratio, but decrease speed. - * Possible values range from 0 to 9 : - * - 0 means "default" : value will be determined by the library, depending on strategy - * - 1 means "no overlap" - * - 9 means "full overlap", using a full window size. - * Each intermediate rank increases/decreases load size by a factor 2 : - * 9: full window; 8: w/2; 7: w/4; 6: w/8; 5:w/16; 4: w/32; 3:w/64; 2:w/128; 1:no overlap; 0:default - * default value varies between 6 and 9, depending on strategy */ - - /* note : additional experimental parameters are also available - * within the experimental section of the API. 
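Frame and multi-threading parameters have thinner equivalents in the Go encoder: WithEncoderCRC stands in for ZSTD_c_checksumFlag, WithEncoderConcurrency for ZSTD_c_nbWorkers, and to the best of my understanding there is no user-facing knob for job size or overlap. A small sketch under those assumptions:

package main

import (
	"bytes"
	"runtime"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var out bytes.Buffer
	enc, err := zstd.NewWriter(&out,
		zstd.WithEncoderCRC(true),                               // ~ ZSTD_c_checksumFlag
		zstd.WithEncoderConcurrency(runtime.GOMAXPROCS(0)),      // ~ ZSTD_c_nbWorkers
	)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write(bytes.Repeat([]byte("parallel "), 1<<16)); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}
}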
- * At the time of this writing, they include : - * ZSTD_c_rsyncable - * ZSTD_c_format - * ZSTD_c_forceMaxWindow - * ZSTD_c_forceAttachDict - * ZSTD_c_literalCompressionMode - * ZSTD_c_targetCBlockSize - * ZSTD_c_srcSizeHint - * ZSTD_c_enableDedicatedDictSearch - * ZSTD_c_stableInBuffer - * ZSTD_c_stableOutBuffer - * ZSTD_c_blockDelimiters - * ZSTD_c_validateSequences - * ZSTD_c_useBlockSplitter - * ZSTD_c_useRowMatchFinder - * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. - * note : never ever use experimentalParam? names directly; - * also, the enums values themselves are unstable and can still change. - */ - ZSTD_c_experimentalParam1=500, - ZSTD_c_experimentalParam2=10, - ZSTD_c_experimentalParam3=1000, - ZSTD_c_experimentalParam4=1001, - ZSTD_c_experimentalParam5=1002, - ZSTD_c_experimentalParam6=1003, - ZSTD_c_experimentalParam7=1004, - ZSTD_c_experimentalParam8=1005, - ZSTD_c_experimentalParam9=1006, - ZSTD_c_experimentalParam10=1007, - ZSTD_c_experimentalParam11=1008, - ZSTD_c_experimentalParam12=1009, - ZSTD_c_experimentalParam13=1010, - ZSTD_c_experimentalParam14=1011, - ZSTD_c_experimentalParam15=1012 -} ZSTD_cParameter; - -typedef struct { - size_t error; - int lowerBound; - int upperBound; -} ZSTD_bounds; - -/*! ZSTD_cParam_getBounds() : - * All parameters must belong to an interval with lower and upper bounds, - * otherwise they will either trigger an error or be automatically clamped. - * @return : a structure, ZSTD_bounds, which contains - * - an error status field, which must be tested using ZSTD_isError() - * - lower and upper bounds, both inclusive - */ -ZSTDLIB_API ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter cParam); - -/*! ZSTD_CCtx_setParameter() : - * Set one compression parameter, selected by enum ZSTD_cParameter. - * All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds(). - * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter). - * Setting a parameter is generally only possible during frame initialization (before starting compression). - * Exception : when using multi-threading mode (nbWorkers >= 1), - * the following parameters can be updated _during_ compression (within same frame): - * => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy. - * new parameters will be active for next job only (after a flush()). - * @return : an error code (which can be tested using ZSTD_isError()). - */ -ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value); - -/*! ZSTD_CCtx_setPledgedSrcSize() : - * Total input data size to be compressed as a single frame. - * Value will be written in frame header, unless if explicitly forbidden using ZSTD_c_contentSizeFlag. - * This value will also be controlled at end of frame, and trigger an error if not respected. - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame. - * In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN. - * ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame. - * Note 2 : pledgedSrcSize is only valid once, for the next frame. - * It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN. 
- * Note 3 : Whenever all input data is provided and consumed in a single round, - * for example with ZSTD_compress2(), - * or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end), - * this value is automatically overridden by srcSize instead. - */ -ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize); - -typedef enum { - ZSTD_reset_session_only = 1, - ZSTD_reset_parameters = 2, - ZSTD_reset_session_and_parameters = 3 -} ZSTD_ResetDirective; - -/*! ZSTD_CCtx_reset() : - * There are 2 different things that can be reset, independently or jointly : - * - The session : will stop compressing current frame, and make CCtx ready to start a new one. - * Useful after an error, or to interrupt any ongoing compression. - * Any internal data not yet flushed is cancelled. - * Compression parameters and dictionary remain unchanged. - * They will be used to compress next frame. - * Resetting session never fails. - * - The parameters : changes all parameters back to "default". - * This removes any reference to any dictionary too. - * Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing) - * otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError()) - * - Both : similar to resetting the session, followed by resetting parameters. - */ -ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset); - -/*! ZSTD_compress2() : - * Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API. - * ZSTD_compress2() always starts a new frame. - * Should cctx hold data from a previously unfinished frame, everything about it is forgotten. - * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*() - * - The function is always blocking, returns when compression is completed. - * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`. - * @return : compressed size written into `dst` (<= `dstCapacity), - * or an error code if it fails (which can be tested using ZSTD_isError()). - */ -ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize); - - -/*********************************************** -* Advanced decompression API (Requires v1.4.0+) -************************************************/ - -/* The advanced API pushes parameters one by one into an existing DCtx context. - * Parameters are sticky, and remain valid for all following frames - * using the same DCtx context. - * It's possible to reset parameters to default values using ZSTD_DCtx_reset(). - * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream(). - * Therefore, no new decompression function is necessary. - */ - -typedef enum { - - ZSTD_d_windowLogMax=100, /* Select a size limit (in power of 2) beyond which - * the streaming API will refuse to allocate memory buffer - * in order to protect the host from unreasonable memory requirements. - * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode. - * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT). - * Special: value 0 means "use default maximum windowLog". */ - - /* note : additional experimental parameters are also available - * within the experimental section of the API. 
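The ZSTD_d_windowLogMax guard described above has a rough analogue in the Go decoder's WithDecoderMaxMemory option, which caps how much memory a frame may demand before decoding is refused; a sketch (the 64 MB limit is an arbitrary example):

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Cap the memory an untrusted frame may request, roughly what
	// ZSTD_d_windowLogMax does for the C streaming API.
	dec, err := zstd.NewReader(nil, zstd.WithDecoderMaxMemory(64<<20))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	if _, err := dec.DecodeAll([]byte("not a zstd frame"), nil); err != nil {
		fmt.Println("rejected:", err)
	}
}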
- * At the time of this writing, they include : - * ZSTD_d_format - * ZSTD_d_stableOutBuffer - * ZSTD_d_forceIgnoreChecksum - * ZSTD_d_refMultipleDDicts - * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. - * note : never ever use experimentalParam? names directly - */ - ZSTD_d_experimentalParam1=1000, - ZSTD_d_experimentalParam2=1001, - ZSTD_d_experimentalParam3=1002, - ZSTD_d_experimentalParam4=1003 - -} ZSTD_dParameter; - -/*! ZSTD_dParam_getBounds() : - * All parameters must belong to an interval with lower and upper bounds, - * otherwise they will either trigger an error or be automatically clamped. - * @return : a structure, ZSTD_bounds, which contains - * - an error status field, which must be tested using ZSTD_isError() - * - both lower and upper bounds, inclusive - */ -ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam); - -/*! ZSTD_DCtx_setParameter() : - * Set one compression parameter, selected by enum ZSTD_dParameter. - * All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds(). - * Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter). - * Setting a parameter is only possible during frame initialization (before starting decompression). - * @return : 0, or an error code (which can be tested using ZSTD_isError()). - */ -ZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value); - -/*! ZSTD_DCtx_reset() : - * Return a DCtx to clean state. - * Session and parameters can be reset jointly or separately. - * Parameters can only be reset when no active frame is being decompressed. - * @return : 0, or an error code, which can be tested with ZSTD_isError() - */ -ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset); - - -/**************************** -* Streaming -****************************/ - -typedef struct ZSTD_inBuffer_s { - const void* src; /**< start of input buffer */ - size_t size; /**< size of input buffer */ - size_t pos; /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */ -} ZSTD_inBuffer; - -typedef struct ZSTD_outBuffer_s { - void* dst; /**< start of output buffer */ - size_t size; /**< size of output buffer */ - size_t pos; /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */ -} ZSTD_outBuffer; - - - -/*-*********************************************************************** -* Streaming compression - HowTo -* -* A ZSTD_CStream object is required to track streaming operation. -* Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources. -* ZSTD_CStream objects can be reused multiple times on consecutive compression operations. -* It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory. -* -* For parallel execution, use one separate ZSTD_CStream per thread. -* -* note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing. -* -* Parameters are sticky : when starting a new compression on the same context, -* it will re-use the same sticky parameters as previous compression session. -* When in doubt, it's recommended to fully initialize the context before usage. -* Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(), -* ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to -* set more specific parameters, the pledged source size, or load a dictionary. 
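To make the decompression-side parameter flow concrete, here is a hedged C sketch that caps the window the decoder will accept via ZSTD_d_windowLogMax and then decompresses in one shot with ZSTD_decompressDCtx(); the helper name decompress_capped and the 27-bit cap are illustrative choices.

    #include <stdio.h>
    #include <zstd.h>

    /* One-shot decompression with a bounded window: frames needing more than
     * a 1<<27-byte window are rejected instead of allocating without limit. */
    static size_t decompress_capped(void *dst, size_t dstCapacity,
                                    const void *src, size_t srcSize)
    {
        size_t ret;
        ZSTD_DCtx *dctx = ZSTD_createDCtx();
        if (dctx == NULL)
            return (size_t)-1;  /* largest size_t, reads as an error via ZSTD_isError() */

        ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
        ret = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
        if (ZSTD_isError(ret))
            fprintf(stderr, "decompression failed: %s\n", ZSTD_getErrorName(ret));

        ZSTD_freeDCtx(dctx);
        return ret;
    }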
-* -* Use ZSTD_compressStream2() with ZSTD_e_continue as many times as necessary to -* consume input stream. The function will automatically update both `pos` -* fields within `input` and `output`. -* Note that the function may not consume the entire input, for example, because -* the output buffer is already full, in which case `input.pos < input.size`. -* The caller must check if input has been entirely consumed. -* If not, the caller must make some room to receive more compressed data, -* and then present again remaining input data. -* note: ZSTD_e_continue is guaranteed to make some forward progress when called, -* but doesn't guarantee maximal forward progress. This is especially relevant -* when compressing with multiple threads. The call won't block if it can -* consume some input, but if it can't it will wait for some, but not all, -* output to be flushed. -* @return : provides a minimum amount of data remaining to be flushed from internal buffers -* or an error code, which can be tested using ZSTD_isError(). -* -* At any moment, it's possible to flush whatever data might remain stuck within internal buffer, -* using ZSTD_compressStream2() with ZSTD_e_flush. `output->pos` will be updated. -* Note that, if `output->size` is too small, a single invocation with ZSTD_e_flush might not be enough (return code > 0). -* In which case, make some room to receive more compressed data, and call again ZSTD_compressStream2() with ZSTD_e_flush. -* You must continue calling ZSTD_compressStream2() with ZSTD_e_flush until it returns 0, at which point you can change the -* operation. -* note: ZSTD_e_flush will flush as much output as possible, meaning when compressing with multiple threads, it will -* block until the flush is complete or the output buffer is full. -* @return : 0 if internal buffers are entirely flushed, -* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size), -* or an error code, which can be tested using ZSTD_isError(). -* -* Calling ZSTD_compressStream2() with ZSTD_e_end instructs to finish a frame. -* It will perform a flush and write frame epilogue. -* The epilogue is required for decoders to consider a frame completed. -* flush operation is the same, and follows same rules as calling ZSTD_compressStream2() with ZSTD_e_flush. -* You must continue calling ZSTD_compressStream2() with ZSTD_e_end until it returns 0, at which point you are free to -* start a new frame. -* note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will -* block until the flush is complete or the output buffer is full. -* @return : 0 if frame fully completed and fully flushed, -* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size), -* or an error code, which can be tested using ZSTD_isError(). 
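The streaming-compression HowTo above maps onto a loop like the following; a minimal sketch modelled on the usage it describes (feed chunks with ZSTD_e_continue, finish the frame with ZSTD_e_end until it returns 0). The helper name compress_file and the FILE*-based I/O are assumptions made for the example.

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    static int compress_file(FILE *fin, FILE *fout, int level)
    {
        size_t const inSize  = ZSTD_CStreamInSize();   /* recommended buffer sizes */
        size_t const outSize = ZSTD_CStreamOutSize();
        void *inBuf  = malloc(inSize);
        void *outBuf = malloc(outSize);
        ZSTD_CCtx *cctx = ZSTD_createCCtx();
        int err = -1;

        if (!inBuf || !outBuf || !cctx) goto done;
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);

        for (;;) {
            size_t const readBytes = fread(inBuf, 1, inSize, fin);
            int const lastChunk = (readBytes < inSize);
            ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
            ZSTD_inBuffer input = { inBuf, readBytes, 0 };
            int finished;
            do {
                ZSTD_outBuffer output = { outBuf, outSize, 0 };
                size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
                if (ZSTD_isError(remaining)) goto done;
                fwrite(outBuf, 1, output.pos, fout);
                /* ZSTD_e_continue: done once the chunk is consumed;
                 * ZSTD_e_end: keep calling until the epilogue is fully flushed. */
                finished = lastChunk ? (remaining == 0) : (input.pos == input.size);
            } while (!finished);
            if (lastChunk) { err = 0; break; }
        }
    done:
        ZSTD_freeCCtx(cctx);
        free(inBuf);
        free(outBuf);
        return err;
    }

Because parameters are sticky, the same cctx can be reused for the next file after a ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only), as noted earlier.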
-* -* *******************************************************************/ - -typedef ZSTD_CCtx ZSTD_CStream; /**< CCtx and CStream are now effectively same object (>= v1.3.0) */ - /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */ -/*===== ZSTD_CStream management functions =====*/ -ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void); -ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs); /* accept NULL pointer */ - -/*===== Streaming compression functions =====*/ -typedef enum { - ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */ - ZSTD_e_flush=1, /* flush any data provided so far, - * it creates (at least) one new block, that can be decoded immediately on reception; - * frame will continue: any future data can still reference previously compressed data, improving compression. - * note : multithreaded compression will block to flush as much output as possible. */ - ZSTD_e_end=2 /* flush any remaining data _and_ close current frame. - * note that frame is only closed after compressed data is fully flushed (return value == 0). - * After that point, any additional data starts a new frame. - * note : each frame is independent (does not reference any content from previous frame). - : note : multithreaded compression will block to flush as much output as possible. */ -} ZSTD_EndDirective; - -/*! ZSTD_compressStream2() : Requires v1.4.0+ - * Behaves about the same as ZSTD_compressStream, with additional control on end directive. - * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*() - * - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode) - * - output->pos must be <= dstCapacity, input->pos must be <= srcSize - * - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit. - * - endOp must be a valid directive - * - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller. - * - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flush to output whatever is available, - * and then immediately returns, just indicating that there is some data remaining to be flushed. - * The function nonetheless guarantees forward progress : it will return only after it reads or write at least 1+ byte. - * - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking. - * - @return provides a minimum amount of data remaining to be flushed from internal buffers - * or an error code, which can be tested using ZSTD_isError(). - * if @return != 0, flush is not fully completed, there is still some data left within internal buffers. - * This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers. - * For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed. - * - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0), - * only ZSTD_e_end or ZSTD_e_flush operations are allowed. - * Before starting a new compression job, or changing compression parameters, - * it is required to fully flush internal buffers. 
- */ -ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, - ZSTD_outBuffer* output, - ZSTD_inBuffer* input, - ZSTD_EndDirective endOp); - - -/* These buffer sizes are softly recommended. - * They are not required : ZSTD_compressStream*() happily accepts any buffer size, for both input and output. - * Respecting the recommended size just makes it a bit easier for ZSTD_compressStream*(), - * reducing the amount of memory shuffling and buffering, resulting in minor performance savings. - * - * However, note that these recommendations are from the perspective of a C caller program. - * If the streaming interface is invoked from some other language, - * especially managed ones such as Java or Go, through a foreign function interface such as jni or cgo, - * a major performance rule is to reduce crossing such interface to an absolute minimum. - * It's not rare that performance ends being spent more into the interface, rather than compression itself. - * In which cases, prefer using large buffers, as large as practical, - * for both input and output, to reduce the nb of roundtrips. - */ -ZSTDLIB_API size_t ZSTD_CStreamInSize(void); /**< recommended size for input buffer */ -ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block. */ - - -/* ***************************************************************************** - * This following is a legacy streaming API, available since v1.0+ . - * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2(). - * It is redundant, but remains fully supported. - * Streaming in combination with advanced parameters and dictionary compression - * can only be used through the new API. - ******************************************************************************/ - -/*! - * Equivalent to: - * - * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) - * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); - */ -ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel); -/*! - * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue). - * NOTE: The return value is different. ZSTD_compressStream() returns a hint for - * the next read size (if non-zero and not an error). ZSTD_compressStream2() - * returns the minimum nb of bytes left to flush (if non-zero and not an error). - */ -ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input); -/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */ -ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output); -/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */ -ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output); - - -/*-*************************************************************************** -* Streaming decompression - HowTo -* -* A ZSTD_DStream object is required to track streaming operations. -* Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources. -* ZSTD_DStream objects can be re-used multiple times. -* -* Use ZSTD_initDStream() to start a new decompression operation. -* @return : recommended first input size -* Alternatively, use advanced API to set specific properties. -* -* Use ZSTD_decompressStream() repetitively to consume your input. -* The function will update both `pos` fields. 
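Before the remaining notes on ZSTD_decompressStream()'s return value, here is a sketch of the consume-and-re-present loop just introduced; the helper name decompress_file and the FILE* plumbing are illustrative assumptions.

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    static int decompress_file(FILE *fin, FILE *fout)
    {
        size_t const inSize  = ZSTD_DStreamInSize();
        size_t const outSize = ZSTD_DStreamOutSize();
        void *inBuf  = malloc(inSize);
        void *outBuf = malloc(outSize);
        ZSTD_DCtx *dctx = ZSTD_createDCtx();   /* DCtx and DStream are the same object */
        size_t lastRet = 0;
        int err = -1;

        if (!inBuf || !outBuf || !dctx) goto done;

        for (;;) {
            size_t const readBytes = fread(inBuf, 1, inSize, fin);
            ZSTD_inBuffer input = { inBuf, readBytes, 0 };
            if (readBytes == 0) break;
            while (input.pos < input.size) {       /* re-present unconsumed input */
                ZSTD_outBuffer output = { outBuf, outSize, 0 };
                lastRet = ZSTD_decompressStream(dctx, &output, &input);
                if (ZSTD_isError(lastRet)) goto done;
                fwrite(outBuf, 1, output.pos, fout);
            }
        }
        err = (lastRet == 0) ? 0 : -1;  /* 0 means the last frame was fully decoded and flushed */
    done:
        ZSTD_freeDCtx(dctx);
        free(inBuf);
        free(outBuf);
        return err;
    }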
-* If `input.pos < input.size`, some input has not been consumed. -* It's up to the caller to present again remaining data. -* The function tries to flush all data decoded immediately, respecting output buffer size. -* If `output.pos < output.size`, decoder has flushed everything it could. -* But if `output.pos == output.size`, there might be some data left within internal buffers., -* In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer. -* Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX. -* @return : 0 when a frame is completely decoded and fully flushed, -* or an error code, which can be tested using ZSTD_isError(), -* or any other value > 0, which means there is still some decoding or flushing to do to complete current frame : -* the return value is a suggested next input size (just a hint for better latency) -* that will never request more than the remaining frame size. -* *******************************************************************************/ - -typedef ZSTD_DCtx ZSTD_DStream; /**< DCtx and DStream are now effectively same object (>= v1.3.0) */ - /* For compatibility with versions <= v1.2.0, prefer differentiating them. */ -/*===== ZSTD_DStream management functions =====*/ -ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void); -ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); /* accept NULL pointer */ - -/*===== Streaming decompression functions =====*/ - -/* This function is redundant with the advanced API and equivalent to: - * - * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); - * ZSTD_DCtx_refDDict(zds, NULL); - */ -ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds); - -ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input); - -ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */ -ZSTDLIB_API size_t ZSTD_DStreamOutSize(void); /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */ - - -/************************** -* Simple dictionary API -***************************/ -/*! ZSTD_compress_usingDict() : - * Compression at an explicit compression level using a Dictionary. - * A dictionary can be any arbitrary data segment (also called a prefix), - * or a buffer with specified information (see zdict.h). - * Note : This function loads the dictionary, resulting in significant startup delay. - * It's intended for a dictionary used only once. - * Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */ -ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict,size_t dictSize, - int compressionLevel); - -/*! ZSTD_decompress_usingDict() : - * Decompression using a known Dictionary. - * Dictionary must be identical to the one used during compression. - * Note : This function loads the dictionary, resulting in significant startup delay. - * It's intended for a dictionary used only once. - * Note : When `dict == NULL || dictSize < 8` no dictionary is used. */ -ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict,size_t dictSize); - - -/*********************************** - * Bulk processing dictionary API - **********************************/ -typedef struct ZSTD_CDict_s ZSTD_CDict; - -/*! 
ZSTD_createCDict() : - * When compressing multiple messages or blocks using the same dictionary, - * it's recommended to digest the dictionary only once, since it's a costly operation. - * ZSTD_createCDict() will create a state from digesting a dictionary. - * The resulting state can be used for future compression operations with very limited startup cost. - * ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. - * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict. - * Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content. - * Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer, - * in which case the only thing that it transports is the @compressionLevel. - * This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively, - * expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */ -ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize, - int compressionLevel); - -/*! ZSTD_freeCDict() : - * Function frees memory allocated by ZSTD_createCDict(). - * If a NULL pointer is passed, no operation is performed. */ -ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict); - -/*! ZSTD_compress_usingCDict() : - * Compression using a digested Dictionary. - * Recommended when same dictionary is used multiple times. - * Note : compression level is _decided at dictionary creation time_, - * and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */ -ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const ZSTD_CDict* cdict); - - -typedef struct ZSTD_DDict_s ZSTD_DDict; - -/*! ZSTD_createDDict() : - * Create a digested dictionary, ready to start decompression operation without startup delay. - * dictBuffer can be released after DDict creation, as its content is copied inside DDict. */ -ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize); - -/*! ZSTD_freeDDict() : - * Function frees memory allocated with ZSTD_createDDict() - * If a NULL pointer is passed, no operation is performed. */ -ZSTDLIB_API size_t ZSTD_freeDDict(ZSTD_DDict* ddict); - -/*! ZSTD_decompress_usingDDict() : - * Decompression using a digested Dictionary. - * Recommended when same dictionary is used multiple times. */ -ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const ZSTD_DDict* ddict); - - -/******************************** - * Dictionary helper functions - *******************************/ - -/*! ZSTD_getDictID_fromDict() : Requires v1.4.0+ - * Provides the dictID stored within dictionary. - * if @return == 0, the dictionary is not conformant with Zstandard specification. - * It can still be loaded, but as a content-only dictionary. */ -ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize); - -/*! ZSTD_getDictID_fromCDict() : Requires v1.5.0+ - * Provides the dictID of the dictionary loaded into `cdict`. - * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. - * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ -ZSTDLIB_API unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict); - -/*! 
ZSTD_getDictID_fromDDict() : Requires v1.4.0+ - * Provides the dictID of the dictionary loaded into `ddict`. - * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. - * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ -ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict); - -/*! ZSTD_getDictID_fromFrame() : Requires v1.4.0+ - * Provides the dictID required to decompressed the frame stored within `src`. - * If @return == 0, the dictID could not be decoded. - * This could for one of the following reasons : - * - The frame does not require a dictionary to be decoded (most common case). - * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information. - * Note : this use case also happens when using a non-conformant dictionary. - * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`). - * - This is not a Zstandard frame. - * When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */ -ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); - - -/******************************************************************************* - * Advanced dictionary and prefix API (Requires v1.4.0+) - * - * This API allows dictionaries to be used with ZSTD_compress2(), - * ZSTD_compressStream2(), and ZSTD_decompressDCtx(). Dictionaries are sticky, and - * only reset with the context is reset with ZSTD_reset_parameters or - * ZSTD_reset_session_and_parameters. Prefixes are single-use. - ******************************************************************************/ - - -/*! ZSTD_CCtx_loadDictionary() : Requires v1.4.0+ - * Create an internal CDict from `dict` buffer. - * Decompression will have to use same dictionary. - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary, - * meaning "return to no-dictionary mode". - * Note 1 : Dictionary is sticky, it will be used for all future compressed frames. - * To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters). - * Note 2 : Loading a dictionary involves building tables. - * It's also a CPU consuming operation, with non-negligible impact on latency. - * Tables are dependent on compression parameters, and for this reason, - * compression parameters can no longer be changed after loading a dictionary. - * Note 3 :`dict` content will be copied internally. - * Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead. - * In such a case, dictionary buffer must outlive its users. - * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced() - * to precisely select how dictionary content must be interpreted. */ -ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); - -/*! ZSTD_CCtx_refCDict() : Requires v1.4.0+ - * Reference a prepared dictionary, to be used for all next compressed frames. - * Note that compression parameters are enforced from within CDict, - * and supersede any compression parameter previously set within CCtx. - * The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs. - * The ignored parameters will be used again if the CCtx is returned to no-dictionary mode. 
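Tying the bulk-dictionary functions above together, a digest-once/compress-many loop can look like the following sketch; the helper name compress_many, its array-of-buffers signature, and the error sentinel are illustrative assumptions rather than anything defined by zstd or this repository.

    #include <zstd.h>

    /* Digest a dictionary once, then reuse it for many small compressions,
     * as recommended for ZSTD_createCDict()/ZSTD_compress_usingCDict(). */
    static size_t compress_many(void *dsts[], size_t dstCaps[],
                                const void *srcs[], size_t srcSizes[], size_t n,
                                const void *dictBuf, size_t dictSize, int level)
    {
        size_t i, lastRet = 0;
        ZSTD_CDict *cdict = ZSTD_createCDict(dictBuf, dictSize, level);
        ZSTD_CCtx  *cctx  = ZSTD_createCCtx();
        if (!cdict || !cctx) { lastRet = (size_t)-1; goto done; }

        for (i = 0; i < n; i++) {
            lastRet = ZSTD_compress_usingCDict(cctx, dsts[i], dstCaps[i],
                                               srcs[i], srcSizes[i], cdict);
            if (ZSTD_isError(lastRet)) break;
        }
    done:
        ZSTD_freeCCtx(cctx);
        ZSTD_freeCDict(cdict);
        return lastRet;  /* last compressed size, or an error code */
    }

The advanced-API equivalent would reference the same CDict with ZSTD_CCtx_refCDict() and then call ZSTD_compress2(), as described in the ZSTD_CCtx_refCDict() notes above.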
- * The dictionary will remain valid for future compressed frames using same CCtx. - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Special : Referencing a NULL CDict means "return to no-dictionary mode". - * Note 1 : Currently, only one dictionary can be managed. - * Referencing a new dictionary effectively "discards" any previous one. - * Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */ -ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); - -/*! ZSTD_CCtx_refPrefix() : Requires v1.4.0+ - * Reference a prefix (single-usage dictionary) for next compressed frame. - * A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end). - * Decompression will need same prefix to properly regenerate data. - * Compressing with a prefix is similar in outcome as performing a diff and compressing it, - * but performs much faster, especially during decompression (compression speed is tunable with compression level). - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary - * Note 1 : Prefix buffer is referenced. It **must** outlive compression. - * Its content must remain unmodified during compression. - * Note 2 : If the intention is to diff some large src data blob with some prior version of itself, - * ensure that the window size is large enough to contain the entire source. - * See ZSTD_c_windowLog. - * Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters. - * It's a CPU consuming operation, with non-negligible impact on latency. - * If there is a need to use the same prefix multiple times, consider loadDictionary instead. - * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent). - * Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */ -ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, - const void* prefix, size_t prefixSize); - -/*! ZSTD_DCtx_loadDictionary() : Requires v1.4.0+ - * Create an internal DDict from dict buffer, - * to be used to decompress next frames. - * The dictionary remains valid for all future frames, until explicitly invalidated. - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary, - * meaning "return to no-dictionary mode". - * Note 1 : Loading a dictionary involves building tables, - * which has a non-negligible impact on CPU usage and latency. - * It's recommended to "load once, use many times", to amortize the cost - * Note 2 :`dict` content will be copied internally, so `dict` can be released after loading. - * Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead. - * Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of - * how dictionary content is loaded and interpreted. - */ -ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); - -/*! ZSTD_DCtx_refDDict() : Requires v1.4.0+ - * Reference a prepared dictionary, to be used to decompress next frames. - * The dictionary remains active for decompression of future frames using same DCtx. 
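A hedged sketch of the prefix ("diff") workflow: the old version of a buffer is referenced as a single-use prefix on both the compression and decompression side. The helper name delta_roundtrip is illustrative; for large inputs, remember from Note 2 above that ZSTD_c_windowLog must be large enough to cover the prefix.

    #include <zstd.h>

    /* Compress newV against oldV used as a single-use prefix, then decompress
     * with the same prefix referenced on the DCtx side.  The prefix buffer is
     * only referenced: it must stay alive and unmodified during both calls. */
    static size_t delta_roundtrip(void *dst, size_t dstCap,
                                  void *out, size_t outCap,
                                  const void *newV, size_t newSize,
                                  const void *oldV, size_t oldSize)
    {
        size_t csize = 0, dsize = (size_t)-1;
        ZSTD_CCtx *cctx = ZSTD_createCCtx();
        ZSTD_DCtx *dctx = ZSTD_createDCtx();
        if (!cctx || !dctx) goto done;

        ZSTD_CCtx_refPrefix(cctx, oldV, oldSize);   /* discarded at end of this frame */
        csize = ZSTD_compress2(cctx, dst, dstCap, newV, newSize);
        if (ZSTD_isError(csize)) { dsize = csize; goto done; }

        ZSTD_DCtx_refPrefix(dctx, oldV, oldSize);   /* must match the compression prefix */
        dsize = ZSTD_decompressDCtx(dctx, out, outCap, dst, csize);
    done:
        ZSTD_freeCCtx(cctx);
        ZSTD_freeDCtx(dctx);
        return dsize;
    }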
- * - * If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function - * will store the DDict references in a table, and the DDict used for decompression - * will be determined at decompression time, as per the dict ID in the frame. - * The memory for the table is allocated on the first call to refDDict, and can be - * freed with ZSTD_freeDCtx(). - * - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Note 1 : Currently, only one dictionary can be managed. - * Referencing a new dictionary effectively "discards" any previous one. - * Special: referencing a NULL DDict means "return to no-dictionary mode". - * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx. - */ -ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); - -/*! ZSTD_DCtx_refPrefix() : Requires v1.4.0+ - * Reference a prefix (single-usage dictionary) to decompress next frame. - * This is the reverse operation of ZSTD_CCtx_refPrefix(), - * and must use the same prefix as the one used during compression. - * Prefix is **only used once**. Reference is discarded at end of frame. - * End of frame is reached when ZSTD_decompressStream() returns 0. - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - * Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary - * Note 2 : Prefix buffer is referenced. It **must** outlive decompression. - * Prefix buffer must remain unmodified up to the end of frame, - * reached when ZSTD_decompressStream() returns 0. - * Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent). - * Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section) - * Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost. - * A full dictionary is more costly, as it requires building tables. - */ -ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, - const void* prefix, size_t prefixSize); - -/* === Memory management === */ - -/*! ZSTD_sizeof_*() : Requires v1.4.0+ - * These functions give the _current_ memory usage of selected object. - * Note that object memory usage can evolve (increase or decrease) over time. */ -ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx); -ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx); -ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs); -ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds); -ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict); -ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); - -#endif /* ZSTD_H_235446 */ - - -/* ************************************************************************************** - * ADVANCED AND EXPERIMENTAL FUNCTIONS - **************************************************************************************** - * The definitions in the following section are considered experimental. - * They are provided for advanced scenarios. - * They should never be used with a dynamic library, as prototypes may change in the future. - * Use them only in association with static linking. - * ***************************************************************************************/ - -#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY) -#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY - -/* This can be overridden externally to hide static symbols. 
*/ -#ifndef ZSTDLIB_STATIC_API -# if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) -# define ZSTDLIB_STATIC_API __declspec(dllexport) ZSTDLIB_VISIBLE -# elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) -# define ZSTDLIB_STATIC_API __declspec(dllimport) ZSTDLIB_VISIBLE -# else -# define ZSTDLIB_STATIC_API ZSTDLIB_VISIBLE -# endif -#endif - -/* Deprecation warnings : - * Should these warnings be a problem, it is generally possible to disable them, - * typically with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual. - * Otherwise, it's also possible to define ZSTD_DISABLE_DEPRECATE_WARNINGS. - */ -#ifdef ZSTD_DISABLE_DEPRECATE_WARNINGS -# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API /* disable deprecation warnings */ -#else -# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ -# define ZSTD_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_STATIC_API -# elif (defined(GNUC) && (GNUC > 4 || (GNUC == 4 && GNUC_MINOR >= 5))) || defined(__clang__) -# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __attribute__((deprecated(message))) -# elif defined(__GNUC__) && (__GNUC__ >= 3) -# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __attribute__((deprecated)) -# elif defined(_MSC_VER) -# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __declspec(deprecated(message)) -# else -# pragma message("WARNING: You need to implement ZSTD_DEPRECATED for this compiler") -# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API -# endif -#endif /* ZSTD_DISABLE_DEPRECATE_WARNINGS */ - -/**************************************************************************************** - * experimental API (static linking only) - **************************************************************************************** - * The following symbols and constants - * are not planned to join "stable API" status in the near future. - * They can still change in future versions. - * Some of them are planned to remain in the static_only section indefinitely. - * Some of them might be removed in the future (especially when redundant with existing stable functions) - * ***************************************************************************************/ - -#define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1) /* minimum input size required to query frame header size */ -#define ZSTD_FRAMEHEADERSIZE_MIN(format) ((format) == ZSTD_f_zstd1 ? 6 : 2) -#define ZSTD_FRAMEHEADERSIZE_MAX 18 /* can be useful for static allocation */ -#define ZSTD_SKIPPABLEHEADERSIZE 8 - -/* compression parameter bounds */ -#define ZSTD_WINDOWLOG_MAX_32 30 -#define ZSTD_WINDOWLOG_MAX_64 31 -#define ZSTD_WINDOWLOG_MAX ((int)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64)) -#define ZSTD_WINDOWLOG_MIN 10 -#define ZSTD_HASHLOG_MAX ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30) -#define ZSTD_HASHLOG_MIN 6 -#define ZSTD_CHAINLOG_MAX_32 29 -#define ZSTD_CHAINLOG_MAX_64 30 -#define ZSTD_CHAINLOG_MAX ((int)(sizeof(size_t) == 4 ? 
ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64)) -#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN -#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1) -#define ZSTD_SEARCHLOG_MIN 1 -#define ZSTD_MINMATCH_MAX 7 /* only for ZSTD_fast, other strategies are limited to 6 */ -#define ZSTD_MINMATCH_MIN 3 /* only for ZSTD_btopt+, faster strategies are limited to 4 */ -#define ZSTD_TARGETLENGTH_MAX ZSTD_BLOCKSIZE_MAX -#define ZSTD_TARGETLENGTH_MIN 0 /* note : comparing this constant to an unsigned results in a tautological test */ -#define ZSTD_STRATEGY_MIN ZSTD_fast -#define ZSTD_STRATEGY_MAX ZSTD_btultra2 - - -#define ZSTD_OVERLAPLOG_MIN 0 -#define ZSTD_OVERLAPLOG_MAX 9 - -#define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27 /* by default, the streaming decoder will refuse any frame - * requiring larger than (1< 0: - * If litLength != 0: - * rep == 1 --> offset == repeat_offset_1 - * rep == 2 --> offset == repeat_offset_2 - * rep == 3 --> offset == repeat_offset_3 - * If litLength == 0: - * rep == 1 --> offset == repeat_offset_2 - * rep == 2 --> offset == repeat_offset_3 - * rep == 3 --> offset == repeat_offset_1 - 1 - * - * Note: This field is optional. ZSTD_generateSequences() will calculate the value of - * 'rep', but repeat offsets do not necessarily need to be calculated from an external - * sequence provider's perspective. For example, ZSTD_compressSequences() does not - * use this 'rep' field at all (as of now). - */ -} ZSTD_Sequence; - -typedef struct { - unsigned windowLog; /**< largest match distance : larger == more compression, more memory needed during decompression */ - unsigned chainLog; /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */ - unsigned hashLog; /**< dispatch table : larger == faster, more memory */ - unsigned searchLog; /**< nb of searches : larger == more compression, slower */ - unsigned minMatch; /**< match length searched : larger == faster decompression, sometimes less compression */ - unsigned targetLength; /**< acceptable match size for optimal parser (only) : larger == more compression, slower */ - ZSTD_strategy strategy; /**< see ZSTD_strategy definition above */ -} ZSTD_compressionParameters; - -typedef struct { - int contentSizeFlag; /**< 1: content size will be in frame header (when known) */ - int checksumFlag; /**< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection */ - int noDictIDFlag; /**< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression) */ -} ZSTD_frameParameters; - -typedef struct { - ZSTD_compressionParameters cParams; - ZSTD_frameParameters fParams; -} ZSTD_parameters; - -typedef enum { - ZSTD_dct_auto = 0, /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */ - ZSTD_dct_rawContent = 1, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */ - ZSTD_dct_fullDict = 2 /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */ -} ZSTD_dictContentType_e; - -typedef enum { - ZSTD_dlm_byCopy = 0, /**< Copy dictionary content internally */ - ZSTD_dlm_byRef = 1 /**< Reference dictionary content -- the dictionary buffer must outlive its users. */ -} ZSTD_dictLoadMethod_e; - -typedef enum { - ZSTD_f_zstd1 = 0, /* zstd frame format, specified in zstd_compression_format.md (default) */ - ZSTD_f_zstd1_magicless = 1 /* Variant of zstd frame format, without initial 4-bytes magic number. 
- * Useful to save 4 bytes per generated frame. - * Decoder cannot recognise automatically this format, requiring this instruction. */ -} ZSTD_format_e; - -typedef enum { - /* Note: this enum controls ZSTD_d_forceIgnoreChecksum */ - ZSTD_d_validateChecksum = 0, - ZSTD_d_ignoreChecksum = 1 -} ZSTD_forceIgnoreChecksum_e; - -typedef enum { - /* Note: this enum controls ZSTD_d_refMultipleDDicts */ - ZSTD_rmd_refSingleDDict = 0, - ZSTD_rmd_refMultipleDDicts = 1 -} ZSTD_refMultipleDDicts_e; - -typedef enum { - /* Note: this enum and the behavior it controls are effectively internal - * implementation details of the compressor. They are expected to continue - * to evolve and should be considered only in the context of extremely - * advanced performance tuning. - * - * Zstd currently supports the use of a CDict in three ways: - * - * - The contents of the CDict can be copied into the working context. This - * means that the compression can search both the dictionary and input - * while operating on a single set of internal tables. This makes - * the compression faster per-byte of input. However, the initial copy of - * the CDict's tables incurs a fixed cost at the beginning of the - * compression. For small compressions (< 8 KB), that copy can dominate - * the cost of the compression. - * - * - The CDict's tables can be used in-place. In this model, compression is - * slower per input byte, because the compressor has to search two sets of - * tables. However, this model incurs no start-up cost (as long as the - * working context's tables can be reused). For small inputs, this can be - * faster than copying the CDict's tables. - * - * - The CDict's tables are not used at all, and instead we use the working - * context alone to reload the dictionary and use params based on the source - * size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict(). - * This method is effective when the dictionary sizes are very small relative - * to the input size, and the input size is fairly large to begin with. - * - * Zstd has a simple internal heuristic that selects which strategy to use - * at the beginning of a compression. However, if experimentation shows that - * Zstd is making poor choices, it is possible to override that choice with - * this enum. - */ - ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */ - ZSTD_dictForceAttach = 1, /* Never copy the dictionary. */ - ZSTD_dictForceCopy = 2, /* Always copy the dictionary. */ - ZSTD_dictForceLoad = 3 /* Always reload the dictionary */ -} ZSTD_dictAttachPref_e; - -typedef enum { - ZSTD_lcm_auto = 0, /**< Automatically determine the compression mode based on the compression level. - * Negative compression levels will be uncompressed, and positive compression - * levels will be compressed. */ - ZSTD_lcm_huffman = 1, /**< Always attempt Huffman compression. Uncompressed literals will still be - * emitted if Huffman compression is not profitable. */ - ZSTD_lcm_uncompressed = 2 /**< Always emit uncompressed literals. */ -} ZSTD_literalCompressionMode_e; - -typedef enum { - /* Note: This enum controls features which are conditionally beneficial. Zstd typically will make a final - * decision on whether or not to enable the feature (ZSTD_ps_auto), but setting the switch to ZSTD_ps_enable - * or ZSTD_ps_disable allow for a force enable/disable the feature. 
- */ - ZSTD_ps_auto = 0, /* Let the library automatically determine whether the feature shall be enabled */ - ZSTD_ps_enable = 1, /* Force-enable the feature */ - ZSTD_ps_disable = 2 /* Do not use the feature */ -} ZSTD_paramSwitch_e; - -/*************************************** -* Frame size functions -***************************************/ - -/*! ZSTD_findDecompressedSize() : - * `src` should point to the start of a series of ZSTD encoded and/or skippable frames - * `srcSize` must be the _exact_ size of this series - * (i.e. there should be a frame boundary at `src + srcSize`) - * @return : - decompressed size of all data in all successive frames - * - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN - * - if an error occurred: ZSTD_CONTENTSIZE_ERROR - * - * note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode. - * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size. - * In which case, it's necessary to use streaming mode to decompress data. - * note 2 : decompressed size is always present when compression is done with ZSTD_compress() - * note 3 : decompressed size can be very large (64-bits value), - * potentially larger than what local system can handle as a single memory segment. - * In which case, it's necessary to use streaming mode to decompress data. - * note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified. - * Always ensure result fits within application's authorized limits. - * Each application can set its own limits. - * note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to - * read each contained frame header. This is fast as most of the data is skipped, - * however it does mean that all frame data must be present and valid. */ -ZSTDLIB_STATIC_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize); - -/*! ZSTD_decompressBound() : - * `src` should point to the start of a series of ZSTD encoded and/or skippable frames - * `srcSize` must be the _exact_ size of this series - * (i.e. there should be a frame boundary at `src + srcSize`) - * @return : - upper-bound for the decompressed size of all data in all successive frames - * - if an error occurred: ZSTD_CONTENTSIZE_ERROR - * - * note 1 : an error can occur if `src` contains an invalid or incorrectly formatted frame. - * note 2 : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`. - * in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value. - * note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by: - * upper-bound = # blocks * min(128 KB, Window_Size) - */ -ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize); - -/*! ZSTD_frameHeaderSize() : - * srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX. - * @return : size of the Frame Header, - * or an error code (if srcSize is too small) */ -ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize); - -typedef enum { - ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */ - ZSTD_sf_explicitBlockDelimiters = 1 /* Representation of ZSTD_Sequence contains explicit block delimiters */ -} ZSTD_sequenceFormat_e; - -/*! ZSTD_generateSequences() : - * Generate sequences using ZSTD_compress2, given a source buffer. 
- * - * Each block will end with a dummy sequence - * with offset == 0, matchLength == 0, and litLength == length of last literals. - * litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0) - * simply acts as a block delimiter. - * - * zc can be used to insert custom compression params. - * This function invokes ZSTD_compress2 - * - * The output of this function can be fed into ZSTD_compressSequences() with CCtx - * setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters - * @return : number of sequences generated - */ - -ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, - size_t outSeqsSize, const void* src, size_t srcSize); - -/*! ZSTD_mergeBlockDelimiters() : - * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals - * by merging them into into the literals of the next sequence. - * - * As such, the final generated result has no explicit representation of block boundaries, - * and the final last literals segment is not represented in the sequences. - * - * The output of this function can be fed into ZSTD_compressSequences() with CCtx - * setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters - * @return : number of sequences left after merging - */ -ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize); - -/*! ZSTD_compressSequences() : - * Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst. - * If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.) - * The entire source is compressed into a single frame. - * - * The compression behavior changes based on cctx params. In particular: - * If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain - * no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on - * the block size derived from the cctx, and sequences may be split. This is the default setting. - * - * If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain - * block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided. - * - * If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined - * behavior. If ZSTD_c_validateSequences == 1, then if sequence is invalid (see doc/zstd_compression_format.md for - * specifics regarding offset/matchlength requirements) then the function will bail out and return an error. - * - * In addition to the two adjustable experimental params, there are other important cctx params. - * - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN. - * - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression. - * - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset - * is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md - * - * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused. 
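The sequence-level pipeline described above (ZSTD_generateSequences() feeding ZSTD_compressSequences() with explicit block delimiters) can be wired together as in this sketch. It requires ZSTD_STATIC_LINKING_ONLY and static linking; the helper name recompress_via_sequences and the srcSize+1 sequence-buffer bound are assumptions made for the example, not guidance from the header.

    #define ZSTD_STATIC_LINKING_ONLY   /* experimental API: static linking only */
    #include <stdlib.h>
    #include <zstd.h>

    /* Round-trip a buffer through the sequence API: generate sequences with
     * one CCtx, then re-encode them with ZSTD_compressSequences() on a CCtx
     * configured for explicit block delimiters. */
    static size_t recompress_via_sequences(void *dst, size_t dstCap,
                                           const void *src, size_t srcSize)
    {
        size_t nbSeqs, csize = (size_t)-1;
        /* one sequence per byte plus one: a generous bound for this sketch */
        ZSTD_Sequence *seqs = malloc(sizeof(*seqs) * (srcSize + 1));
        ZSTD_CCtx *gen = ZSTD_createCCtx();
        ZSTD_CCtx *enc = ZSTD_createCCtx();
        if (!seqs || !gen || !enc) goto done;

        nbSeqs = ZSTD_generateSequences(gen, seqs, srcSize + 1, src, srcSize);
        if (ZSTD_isError(nbSeqs)) { csize = nbSeqs; goto done; }

        ZSTD_CCtx_setParameter(enc, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
        ZSTD_CCtx_setParameter(enc, ZSTD_c_validateSequences, 1);
        csize = ZSTD_compressSequences(enc, dst, dstCap, seqs, nbSeqs, src, srcSize);
    done:
        ZSTD_freeCCtx(gen);
        ZSTD_freeCCtx(enc);
        free(seqs);
        return csize;
    }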
- * Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly, - * and cannot emit an RLE block that disagrees with the repcode history - * @return : final compressed size or a ZSTD error. - */ -ZSTDLIB_STATIC_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize, - const ZSTD_Sequence* inSeqs, size_t inSeqsSize, - const void* src, size_t srcSize); - - -/*! ZSTD_writeSkippableFrame() : - * Generates a zstd skippable frame containing data given by src, and writes it to dst buffer. - * - * Skippable frames begin with a a 4-byte magic number. There are 16 possible choices of magic number, - * ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15. - * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so - * the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant. - * - * Returns an error if destination buffer is not large enough, if the source size is not representable - * with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid). - * - * @return : number of bytes written or a ZSTD error. - */ -ZSTDLIB_STATIC_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity, - const void* src, size_t srcSize, unsigned magicVariant); - -/*! ZSTD_readSkippableFrame() : - * Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer. - * - * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written, - * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested - * in the magicVariant. - * - * Returns an error if destination buffer is not large enough, or if the frame is not skippable. - * - * @return : number of bytes written or a ZSTD error. - */ -ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant, - const void* src, size_t srcSize); - -/*! ZSTD_isSkippableFrame() : - * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame. - */ -ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size); - - - -/*************************************** -* Memory management -***************************************/ - -/*! ZSTD_estimate*() : - * These functions make it possible to estimate memory usage - * of a future {D,C}Ctx, before its creation. - * - * ZSTD_estimateCCtxSize() will provide a memory budget large enough - * for any compression level up to selected one. - * Note : Unlike ZSTD_estimateCStreamSize*(), this estimate - * does not include space for a window buffer. - * Therefore, the estimation is only guaranteed for single-shot compressions, not streaming. - * The estimate will assume the input may be arbitrarily large, - * which is the worst case. - * - * When srcSize can be bound by a known and rather "small" value, - * this fact can be used to provide a tighter estimation - * because the CCtx compression context will need less memory. - * This tighter estimation can be provided by more advanced functions - * ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(), - * and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter(). - * Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits. 
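As a rough illustration of the skippable-frame write/read pair documented above, useful for carrying metadata that ordinary zstd decoders will simply skip, here is a C sketch. The helper name skippable_roundtrip, the fixed 4 KiB buffers, and magic variant 1 are arbitrary choices for the example, and the experimental section (ZSTD_STATIC_LINKING_ONLY) must be enabled.

    #define ZSTD_STATIC_LINKING_ONLY
    #include <string.h>
    #include <zstd.h>

    /* Wrap a metadata blob in a zstd skippable frame and read it back. */
    static int skippable_roundtrip(const void *meta, size_t metaSize)
    {
        char frame[4096];   /* must hold metaSize + ZSTD_SKIPPABLEHEADERSIZE (8) bytes */
        char out[4096];
        unsigned variant = 0;
        size_t frameSize, outSize;

        frameSize = ZSTD_writeSkippableFrame(frame, sizeof(frame), meta, metaSize, 1);
        if (ZSTD_isError(frameSize)) return -1;

        if (!ZSTD_isSkippableFrame(frame, frameSize)) return -1;

        outSize = ZSTD_readSkippableFrame(out, sizeof(out), &variant, frame, frameSize);
        if (ZSTD_isError(outSize)) return -1;

        return (variant == 1 && outSize == metaSize &&
                memcmp(out, meta, metaSize) == 0) ? 0 : -1;
    }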
- * - * Note 2 : only single-threaded compression is supported. - * ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. - */ -ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int compressionLevel); -ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams); -ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params); -ZSTDLIB_STATIC_API size_t ZSTD_estimateDCtxSize(void); - -/*! ZSTD_estimateCStreamSize() : - * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one. - * It will also consider src size to be arbitrarily "large", which is worst case. - * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation. - * ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel. - * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1. - * Note : CStream size estimation is only correct for single-threaded compression. - * ZSTD_DStream memory budget depends on window Size. - * This information can be passed manually, using ZSTD_estimateDStreamSize, - * or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame(); - * Note : if streaming is init with function ZSTD_init?Stream_usingDict(), - * an internal ?Dict will be created, which additional size is not estimated here. - * In this case, get total size by adding ZSTD_estimate?DictSize */ -ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int compressionLevel); -ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams); -ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params); -ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize(size_t windowSize); -ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize); - -/*! ZSTD_estimate?DictSize() : - * ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict(). - * ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced(). - * Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller. - */ -ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel); -ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod); -ZSTDLIB_STATIC_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod); - -/*! ZSTD_initStatic*() : - * Initialize an object using a pre-allocated fixed-size buffer. - * workspace: The memory area to emplace the object into. - * Provided pointer *must be 8-bytes aligned*. - * Buffer must outlive object. - * workspaceSize: Use ZSTD_estimate*Size() to determine - * how large workspace must be to support target scenario. - * @return : pointer to object (same address as workspace, just different type), - * or NULL if error (size too small, incorrect alignment, etc.) - * Note : zstd will never resize nor malloc() when using a static buffer. 
- * If the object requires more memory than available, - * zstd will just error out (typically ZSTD_error_memory_allocation). - * Note 2 : there is no corresponding "free" function. - * Since workspace is allocated externally, it must be freed externally too. - * Note 3 : cParams : use ZSTD_getCParams() to convert a compression level - * into its associated cParams. - * Limitation 1 : currently not compatible with internal dictionary creation, triggered by - * ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict(). - * Limitation 2 : static cctx currently not compatible with multi-threading. - * Limitation 3 : static dctx is incompatible with legacy support. - */ -ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize); -ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticCCtx() */ - -ZSTDLIB_STATIC_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize); -ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticDCtx() */ - -ZSTDLIB_STATIC_API const ZSTD_CDict* ZSTD_initStaticCDict( - void* workspace, size_t workspaceSize, - const void* dict, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - ZSTD_compressionParameters cParams); - -ZSTDLIB_STATIC_API const ZSTD_DDict* ZSTD_initStaticDDict( - void* workspace, size_t workspaceSize, - const void* dict, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType); - - -/*! Custom memory allocation : - * These prototypes make it possible to pass your own allocation/free functions. - * ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below. - * All allocation/free operations will be completed using these custom variants instead of regular ones. - */ -typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size); -typedef void (*ZSTD_freeFunction) (void* opaque, void* address); -typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem; -static -#ifdef __GNUC__ -__attribute__((__unused__)) -#endif -ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */ - -ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem); -ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem); -ZSTDLIB_STATIC_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem); -ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem); - -ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - ZSTD_compressionParameters cParams, - ZSTD_customMem customMem); - -/*! Thread pool : - * These prototypes make it possible to share a thread pool among multiple compression contexts. - * This can limit resources for applications with multiple threads where each one uses - * a threaded compression mode (via ZSTD_c_nbWorkers parameter). - * ZSTD_createThreadPool creates a new thread pool with a given number of threads. - * Note that the lifetime of such pool must exist while being used. - * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value - * to use an internal thread pool). 
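As a sketch of the ZSTD_customMem hooks above, the following routes a context's allocations through counting wrappers. The names counting_alloc/counting_free and the cumulative-bytes accounting are illustrative, and the _advanced() creators live in the static-linking-only section.

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_customMem and the _advanced() creators */
    #include <stdlib.h>
    #include <zstd.h>

    /* Allocation hook: tally bytes handed out, then defer to malloc(). */
    static void *counting_alloc(void *opaque, size_t size)
    {
        void *p = malloc(size);
        if (p != NULL)
            *(size_t *)opaque += size;   /* cumulative allocations only; frees are not subtracted */
        return p;
    }

    static void counting_free(void *opaque, void *address)
    {
        (void)opaque;
        free(address);
    }

    /* Every allocation made by the returned CCtx goes through the hooks above. */
    static ZSTD_CCtx *create_counting_cctx(size_t *totalAllocated)
    {
        ZSTD_customMem const cmem = { counting_alloc, counting_free, totalAllocated };
        return ZSTD_createCCtx_advanced(cmem);
    }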
- * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer. - */ -typedef struct POOL_ctx_s ZSTD_threadPool; -ZSTDLIB_STATIC_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads); -ZSTDLIB_STATIC_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool); /* accept NULL pointer */ -ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool); - - -/* - * This API is temporary and is expected to change or disappear in the future! - */ -ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_advanced2( - const void* dict, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - const ZSTD_CCtx_params* cctxParams, - ZSTD_customMem customMem); - -ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_advanced( - const void* dict, size_t dictSize, - ZSTD_dictLoadMethod_e dictLoadMethod, - ZSTD_dictContentType_e dictContentType, - ZSTD_customMem customMem); - - -/*************************************** -* Advanced compression functions -***************************************/ - -/*! ZSTD_createCDict_byReference() : - * Create a digested dictionary for compression - * Dictionary content is just referenced, not duplicated. - * As a consequence, `dictBuffer` **must** outlive CDict, - * and its content must remain unmodified throughout the lifetime of CDict. - * note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */ -ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel); - -/*! ZSTD_getCParams() : - * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize. - * `estimatedSrcSize` value is optional, select 0 if not known */ -ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); - -/*! ZSTD_getParams() : - * same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`. - * All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */ -ZSTDLIB_STATIC_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); - -/*! ZSTD_checkCParams() : - * Ensure param values remain within authorized range. - * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */ -ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params); - -/*! ZSTD_adjustCParams() : - * optimize params for a given `srcSize` and `dictSize`. - * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN. - * `dictSize` must be `0` when there is no dictionary. - * cPar can be invalid : all parameters will be clamped within valid range in the @return struct. - * This function never fails (wide contract) */ -ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize); - -/*! ZSTD_compress_advanced() : - * Note : this function is now DEPRECATED. - * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters. - * This prototype will generate compilation warnings. */ -ZSTD_DEPRECATED("use ZSTD_compress2") -size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const void* dict,size_t dictSize, - ZSTD_parameters params); - -/*! 
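/* Illustrative sketch (not part of the vendored header or of this patch): one way to use
 * the ZSTD_estimateCCtxSize() / ZSTD_initStaticCCtx() pattern documented above.
 * ZSTD_compressCCtx(), ZSTD_compressBound() and ZSTD_isError() come from the stable
 * zstd.h API, which is not shown in this diff. */
#include <stdlib.h>
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_estimate*() / ZSTD_initStatic*() live behind this */
#include <zstd.h>

/* Compress src into dst using a caller-provided workspace, so the library never mallocs.
 * Returns the compressed size, or 0 on failure; size dst with ZSTD_compressBound(srcSize). */
static size_t compress_with_static_cctx(void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize, int level)
{
    size_t const wkspSize = ZSTD_estimateCCtxSize(level); /* budget for any level up to `level` */
    void* const wksp = malloc(wkspSize);                  /* malloc() satisfies the 8-byte alignment rule */
    if (wksp == NULL) return 0;

    ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
    if (cctx == NULL) { free(wksp); return 0; }           /* workspace too small or misaligned */

    size_t const csize = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, level);
    free(wksp);                                           /* no ZSTD_freeCCtx() for a static cctx */
    return ZSTD_isError(csize) ? 0 : csize;
}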
ZSTD_compress_usingCDict_advanced() : - * Note : this function is now DEPRECATED. - * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters. - * This prototype will generate compilation warnings. */ -ZSTD_DEPRECATED("use ZSTD_compress2 with ZSTD_CCtx_loadDictionary") -size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const ZSTD_CDict* cdict, - ZSTD_frameParameters fParams); - - -/*! ZSTD_CCtx_loadDictionary_byReference() : - * Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx. - * It saves some memory, but also requires that `dict` outlives its usage within `cctx` */ -ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); - -/*! ZSTD_CCtx_loadDictionary_advanced() : - * Same as ZSTD_CCtx_loadDictionary(), but gives finer control over - * how to load the dictionary (by copy ? by reference ?) - * and how to interpret it (automatic ? force raw mode ? full mode only ?) */ -ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); - -/*! ZSTD_CCtx_refPrefix_advanced() : - * Same as ZSTD_CCtx_refPrefix(), but gives finer control over - * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */ -ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); - -/* === experimental parameters === */ -/* these parameters can be used with ZSTD_setParameter() - * they are not guaranteed to remain supported in the future */ - - /* Enables rsyncable mode, - * which makes compressed files more rsync friendly - * by adding periodic synchronization points to the compressed data. - * The target average block size is ZSTD_c_jobSize / 2. - * It's possible to modify the job size to increase or decrease - * the granularity of the synchronization point. - * Once the jobSize is smaller than the window size, - * it will result in compression ratio degradation. - * NOTE 1: rsyncable mode only works when multithreading is enabled. - * NOTE 2: rsyncable performs poorly in combination with long range mode, - * since it will decrease the effectiveness of synchronization points, - * though mileage may vary. - * NOTE 3: Rsyncable mode limits maximum compression speed to ~400 MB/s. - * If the selected compression level is already running significantly slower, - * the overall speed won't be significantly impacted. - */ - #define ZSTD_c_rsyncable ZSTD_c_experimentalParam1 - -/* Select a compression format. - * The value must be of type ZSTD_format_e. - * See ZSTD_format_e enum definition for details */ -#define ZSTD_c_format ZSTD_c_experimentalParam2 - -/* Force back-reference distances to remain < windowSize, - * even when referencing into Dictionary content (default:0) */ -#define ZSTD_c_forceMaxWindow ZSTD_c_experimentalParam3 - -/* Controls whether the contents of a CDict - * are used in place, or copied into the working context. - * Accepts values from the ZSTD_dictAttachPref_e enum. - * See the comments on that enum for an explanation of the feature. */ -#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4 - -/* Controlled with ZSTD_paramSwitch_e enum. - * Default is ZSTD_ps_auto. 
- * Set to ZSTD_ps_disable to never compress literals. - * Set to ZSTD_ps_enable to always compress literals. (Note: uncompressed literals - * may still be emitted if huffman is not beneficial to use.) - * - * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use - * literals compression based on the compression parameters - specifically, - * negative compression levels do not use literal compression. - */ -#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5 - -/* Tries to fit compressed block size to be around targetCBlockSize. - * No target when targetCBlockSize == 0. - * There is no guarantee on compressed block size (default:0) */ -#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6 - -/* User's best guess of source size. - * Hint is not valid when srcSizeHint == 0. - * There is no guarantee that hint is close to actual source size, - * but compression ratio may regress significantly if guess considerably underestimates */ -#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7 - -/* Controls whether the new and experimental "dedicated dictionary search - * structure" can be used. This feature is still rough around the edges, be - * prepared for surprising behavior! - * - * How to use it: - * - * When using a CDict, whether to use this feature or not is controlled at - * CDict creation, and it must be set in a CCtxParams set passed into that - * construction (via ZSTD_createCDict_advanced2()). A compression will then - * use the feature or not based on how the CDict was constructed; the value of - * this param, set in the CCtx, will have no effect. - * - * However, when a dictionary buffer is passed into a CCtx, such as via - * ZSTD_CCtx_loadDictionary(), this param can be set on the CCtx to control - * whether the CDict that is created internally can use the feature or not. - * - * What it does: - * - * Normally, the internal data structures of the CDict are analogous to what - * would be stored in a CCtx after compressing the contents of a dictionary. - * To an approximation, a compression using a dictionary can then use those - * data structures to simply continue what is effectively a streaming - * compression where the simulated compression of the dictionary left off. - * Which is to say, the search structures in the CDict are normally the same - * format as in the CCtx. - * - * It is possible to do better, since the CDict is not like a CCtx: the search - * structures are written once during CDict creation, and then are only read - * after that, while the search structures in the CCtx are both read and - * written as the compression goes along. This means we can choose a search - * structure for the dictionary that is read-optimized. - * - * This feature enables the use of that different structure. - * - * Note that some of the members of the ZSTD_compressionParameters struct have - * different semantics and constraints in the dedicated search structure. It is - * highly recommended that you simply set a compression level in the CCtxParams - * you pass into the CDict creation call, and avoid messing with the cParams - * directly. - * - * Effects: - * - * This will only have any effect when the selected ZSTD_strategy - * implementation supports this feature. Currently, that's limited to - * ZSTD_greedy, ZSTD_lazy, and ZSTD_lazy2. - * - * Note that this means that the CDict tables can no longer be copied into the - * CCtx, so the dict attachment mode ZSTD_dictForceCopy will no longer be - * usable. 
The dictionary can only be attached or reloaded. - * - * In general, you should expect compression to be faster--sometimes very much - * so--and CDict creation to be slightly slower. Eventually, we will probably - * make this mode the default. - */ -#define ZSTD_c_enableDedicatedDictSearch ZSTD_c_experimentalParam8 - -/* ZSTD_c_stableInBuffer - * Experimental parameter. - * Default is 0 == disabled. Set to 1 to enable. - * - * Tells the compressor that the ZSTD_inBuffer will ALWAYS be the same - * between calls, except for the modifications that zstd makes to pos (the - * caller must not modify pos). This is checked by the compressor, and - * compression will fail if it ever changes. This means the only flush - * mode that makes sense is ZSTD_e_end, so zstd will error if ZSTD_e_end - * is not used. The data in the ZSTD_inBuffer in the range [src, src + pos) - * MUST not be modified during compression or you will get data corruption. - * - * When this flag is enabled zstd won't allocate an input window buffer, - * because the user guarantees it can reference the ZSTD_inBuffer until - * the frame is complete. But, it will still allocate an output buffer - * large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also - * avoid the memcpy() from the input buffer to the input window buffer. - * - * NOTE: ZSTD_compressStream2() will error if ZSTD_e_end is not used. - * That means this flag cannot be used with ZSTD_compressStream(). - * - * NOTE: So long as the ZSTD_inBuffer always points to valid memory, using - * this flag is ALWAYS memory safe, and will never access out-of-bounds - * memory. However, compression WILL fail if you violate the preconditions. - * - * WARNING: The data in the ZSTD_inBuffer in the range [dst, dst + pos) MUST - * not be modified during compression or you will get data corruption. This - * is because zstd needs to reference data in the ZSTD_inBuffer to find - * matches. Normally zstd maintains its own window buffer for this purpose, - * but passing this flag tells zstd to use the user provided buffer. - */ -#define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9 - -/* ZSTD_c_stableOutBuffer - * Experimental parameter. - * Default is 0 == disabled. Set to 1 to enable. - * - * Tells he compressor that the ZSTD_outBuffer will not be resized between - * calls. Specifically: (out.size - out.pos) will never grow. This gives the - * compressor the freedom to say: If the compressed data doesn't fit in the - * output buffer then return ZSTD_error_dstSizeTooSmall. This allows us to - * always decompress directly into the output buffer, instead of decompressing - * into an internal buffer and copying to the output buffer. - * - * When this flag is enabled zstd won't allocate an output buffer, because - * it can write directly to the ZSTD_outBuffer. It will still allocate the - * input window buffer (see ZSTD_c_stableInBuffer). - * - * Zstd will check that (out.size - out.pos) never grows and return an error - * if it does. While not strictly necessary, this should prevent surprises. - */ -#define ZSTD_c_stableOutBuffer ZSTD_c_experimentalParam10 - -/* ZSTD_c_blockDelimiters - * Default is 0 == ZSTD_sf_noBlockDelimiters. - * - * For use with sequence compression API: ZSTD_compressSequences(). - * - * Designates whether or not the given array of ZSTD_Sequence contains block delimiters - * and last literals, which are defined as sequences with offset == 0 and matchLength == 0. - * See the definition of ZSTD_Sequence for more specifics. 
- */ -#define ZSTD_c_blockDelimiters ZSTD_c_experimentalParam11 - -/* ZSTD_c_validateSequences - * Default is 0 == disabled. Set to 1 to enable sequence validation. - * - * For use with sequence compression API: ZSTD_compressSequences(). - * Designates whether or not we validate sequences provided to ZSTD_compressSequences() - * during function execution. - * - * Without validation, providing a sequence that does not conform to the zstd spec will cause - * undefined behavior, and may produce a corrupted block. - * - * With validation enabled, a if sequence is invalid (see doc/zstd_compression_format.md for - * specifics regarding offset/matchlength requirements) then the function will bail out and - * return an error. - * - */ -#define ZSTD_c_validateSequences ZSTD_c_experimentalParam12 - -/* ZSTD_c_useBlockSplitter - * Controlled with ZSTD_paramSwitch_e enum. - * Default is ZSTD_ps_auto. - * Set to ZSTD_ps_disable to never use block splitter. - * Set to ZSTD_ps_enable to always use block splitter. - * - * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use - * block splitting based on the compression parameters. - */ -#define ZSTD_c_useBlockSplitter ZSTD_c_experimentalParam13 - -/* ZSTD_c_useRowMatchFinder - * Controlled with ZSTD_paramSwitch_e enum. - * Default is ZSTD_ps_auto. - * Set to ZSTD_ps_disable to never use row-based matchfinder. - * Set to ZSTD_ps_enable to force usage of row-based matchfinder. - * - * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use - * the row-based matchfinder based on support for SIMD instructions and the window log. - * Note that this only pertains to compression strategies: greedy, lazy, and lazy2 - */ -#define ZSTD_c_useRowMatchFinder ZSTD_c_experimentalParam14 - -/* ZSTD_c_deterministicRefPrefix - * Default is 0 == disabled. Set to 1 to enable. - * - * Zstd produces different results for prefix compression when the prefix is - * directly adjacent to the data about to be compressed vs. when it isn't. - * This is because zstd detects that the two buffers are contiguous and it can - * use a more efficient match finding algorithm. However, this produces different - * results than when the two buffers are non-contiguous. This flag forces zstd - * to always load the prefix in non-contiguous mode, even if it happens to be - * adjacent to the data, to guarantee determinism. - * - * If you really care about determinism when using a dictionary or prefix, - * like when doing delta compression, you should select this option. It comes - * at a speed penalty of about ~2.5% if the dictionary and data happened to be - * contiguous, and is free if they weren't contiguous. We don't expect that - * intentionally making the dictionary and data contiguous will be worth the - * cost to memcpy() the data. - */ -#define ZSTD_c_deterministicRefPrefix ZSTD_c_experimentalParam15 - -/*! ZSTD_CCtx_getParameter() : - * Get the requested compression parameter value, selected by enum ZSTD_cParameter, - * and store it into int* value. - * @return : 0, or an error code (which can be tested with ZSTD_isError()). - */ -ZSTDLIB_STATIC_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value); - - -/*! ZSTD_CCtx_params : - * Quick howto : - * - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure - * - ZSTD_CCtxParams_setParameter() : Push parameters one by one into - * an existing ZSTD_CCtx_params structure. - * This is similar to - * ZSTD_CCtx_setParameter(). 
- * - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to - * an existing CCtx. - * These parameters will be applied to - * all subsequent frames. - * - ZSTD_compressStream2() : Do compression using the CCtx. - * - ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer. - * - * This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams() - * for static allocation of CCtx for single-threaded compression. - */ -ZSTDLIB_STATIC_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void); -ZSTDLIB_STATIC_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params); /* accept NULL pointer */ - -/*! ZSTD_CCtxParams_reset() : - * Reset params to default values. - */ -ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params); - -/*! ZSTD_CCtxParams_init() : - * Initializes the compression parameters of cctxParams according to - * compression level. All other parameters are reset to their default values. - */ -ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel); - -/*! ZSTD_CCtxParams_init_advanced() : - * Initializes the compression and frame parameters of cctxParams according to - * params. All other parameters are reset to their default values. - */ -ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params); - -/*! ZSTD_CCtxParams_setParameter() : Requires v1.4.0+ - * Similar to ZSTD_CCtx_setParameter. - * Set one compression parameter, selected by enum ZSTD_cParameter. - * Parameters must be applied to a ZSTD_CCtx using - * ZSTD_CCtx_setParametersUsingCCtxParams(). - * @result : a code representing success or failure (which can be tested with - * ZSTD_isError()). - */ -ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value); - -/*! ZSTD_CCtxParams_getParameter() : - * Similar to ZSTD_CCtx_getParameter. - * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter. - * @result : 0, or an error code (which can be tested with ZSTD_isError()). - */ -ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value); - -/*! ZSTD_CCtx_setParametersUsingCCtxParams() : - * Apply a set of ZSTD_CCtx_params to the compression context. - * This can be done even after compression is started, - * if nbWorkers==0, this will have no impact until a new compression is started. - * if nbWorkers>=1, new parameters will be picked up at next job, - * with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated). - */ -ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParametersUsingCCtxParams( - ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params); - -/*! ZSTD_compressStream2_simpleArgs() : - * Same as ZSTD_compressStream2(), - * but using only integral types as arguments. - * This variant might be helpful for binders from dynamic languages - * which have troubles handling structures containing memory pointers. - */ -ZSTDLIB_STATIC_API size_t ZSTD_compressStream2_simpleArgs ( - ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, size_t* dstPos, - const void* src, size_t srcSize, size_t* srcPos, - ZSTD_EndDirective endOp); - - -/*************************************** -* Advanced decompression functions -***************************************/ - -/*! ZSTD_isFrame() : - * Tells if the content of `buffer` starts with a valid Frame Identifier. - * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. 
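/* Illustrative sketch (not part of the vendored header or of this patch): the ZSTD_CCtx_params
 * "quick howto" above, spelled out. ZSTD_isError() and ZSTD_c_checksumFlag come from the
 * stable zstd.h API, not shown in this diff. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Build a reusable parameter set and apply it to an existing context.
 * Returns 0 on success, non-zero otherwise. */
static int apply_reusable_params(ZSTD_CCtx* cctx, int level)
{
    ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
    if (params == NULL) return 1;

    size_t err = ZSTD_CCtxParams_init(params, level);                 /* start from a compression level */
    if (!ZSTD_isError(err))
        err = ZSTD_CCtxParams_setParameter(params, ZSTD_c_checksumFlag, 1);
    if (!ZSTD_isError(err))
        err = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);   /* applies to subsequent frames */

    ZSTD_freeCCtxParams(params);                                      /* accepts NULL as well */
    return ZSTD_isError(err) ? 1 : 0;
}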
- * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. - * Note 3 : Skippable Frame Identifiers are considered valid. */ -ZSTDLIB_STATIC_API unsigned ZSTD_isFrame(const void* buffer, size_t size); - -/*! ZSTD_createDDict_byReference() : - * Create a digested dictionary, ready to start decompression operation without startup delay. - * Dictionary content is referenced, and therefore stays in dictBuffer. - * It is important that dictBuffer outlives DDict, - * it must remain read accessible throughout the lifetime of DDict */ -ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize); - -/*! ZSTD_DCtx_loadDictionary_byReference() : - * Same as ZSTD_DCtx_loadDictionary(), - * but references `dict` content instead of copying it into `dctx`. - * This saves memory if `dict` remains around., - * However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */ -ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); - -/*! ZSTD_DCtx_loadDictionary_advanced() : - * Same as ZSTD_DCtx_loadDictionary(), - * but gives direct control over - * how to load the dictionary (by copy ? by reference ?) - * and how to interpret it (automatic ? force raw mode ? full mode only ?). */ -ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); - -/*! ZSTD_DCtx_refPrefix_advanced() : - * Same as ZSTD_DCtx_refPrefix(), but gives finer control over - * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */ -ZSTDLIB_STATIC_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); - -/*! ZSTD_DCtx_setMaxWindowSize() : - * Refuses allocating internal buffers for frames requiring a window size larger than provided limit. - * This protects a decoder context from reserving too much memory for itself (potential attack scenario). - * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode. - * By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) - * @return : 0, or an error code (which can be tested using ZSTD_isError()). - */ -ZSTDLIB_STATIC_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize); - -/*! ZSTD_DCtx_getParameter() : - * Get the requested decompression parameter value, selected by enum ZSTD_dParameter, - * and store it into int* value. - * @return : 0, or an error code (which can be tested with ZSTD_isError()). - */ -ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value); - -/* ZSTD_d_format - * experimental parameter, - * allowing selection between ZSTD_format_e input compression formats - */ -#define ZSTD_d_format ZSTD_d_experimentalParam1 -/* ZSTD_d_stableOutBuffer - * Experimental parameter. - * Default is 0 == disabled. Set to 1 to enable. - * - * Tells the decompressor that the ZSTD_outBuffer will ALWAYS be the same - * between calls, except for the modifications that zstd makes to pos (the - * caller must not modify pos). This is checked by the decompressor, and - * decompression will fail if it ever changes. 
Therefore the ZSTD_outBuffer - * MUST be large enough to fit the entire decompressed frame. This will be - * checked when the frame content size is known. The data in the ZSTD_outBuffer - * in the range [dst, dst + pos) MUST not be modified during decompression - * or you will get data corruption. - * - * When this flags is enabled zstd won't allocate an output buffer, because - * it can write directly to the ZSTD_outBuffer, but it will still allocate - * an input buffer large enough to fit any compressed block. This will also - * avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer. - * If you need to avoid the input buffer allocation use the buffer-less - * streaming API. - * - * NOTE: So long as the ZSTD_outBuffer always points to valid memory, using - * this flag is ALWAYS memory safe, and will never access out-of-bounds - * memory. However, decompression WILL fail if you violate the preconditions. - * - * WARNING: The data in the ZSTD_outBuffer in the range [dst, dst + pos) MUST - * not be modified during decompression or you will get data corruption. This - * is because zstd needs to reference data in the ZSTD_outBuffer to regenerate - * matches. Normally zstd maintains its own buffer for this purpose, but passing - * this flag tells zstd to use the user provided buffer. - */ -#define ZSTD_d_stableOutBuffer ZSTD_d_experimentalParam2 - -/* ZSTD_d_forceIgnoreChecksum - * Experimental parameter. - * Default is 0 == disabled. Set to 1 to enable - * - * Tells the decompressor to skip checksum validation during decompression, regardless - * of whether checksumming was specified during compression. This offers some - * slight performance benefits, and may be useful for debugging. - * Param has values of type ZSTD_forceIgnoreChecksum_e - */ -#define ZSTD_d_forceIgnoreChecksum ZSTD_d_experimentalParam3 - -/* ZSTD_d_refMultipleDDicts - * Experimental parameter. - * Default is 0 == disabled. Set to 1 to enable - * - * If enabled and dctx is allocated on the heap, then additional memory will be allocated - * to store references to multiple ZSTD_DDict. That is, multiple calls of ZSTD_refDDict() - * using a given ZSTD_DCtx, rather than overwriting the previous DDict reference, will instead - * store all references. At decompression time, the appropriate dictID is selected - * from the set of DDicts based on the dictID in the frame. - * - * Usage is simply calling ZSTD_refDDict() on multiple dict buffers. - * - * Param has values of byte ZSTD_refMultipleDDicts_e - * - * WARNING: Enabling this parameter and calling ZSTD_DCtx_refDDict(), will trigger memory - * allocation for the hash table. ZSTD_freeDCtx() also frees this memory. - * Memory is allocated as per ZSTD_DCtx::customMem. - * - * Although this function allocates memory for the table, the user is still responsible for - * memory management of the underlying ZSTD_DDict* themselves. - */ -#define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4 - - -/*! ZSTD_DCtx_setFormat() : - * This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter(). - * Instruct the decoder context about what kind of data to decode next. - * This instruction is mandatory to decode data without a fully-formed header, - * such ZSTD_f_zstd1_magicless for example. - * @return : 0, or an error code (which can be tested using ZSTD_isError()). */ -ZSTD_DEPRECATED("use ZSTD_DCtx_setParameter() instead") -size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format); - -/*! 
ZSTD_decompressStream_simpleArgs() : - * Same as ZSTD_decompressStream(), - * but using only integral types as arguments. - * This can be helpful for binders from dynamic languages - * which have troubles handling structures containing memory pointers. - */ -ZSTDLIB_STATIC_API size_t ZSTD_decompressStream_simpleArgs ( - ZSTD_DCtx* dctx, - void* dst, size_t dstCapacity, size_t* dstPos, - const void* src, size_t srcSize, size_t* srcPos); - - -/******************************************************************** -* Advanced streaming functions -* Warning : most of these functions are now redundant with the Advanced API. -* Once Advanced API reaches "stable" status, -* redundant functions will be deprecated, and then at some point removed. -********************************************************************/ - -/*===== Advanced Streaming compression functions =====*/ - -/*! ZSTD_initCStream_srcSize() : - * This function is DEPRECATED, and equivalent to: - * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) - * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); - * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); - * - * pledgedSrcSize must be correct. If it is not known at init time, use - * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs, - * "0" also disables frame content size field. It may be enabled in the future. - * This prototype will generate compilation warnings. - */ -ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") -size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, - int compressionLevel, - unsigned long long pledgedSrcSize); - -/*! ZSTD_initCStream_usingDict() : - * This function is DEPRECATED, and is equivalent to: - * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); - * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); - * - * Creates of an internal CDict (incompatible with static CCtx), except if - * dict == NULL or dictSize < 8, in which case no dict is used. - * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if - * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy. - * This prototype will generate compilation warnings. - */ -ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") -size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, - const void* dict, size_t dictSize, - int compressionLevel); - -/*! ZSTD_initCStream_advanced() : - * This function is DEPRECATED, and is approximately equivalent to: - * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * // Pseudocode: Set each zstd parameter and leave the rest as-is. - * for ((param, value) : params) { - * ZSTD_CCtx_setParameter(zcs, param, value); - * } - * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); - * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); - * - * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy. - * pledgedSrcSize must be correct. - * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. - * This prototype will generate compilation warnings. - */ -ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") -size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, - const void* dict, size_t dictSize, - ZSTD_parameters params, - unsigned long long pledgedSrcSize); - -/*! 
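/* Illustrative sketch (not part of the vendored header or of this patch): the modern
 * replacement for the deprecated ZSTD_initCStream_*() functions above, following the setter
 * sequence given in their comments, plus ZSTD_compressStream2() and ZSTD_isError() from the
 * stable zstd.h API (not shown in this diff). */
#include <zstd.h>

/* One-shot streaming compression of an in-memory buffer.
 * Returns the compressed size, or 0 on failure; size dst with
 * ZSTD_compressBound(srcSize) so the frame always fits. */
static size_t stream_compress_buffer(ZSTD_CCtx* cctx,
                                     void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize, int level)
{
    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
    ZSTD_CCtx_refCDict(cctx, NULL);                                  /* clear any previous dictionary */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
    ZSTD_CCtx_setPledgedSrcSize(cctx, (unsigned long long)srcSize);  /* source size is known here */

    ZSTD_inBuffer  in  = { src, srcSize, 0 };
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    size_t remaining;
    do {
        remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return 0;
    } while (remaining != 0 && out.pos < out.size);                  /* 0 == frame fully flushed */
    return (remaining == 0) ? out.pos : 0;
}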
ZSTD_initCStream_usingCDict() : - * This function is DEPRECATED, and equivalent to: - * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * ZSTD_CCtx_refCDict(zcs, cdict); - * - * note : cdict will just be referenced, and must outlive compression session - * This prototype will generate compilation warnings. - */ -ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions") -size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); - -/*! ZSTD_initCStream_usingCDict_advanced() : - * This function is DEPRECATED, and is approximately equivalent to: - * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * // Pseudocode: Set each zstd frame parameter and leave the rest as-is. - * for ((fParam, value) : fParams) { - * ZSTD_CCtx_setParameter(zcs, fParam, value); - * } - * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); - * ZSTD_CCtx_refCDict(zcs, cdict); - * - * same as ZSTD_initCStream_usingCDict(), with control over frame parameters. - * pledgedSrcSize must be correct. If srcSize is not known at init time, use - * value ZSTD_CONTENTSIZE_UNKNOWN. - * This prototype will generate compilation warnings. - */ -ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions") -size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, - const ZSTD_CDict* cdict, - ZSTD_frameParameters fParams, - unsigned long long pledgedSrcSize); - -/*! ZSTD_resetCStream() : - * This function is DEPRECATED, and is equivalent to: - * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); - * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); - * Note: ZSTD_resetCStream() interprets pledgedSrcSize == 0 as ZSTD_CONTENTSIZE_UNKNOWN, but - * ZSTD_CCtx_setPledgedSrcSize() does not do the same, so ZSTD_CONTENTSIZE_UNKNOWN must be - * explicitly specified. - * - * start a new frame, using same parameters from previous frame. - * This is typically useful to skip dictionary loading stage, since it will re-use it in-place. - * Note that zcs must be init at least once before using ZSTD_resetCStream(). - * If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN. - * If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end. - * For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs, - * but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead. - * @return : 0, or an error code (which can be tested using ZSTD_isError()) - * This prototype will generate compilation warnings. - */ -ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") -size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize); - - -typedef struct { - unsigned long long ingested; /* nb input bytes read and buffered */ - unsigned long long consumed; /* nb input bytes actually compressed */ - unsigned long long produced; /* nb of compressed bytes generated and buffered */ - unsigned long long flushed; /* nb of compressed bytes flushed : not provided; can be tracked from caller side */ - unsigned currentJobID; /* MT only : latest started job nb */ - unsigned nbActiveWorkers; /* MT only : nb of workers actively compressing at probe time */ -} ZSTD_frameProgression; - -/* ZSTD_getFrameProgression() : - * tells how much data has been ingested (read from input) - * consumed (input actually compressed) and produced (output) for current frame. 
- * Note : (ingested - consumed) is amount of input data buffered internally, not yet compressed. - * Aggregates progression inside active worker threads. - */ -ZSTDLIB_STATIC_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx); - -/*! ZSTD_toFlushNow() : - * Tell how many bytes are ready to be flushed immediately. - * Useful for multithreading scenarios (nbWorkers >= 1). - * Probe the oldest active job, defined as oldest job not yet entirely flushed, - * and check its output buffer. - * @return : amount of data stored in oldest job and ready to be flushed immediately. - * if @return == 0, it means either : - * + there is no active job (could be checked with ZSTD_frameProgression()), or - * + oldest job is still actively compressing data, - * but everything it has produced has also been flushed so far, - * therefore flush speed is limited by production speed of oldest job - * irrespective of the speed of concurrent (and newer) jobs. - */ -ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx); - - -/*===== Advanced Streaming decompression functions =====*/ - -/*! - * This function is deprecated, and is equivalent to: - * - * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); - * ZSTD_DCtx_loadDictionary(zds, dict, dictSize); - * - * note: no dictionary will be used if dict == NULL or dictSize < 8 - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x - */ -ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); - -/*! - * This function is deprecated, and is equivalent to: - * - * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); - * ZSTD_DCtx_refDDict(zds, ddict); - * - * note : ddict is referenced, it must outlive decompression session - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x - */ -ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); - -/*! - * This function is deprecated, and is equivalent to: - * - * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); - * - * re-use decompression parameters from previous init; saves dictionary loading - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x - */ -ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); - - -/********************************************************************* -* Buffer-less and synchronous inner streaming functions -* -* This is an advanced API, giving full control over buffer management, for users which need direct control over memory. -* But it's also a complex one, with several restrictions, documented below. -* Prefer normal streaming API for an easier experience. -********************************************************************* */ - -/** - Buffer-less streaming compression (synchronous mode) - - A ZSTD_CCtx object is required to track streaming operations. - Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource. - ZSTD_CCtx object can be re-used multiple times within successive compression operations. - - Start by initializing a context. - Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression. - It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx() - - Then, consume your input using ZSTD_compressContinue(). 
- There are some important considerations to keep in mind when using this advanced function : - - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only. - - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks. - - Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario. - Worst case evaluation is provided by ZSTD_compressBound(). - ZSTD_compressContinue() doesn't guarantee recover after a failed compression. - - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog). - It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consists of multiple contiguous blocks) - - ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps. - In which case, it will "discard" the relevant memory section from its history. - - Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum. - It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame. - Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders. - - `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again. -*/ - -/*===== Buffer-less streaming compression functions =====*/ -ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel); -ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); -ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */ -ZSTDLIB_STATIC_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */ - -ZSTDLIB_STATIC_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); -ZSTDLIB_STATIC_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); - -/* The ZSTD_compressBegin_advanced() and ZSTD_compressBegin_usingCDict_advanced() are now DEPRECATED and will generate a compiler warning */ -ZSTD_DEPRECATED("use advanced API to access custom parameters") -size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */ -ZSTD_DEPRECATED("use advanced API to access custom parameters") -size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */ -/** - Buffer-less streaming decompression (synchronous mode) - - A ZSTD_DCtx object is required to track streaming operations. - Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it. - A ZSTD_DCtx object can be re-used multiple times. - - First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader(). - Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough. 
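/* Illustrative sketch (not part of the vendored header or of this patch): the buffer-less
 * compression sequence described above, feeding fixed-size chunks. CHUNK is an arbitrary
 * size picked for the example; ZSTD_isError() and ZSTD_compressBound() are from the stable
 * zstd.h API. */
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_compressBegin()/Continue()/End() live behind this */
#include <zstd.h>

#define CHUNK ((size_t)1 << 17)

/* Returns the total compressed size, or 0 on failure. dst should be sized with
 * ZSTD_compressBound(srcSize); src must stay unmodified for the whole call. */
static size_t bufferless_compress(ZSTD_CCtx* cctx,
                                  void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize, int level)
{
    if (ZSTD_isError(ZSTD_compressBegin(cctx, level))) return 0;

    const char* ip = (const char*)src;
    char* op = (char*)dst;
    size_t dstLeft = dstCapacity;

    while (srcSize > CHUNK) {                 /* keep the last chunk for ZSTD_compressEnd() */
        size_t const c = ZSTD_compressContinue(cctx, op, dstLeft, ip, CHUNK);
        if (ZSTD_isError(c)) return 0;
        op += c; dstLeft -= c;
        ip += CHUNK; srcSize -= CHUNK;
    }
    size_t const last = ZSTD_compressEnd(cctx, op, dstLeft, ip, srcSize); /* last block(s) + epilogue */
    return ZSTD_isError(last) ? 0 : (size_t)(op - (char*)dst) + last;
}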
- Data fragment must be large enough to ensure successful decoding. - `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough. - @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled. - >0 : `srcSize` is too small, please provide at least @result bytes on next attempt. - errorCode, which can be tested using ZSTD_isError(). - - It fills a ZSTD_frameHeader structure with important information to correctly decode the frame, - such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`). - Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information. - As a consequence, check that values remain within valid application range. - For example, do not allocate memory blindly, check that `windowSize` is within expectation. - Each application can set its own limits, depending on local restrictions. - For extended interoperability, it is recommended to support `windowSize` of at least 8 MB. - - ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes. - ZSTD_decompressContinue() is very sensitive to contiguity, - if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place, - or that previous contiguous segment is large enough to properly handle maximum back-reference distance. - There are multiple ways to guarantee this condition. - - The most memory efficient way is to use a round buffer of sufficient size. - Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(), - which can @return an error code if required value is too large for current system (in 32-bits mode). - In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one, - up to the moment there is not enough room left in the buffer to guarantee decoding another full block, - which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`. - At which point, decoding can resume from the beginning of the buffer. - Note that already decoded data stored in the buffer should be flushed before being overwritten. - - There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory. - - Finally, if you control the compression process, you can also ignore all buffer size rules, - as long as the encoder and decoder progress in "lock-step", - aka use exactly the same buffer sizes, break contiguity at the same place, etc. - - Once buffers are setup, start decompression, with ZSTD_decompressBegin(). - If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict(). - - Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively. - ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue(). - ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail. - - @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity). - It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item. - It can also be an error code, which can be tested with ZSTD_isError(). - - A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero. - Context can then be reset to start a new decompression. 
- - Note : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType(). - This information is not required to properly decode a frame. - - == Special case : skippable frames == - - Skippable frames allow integration of user-defined data into a flow of concatenated frames. - Skippable frames will be ignored (skipped) by decompressor. - The format of skippable frames is as follows : - a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F - b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits - c) Frame Content - any content (User Data) of length equal to Frame Size - For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame. - For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content. -*/ - -/*===== Buffer-less streaming decompression functions =====*/ -typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e; -typedef struct { - unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */ - unsigned long long windowSize; /* can be very large, up to <= frameContentSize */ - unsigned blockSizeMax; - ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */ - unsigned headerSize; - unsigned dictID; - unsigned checksumFlag; -} ZSTD_frameHeader; - -/*! ZSTD_getFrameHeader() : - * decode Frame Header, or requires larger `srcSize`. - * @return : 0, `zfhPtr` is correctly filled, - * >0, `srcSize` is too small, value is wanted `srcSize` amount, - * or an error code, which can be tested using ZSTD_isError() */ -ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */ -/*! ZSTD_getFrameHeader_advanced() : - * same as ZSTD_getFrameHeader(), - * with added capability to select a format (like ZSTD_f_zstd1_magicless) */ -ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format); -ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */ - -ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx); -ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); -ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); - -ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx); -ZSTDLIB_STATIC_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); - -/* misc */ -ZSTDLIB_STATIC_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx); -typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e; -ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); - - - - -/* ============================ */ -/** Block level API */ -/* ============================ */ - -/*! - Block functions produce and decode raw zstd blocks, without frame metadata. - Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes). 
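/* Illustrative sketch (not part of the vendored header or of this patch): the buffer-less
 * decompression loop described above, for a single complete frame already held in memory
 * and decoded into one contiguous dst (which trivially satisfies the windowSize and
 * contiguity rules). ZSTD_isError() and ZSTD_CONTENTSIZE_UNKNOWN are from the stable
 * zstd.h API. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Returns the number of decompressed bytes written to dst, or 0 on failure. */
static size_t bufferless_decompress(ZSTD_DCtx* dctx,
                                    void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
{
    ZSTD_frameHeader zfh;
    if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0) return 0;  /* header incomplete or invalid */
    if (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
        && zfh.frameContentSize > dstCapacity) return 0;         /* don't trust frame values blindly */
    if (ZSTD_isError(ZSTD_decompressBegin(dctx))) return 0;

    const char* ip = (const char*)src;
    char* op = (char*)dst;
    size_t srcLeft = srcSize, dstLeft = dstCapacity;

    for (;;) {
        size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
        if (toRead == 0) break;                                  /* frame fully decoded */
        if (toRead > srcLeft) return 0;                          /* truncated input */
        size_t const produced = ZSTD_decompressContinue(dctx, op, dstLeft, ip, toRead);
        if (ZSTD_isError(produced)) return 0;
        op += produced; dstLeft -= produced;
        ip += toRead;   srcLeft -= toRead;
    }
    return (size_t)(op - (char*)dst);
}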
- But users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes. - - A few rules to respect : - - Compressing and decompressing require a context structure - + Use ZSTD_createCCtx() and ZSTD_createDCtx() - - It is necessary to init context before starting - + compression : any ZSTD_compressBegin*() variant, including with dictionary - + decompression : any ZSTD_decompressBegin*() variant, including with dictionary - + copyCCtx() and copyDCtx() can be used too - - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB - + If input is larger than a block size, it's necessary to split input data into multiple blocks - + For inputs larger than a single block, consider using regular ZSTD_compress() instead. - Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block. - - When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) ! - ===> In which case, nothing is produced into `dst` ! - + User __must__ test for such outcome and deal directly with uncompressed data - + A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0. - Doing so would mess up with statistics history, leading to potential data corruption. - + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !! - + In case of multiple successive blocks, should some of them be uncompressed, - decoder must be informed of their existence in order to follow proper history. - Use ZSTD_insertBlock() for such a case. -*/ - -/*===== Raw zstd block functions =====*/ -ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx); -ZSTDLIB_STATIC_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); -ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); -ZSTDLIB_STATIC_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */ - - -#endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */ - -#if defined (__cplusplus) -} -#endif diff --git a/vendor/github.com/valyala/gozstd/zstd_errors.h b/vendor/github.com/valyala/gozstd/zstd_errors.h deleted file mode 100644 index fa3686b772..0000000000 --- a/vendor/github.com/valyala/gozstd/zstd_errors.h +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright (c) Yann Collet, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. 
- */ - -#ifndef ZSTD_ERRORS_H_398273423 -#define ZSTD_ERRORS_H_398273423 - -#if defined (__cplusplus) -extern "C" { -#endif - -/*===== dependency =====*/ -#include <stddef.h> /* size_t */ - - -/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */ -#ifndef ZSTDERRORLIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default"))) -# else -# define ZSTDERRORLIB_VISIBILITY -# endif -#endif -#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) -# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY -#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) -# define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ -#else -# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY -#endif - -/*-********************************************* - * Error codes list - *-********************************************* - * Error codes _values_ are pinned down since v1.3.1 only. - * Therefore, don't rely on values if you may link to any version < v1.3.1. - * - * Only values < 100 are considered stable. - * - * note 1 : this API shall be used with static linking only. - * dynamic linking is not yet officially supported. - * note 2 : Prefer relying on the enum than on its value whenever possible - * This is the only supported way to use the error list < v1.3.1 - * note 3 : ZSTD_isError() is always correct, whatever the library version. - **********************************************/ -typedef enum { - ZSTD_error_no_error = 0, - ZSTD_error_GENERIC = 1, - ZSTD_error_prefix_unknown = 10, - ZSTD_error_version_unsupported = 12, - ZSTD_error_frameParameter_unsupported = 14, - ZSTD_error_frameParameter_windowTooLarge = 16, - ZSTD_error_corruption_detected = 20, - ZSTD_error_checksum_wrong = 22, - ZSTD_error_dictionary_corrupted = 30, - ZSTD_error_dictionary_wrong = 32, - ZSTD_error_dictionaryCreation_failed = 34, - ZSTD_error_parameter_unsupported = 40, - ZSTD_error_parameter_outOfBound = 42, - ZSTD_error_tableLog_tooLarge = 44, - ZSTD_error_maxSymbolValue_tooLarge = 46, - ZSTD_error_maxSymbolValue_tooSmall = 48, - ZSTD_error_stage_wrong = 60, - ZSTD_error_init_missing = 62, - ZSTD_error_memory_allocation = 64, - ZSTD_error_workSpace_tooSmall= 66, - ZSTD_error_dstSize_tooSmall = 70, - ZSTD_error_srcSize_wrong = 72, - ZSTD_error_dstBuffer_null = 74, - /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */ - ZSTD_error_frameIndex_tooLarge = 100, - ZSTD_error_seekableIO = 102, - ZSTD_error_dstBuffer_wrong = 104, - ZSTD_error_srcBuffer_wrong = 105, - ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */ -} ZSTD_ErrorCode; - -/*! 
ZSTD_getErrorCode() : - convert a `size_t` function result into a `ZSTD_ErrorCode` enum type, - which can be used to compare with enum list published above */ -ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); -ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */ - - -#if defined (__cplusplus) -} -#endif - -#endif /* ZSTD_ERRORS_H_398273423 */ diff --git a/vendor/modules.txt b/vendor/modules.txt index c2b34cc413..093794112e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -71,6 +71,7 @@ github.com/hashicorp/go-multierror ## explicit github.com/json-iterator/go # github.com/klauspost/compress v1.13.6 +## explicit github.com/klauspost/compress github.com/klauspost/compress/flate github.com/klauspost/compress/fse @@ -134,9 +135,6 @@ github.com/ulikunitz/xz github.com/ulikunitz/xz/internal/hash github.com/ulikunitz/xz/internal/xlog github.com/ulikunitz/xz/lzma -# github.com/valyala/gozstd v1.15.1 -## explicit -github.com/valyala/gozstd # github.com/vbatts/tar-split v0.11.2 ## explicit github.com/vbatts/tar-split/archive/tar
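For reference, the error-code surface removed with the vendored copy above (zstd_errors.h) is consumed from C roughly as sketched below; this is a hedged example assuming the usual <zstd.h> and <zstd_errors.h> include paths, with ZSTD_isError() and ZSTD_getErrorName() taken from the stable zstd.h API.

#include <stdio.h>
#include <zstd.h>
#include <zstd_errors.h>   /* the header deleted above; per its own note, intended for static linking only */

/* Turn any size_t result returned by the zstd C API into a diagnostic.
 * Returns 0 if ret is not an error. */
static int report_zstd_result(const char* what, size_t ret)
{
    if (!ZSTD_isError(ret)) return 0;
    ZSTD_ErrorCode const code = ZSTD_getErrorCode(ret);     /* enum values pinned since v1.3.1 */
    fprintf(stderr, "%s: %s (%s)\n", what, ZSTD_getErrorName(ret), ZSTD_getErrorString(code));
    return (code == ZSTD_error_memory_allocation) ? 2 : 1;  /* e.g. treat OOM specially */
}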