diff --git a/Dockerfile b/Dockerfile index a851208e..0b487748 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,8 +3,9 @@ ARG GO_VERSION=1.23.2 FROM golang:${GO_VERSION}-alpine AS build-go +RUN apk update && apk add --no-cache gcc musl-dev WORKDIR /src -ENV CGO_ENABLED=0 +ENV CGO_ENABLED=1 COPY --link go.* ./ RUN --mount=type=cache,target=/go/pkg/mod go mod download COPY --link . . @@ -18,7 +19,7 @@ RUN --mount=type=cache,target=/go/pkg/mod \ --mount=type=cache,target=/root/.cache/go-build \ go build \ -trimpath \ - -ldflags="-X '${VERSION_PATH}.Long=${VERSION_LONG}' -X '${VERSION_PATH}.Short=${VERSION_SHORT}' -X '${VERSION_PATH}.GitCommit=${VERSION_GIT_HASH}'" \ + -ldflags="-extldflags '-static' -X '${VERSION_PATH}.Long=${VERSION_LONG}' -X '${VERSION_PATH}.Short=${VERSION_SHORT}' -X '${VERSION_PATH}.GitCommit=${VERSION_GIT_HASH}'" \ -o /out/preprocessing-worker \ ./cmd/worker diff --git a/Makefile b/Makefile index d32f8569..33358036 100644 --- a/Makefile +++ b/Makefile @@ -15,6 +15,7 @@ else endif include hack/make/bootstrap.mk +include hack/make/dep_ent.mk include hack/make/dep_go_enum.mk include hack/make/dep_golangci_lint.mk include hack/make/dep_golines.mk @@ -27,7 +28,8 @@ include hack/make/dep_tparse.mk include hack/make/enums.mk # Lazy-evaluated list of tools. 
-TOOLS = $(GOLANGCI_LINT) \ +TOOLS = $(ENT) \ + $(GOLANGCI_LINT) \ $(GOMAJOR) \ $(GOSEC) \ $(GOTESTSUM) \ @@ -43,7 +45,10 @@ endef IGNORED_PACKAGES := \ github.com/artefactual-sdps/preprocessing-sfa/hack/% \ github.com/artefactual-sdps/preprocessing-sfa/internal/%/fake \ - github.com/artefactual-sdps/preprocessing-sfa/internal/enums + github.com/artefactual-sdps/preprocessing-sfa/internal/enums \ + github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db \ + github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/% \ + github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/schema PACKAGES := $(shell go list ./...) TEST_PACKAGES := $(filter-out $(IGNORED_PACKAGES),$(PACKAGES)) @@ -59,6 +64,12 @@ deps: # @HELP List available module dependency updates. deps: $(GOMAJOR) gomajor list +gen-ent: # @HELP Generate Ent assets. +gen-ent: $(ENT) + ent generate ./internal/persistence/ent/schema \ + --feature sql/versioned-migration \ + --target=./internal/persistence/ent/db + gen-mock: # @HELP Generate mocks. 
gen-mock: $(MOCKGEN) mockgen -typed -destination=./internal/fformat/fake/mock_identifier.go -package=fake github.com/artefactual-sdps/preprocessing-sfa/internal/fformat Identifier diff --git a/Tiltfile.enduro b/Tiltfile.enduro index 63f9cc8f..8025ddf7 100644 --- a/Tiltfile.enduro +++ b/Tiltfile.enduro @@ -31,3 +31,9 @@ k8s_resource( trigger_mode=trigger_mode ) k8s_resource("minio-ais-bucket", labels=["Preprocessing"]) +k8s_resource( + "mysql-recreate-prep-database", + labels=["Preprocessing"], + auto_init=False, + trigger_mode=TRIGGER_MODE_MANUAL +) diff --git a/cmd/worker/workercmd/cmd.go b/cmd/worker/workercmd/cmd.go index 24d2ca93..ad1ccd89 100644 --- a/cmd/worker/workercmd/cmd.go +++ b/cmd/worker/workercmd/cmd.go @@ -3,7 +3,11 @@ package workercmd import ( "context" "crypto/rand" + "errors" + "fmt" + "ariga.io/sqlcomment" + "entgo.io/ent/dialect/sql" bagit_gython "github.com/artefactual-labs/bagit-gython" "github.com/artefactual-sdps/temporal-activities/bagcreate" "github.com/artefactual-sdps/temporal-activities/bagvalidate" @@ -21,6 +25,8 @@ import ( "github.com/artefactual-sdps/preprocessing-sfa/internal/config" "github.com/artefactual-sdps/preprocessing-sfa/internal/fformat" "github.com/artefactual-sdps/preprocessing-sfa/internal/fvalidate" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db" "github.com/artefactual-sdps/preprocessing-sfa/internal/workflow" ) @@ -32,6 +38,7 @@ type Main struct { temporalWorker temporalsdk_worker.Worker temporalClient temporalsdk_client.Client bagValidator *bagit_gython.BagIt + dbClient *db.Client } func NewMain(logger logr.Logger, cfg config.Configuration) *Main { @@ -137,6 +144,32 @@ func (m *Main) Run(ctx context.Context) error { temporalsdk_activity.RegisterOptions{Name: bagcreate.Name}, ) + if m.cfg.CheckDuplicates { + sqlDB, err := 
persistence.Open(m.cfg.Persistence.Driver, m.cfg.Persistence.DSN) + if err != nil { + m.logger.Error(err, "Error initializing database pool.") + return err + } + m.dbClient = db.NewClient( + db.Driver( + sqlcomment.NewDriver( + sql.OpenDB(m.cfg.Persistence.Driver, sqlDB), + sqlcomment.WithDriverVerTag(), + sqlcomment.WithTags(sqlcomment.Tags{ + sqlcomment.KeyApplication: Name, + }), + ), + ), + ) + if m.cfg.Persistence.Migrate { + err = m.dbClient.Schema.Create(ctx) + if err != nil { + m.logger.Error(err, "Error migrating database.") + return err + } + } + } + if err := w.Start(); err != nil { m.logger.Error(err, "Preprocessing worker failed to start.") return err @@ -146,6 +179,8 @@ func (m *Main) Run(ctx context.Context) error { } func (m *Main) Close() error { + var e error + if m.temporalWorker != nil { m.temporalWorker.Stop() } @@ -156,9 +191,15 @@ func (m *Main) Close() error { if m.bagValidator != nil { if err := m.bagValidator.Cleanup(); err != nil { - m.logger.Info("Couldn't clean up bag validator: %v", err) + e = errors.Join(e, fmt.Errorf("Couldn't clean up bag validator: %v", err)) } } - return nil + if m.dbClient != nil { + if err := m.dbClient.Close(); err != nil { + e = errors.Join(e, fmt.Errorf("Couldn't close database client: %v", err)) + } + } + + return e } diff --git a/go.mod b/go.mod index f45d7e61..a4d01bbb 100644 --- a/go.mod +++ b/go.mod @@ -3,14 +3,18 @@ module github.com/artefactual-sdps/preprocessing-sfa go 1.23.2 require ( + ariga.io/sqlcomment v0.1.0 + entgo.io/ent v0.14.1 github.com/antchfx/xmlquery v1.4.2 github.com/artefactual-labs/bagit-gython v0.2.0 github.com/artefactual-sdps/temporal-activities v0.0.0-20241105002718-bc4a9d85ce42 github.com/beevik/etree v1.4.0 github.com/deckarep/golang-set/v2 v2.6.0 github.com/go-logr/logr v1.4.2 + github.com/go-sql-driver/mysql v1.8.1 github.com/google/uuid v1.6.0 github.com/hashicorp/go-cleanhttp v0.5.2 + 
github.com/mattn/go-sqlite3 v1.14.22 github.com/oklog/run v1.1.0 github.com/richardlehane/siegfried v1.11.1 github.com/spf13/pflag v1.0.5 @@ -25,7 +29,11 @@ require ( ) require ( + ariga.io/atlas v0.19.2 // indirect + filippo.io/edwards25519 v1.1.0 // indirect + github.com/agext/levenshtein v1.2.3 // indirect github.com/antchfx/xpath v1.3.2 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/aws/aws-sdk-go v1.55.5 // indirect github.com/aws/aws-sdk-go-v2 v1.30.3 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect @@ -49,7 +57,9 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/inflect v0.19.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/mock v1.6.0 // indirect @@ -57,16 +67,18 @@ require ( github.com/google/wire v0.6.0 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hashicorp/hcl/v2 v2.19.1 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/kluctl/go-embed-python v0.0.0-3.12.3-20240415-1 // indirect github.com/magiconair/properties v1.8.7 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect 
github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/nyudlts/go-bagit v0.3.0-alpha.0.20240515212815-8dab411c23af // indirect github.com/otiai10/copy v1.14.0 // indirect github.com/pborman/uuid v1.2.1 // indirect - github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.1.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/richardlehane/characterize v1.0.0 // indirect github.com/richardlehane/match v1.0.5 // indirect @@ -79,27 +91,35 @@ require ( github.com/ross-spencer/wikiprov v0.2.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sergi/go-diff v1.3.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect + github.com/zclconf/go-cty v1.14.1 // indirect go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/otel v1.29.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/trace v1.29.0 // indirect go.temporal.io/api v1.32.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20231219180239-dc181d75b848 // indirect golang.org/x/image v0.17.0 // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect + golang.org/x/mod 
v0.20.0 // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.6.0 // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect google.golang.org/api v0.191.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240812133136-8ffd90a71988 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240812133136-8ffd90a71988 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect google.golang.org/grpc v1.65.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 69306647..3ea708b0 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,7 @@ +ariga.io/atlas v0.19.2 h1:ulK06d4joEaMP06HNNPxdpD8dFgZGzjzjk+Mb5VfF08= +ariga.io/atlas v0.19.2/go.mod h1:VPlcXdd4w2KqKnH54yEZcry79UAhpaWaxEsmn5JRNoE= +ariga.io/sqlcomment v0.1.0 h1:8kQPlVe3sXpTloEFlpX5dhFAXB28i6rwq9ktqqnPx70= +ariga.io/sqlcomment v0.1.0/go.mod h1:NT1IZMfBTQl1MUU5wgVONmnDqFRqtZrdDRgAXfc1g5k= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= @@ -12,11 +16,21 @@ cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4= cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus= cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= +entgo.io/ent v0.14.1 h1:fUERL506Pqr92EPHJqr8EYxbPioflJo6PudkrEA8a/s= +entgo.io/ent v0.14.1/go.mod h1:MH6XLG0KXpkcDQhKiHfANZSzR55TJyPL5IGNpI8wpco= 
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/antchfx/xmlquery v1.4.2 h1:MZKd9+wblwxfQ1zd1AdrTsqVaMjMCwow3IqkCSe00KA= github.com/antchfx/xmlquery v1.4.2/go.mod h1:QXhvf5ldTuGqhd1SHNvvtlhhdQLks4dD0awIVhXIDTA= github.com/antchfx/xpath v1.3.2 h1:LNjzlsSjinu3bQpw9hWMY9ocB80oLOWuQqFvO6xt51U= github.com/antchfx/xpath v1.3.2/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/artefactual-labs/bagit-gython v0.2.0 h1:Zje4Lb1goZVUPoxpc/k65sWtYpNgK9Rvphvaok5cYzE= github.com/artefactual-labs/bagit-gython v0.2.0/go.mod h1:C+hFZQMDnji1hjGt3nrlMK3BahaBhvo/hU2uqd+Q9Z4= github.com/artefactual-sdps/temporal-activities v0.0.0-20241105002718-bc4a9d85ce42 h1:0Ymucvkou8aiZkQrVgZsTODGeGoQHVNV414IFOFRxX0= @@ -87,13 +101,20 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= 
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4= +github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -146,12 +167,14 @@ github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDP github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod 
h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI= +github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -168,8 +191,14 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/magiconair/properties v1.8.7 
h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/nyudlts/go-bagit v0.3.0-alpha.0.20240515212815-8dab411c23af h1:I3StjEXH279zjQyXyBFuTyf+ga1sdySf0C2xtpHU0Ag= @@ -183,8 +212,8 @@ github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= -github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= +github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= @@ -215,6 +244,8 @@ 
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -252,20 +283,22 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA= +github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= go.artefactual.dev/tools v0.17.0 h1:7X/qZYKyKT8RxVjBsksqvalQ8F4wcor6jcA0ewjc92M= go.artefactual.dev/tools v0.17.0/go.mod h1:lsu0JcKFEJanNdrf5/IFjjzxul4pazG1dDHnLX9Nkvs= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod 
h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.temporal.io/api v1.32.0 h1:Jv0FieWDq0HJVqoHRE/kRHM+tIaRtR16RbXZZl+8Qb4= go.temporal.io/api v1.32.0/go.mod 
h1:MClRjMCgXZTKmxyItEJPRR5NuJRBhSEpuF9wuh97N6U= go.temporal.io/sdk v1.26.1 h1:ggmFBythnuuW3yQRp0VzOTrmbOf+Ddbe00TZl+CQ+6U= @@ -290,8 +323,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20231219180239-dc181d75b848 h1:+iq7lrkxmFNBM7xx+Rae2W6uyPfhPeDWD+n+JgppptE= golang.org/x/exp v0.0.0-20231219180239-dc181d75b848/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= @@ -308,6 +341,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -325,8 +360,8 @@ golang.org/x/net 
v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -340,8 +375,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -359,8 +394,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -374,8 +409,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -408,10 +443,10 @@ google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto 
v0.0.0-20240812133136-8ffd90a71988 h1:CT2Thj5AuPV9phrYMtzX11k+XkzMGfRAet42PmoTATM= google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988/go.mod h1:7uvplUBj4RjHAxIZ//98LzOvrQ04JBkaixRmCMI29hc= -google.golang.org/genproto/googleapis/api v0.0.0-20240812133136-8ffd90a71988 h1:+/tmTy5zAieooKIXfzDm9KiA3Bv6JBwriRN9LY+yayk= -google.golang.org/genproto/googleapis/api v0.0.0-20240812133136-8ffd90a71988/go.mod h1:4+X6GvPs+25wZKbQq9qyAXrwIRExv7w0Ea6MgZLZiDM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240812133136-8ffd90a71988 h1:V71AcdLZr2p8dC9dbOIMCpqi4EmRl8wUwnJzXXLmbmc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240812133136-8ffd90a71988/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd h1:BBOTEWLuuEGQy9n1y9MhVJ9Qt0BDu21X8qZs71/uPZo= +google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -433,13 +468,15 @@ google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6h google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/hack/build_dist.sh b/hack/build_dist.sh index 491cab69..80760ede 100755 --- a/hack/build_dist.sh +++ b/hack/build_dist.sh @@ -7,6 +7,9 @@ set -eu +export CGO_ENABLED=1 +export CC=musl-gcc + MODULE_PATH="${MODULE_PATH:-github.com/artefactual-sdps/preprocessing-sfa}" IFS=".$IFS" read -r major minor patch < internal/version/VERSION.txt @@ -35,6 +38,6 @@ EOF exit 0 fi -ldflags="-X ${VERSION_PATH}.Long=${LONG} -X ${VERSION_PATH}.Short=${SHORT} -X ${VERSION_PATH}.GitCommit=${GIT_HASH}" +ldflags="-extldflags '-static' -X ${VERSION_PATH}.Long=${LONG} -X ${VERSION_PATH}.Short=${SHORT} -X ${VERSION_PATH}.GitCommit=${GIT_HASH}" exec go build -ldflags "$ldflags" "$@" diff --git a/hack/kube/base/kustomization.yaml b/hack/kube/base/kustomization.yaml index 9e065040..787cd5b0 100644 --- a/hack/kube/base/kustomization.yaml +++ b/hack/kube/base/kustomization.yaml @@ -4,6 +4,7 @@ namespace: enduro-sdps 
resources: - mysql.yaml - namespace.yaml + - preprocessing-pvc.yaml - preprocessing-worker.yaml - role.yaml - role-binding.yaml diff --git a/hack/kube/base/preprocessing-pvc.yaml b/hack/kube/base/preprocessing-pvc.yaml new file mode 100644 index 00000000..f4f93e9e --- /dev/null +++ b/hack/kube/base/preprocessing-pvc.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: preprocessing-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/hack/kube/base/preprocessing-worker.yaml b/hack/kube/base/preprocessing-worker.yaml index 12edf4e8..8ab6f3da 100644 --- a/hack/kube/base/preprocessing-worker.yaml +++ b/hack/kube/base/preprocessing-worker.yaml @@ -24,8 +24,13 @@ spec: - name: config mountPath: /home/preprocessing/.config readOnly: true + - name: db + mountPath: /home/preprocessing/db resources: {} volumes: - name: config secret: secretName: preprocessing-secret + - name: db + persistentVolumeClaim: + claimName: preprocessing-pvc diff --git a/hack/kube/overlays/dev/kustomization.yaml b/hack/kube/overlays/dev/kustomization.yaml index 7c4c00cf..cf0401e8 100644 --- a/hack/kube/overlays/dev/kustomization.yaml +++ b/hack/kube/overlays/dev/kustomization.yaml @@ -3,7 +3,7 @@ kind: Kustomization namespace: enduro-sdps resources: - ../../base - - mysql-secret.yaml - mysql-recreate-databases-job.yaml + - mysql-secret.yaml - preprocessing-secret.yaml - start-workflow-job.yaml diff --git a/hack/kube/overlays/dev/preprocessing-secret.yaml b/hack/kube/overlays/dev/preprocessing-secret.yaml index 59d5c94f..4cb6523c 100644 --- a/hack/kube/overlays/dev/preprocessing-secret.yaml +++ b/hack/kube/overlays/dev/preprocessing-secret.yaml @@ -9,6 +9,12 @@ stringData: verbosity = 2 sharedPath = "/home/preprocessing/shared" + checkDuplicates = true + + [persistence] + dsn = "file:/home/preprocessing/db/preprocessing.db?&_txlock=immediate" + driver = "sqlite3" + migrate = true [temporal] address = 
"temporal.enduro-sdps:7233" diff --git a/hack/kube/overlays/enduro/kustomization.yaml b/hack/kube/overlays/enduro/kustomization.yaml index e9147a6d..0a122caf 100644 --- a/hack/kube/overlays/enduro/kustomization.yaml +++ b/hack/kube/overlays/enduro/kustomization.yaml @@ -3,6 +3,8 @@ kind: Kustomization namespace: enduro-sdps resources: - minio-ais-bucket-job.yaml + - mysql-create-prep-database-job.yaml + - mysql-recreate-prep-database-job.yaml - preprocessing-worker.yaml - preprocessing-pvc.yaml - preprocessing-secret.yaml diff --git a/hack/kube/overlays/enduro/mysql-create-prep-database-job.yaml b/hack/kube/overlays/enduro/mysql-create-prep-database-job.yaml new file mode 100644 index 00000000..5b6ea987 --- /dev/null +++ b/hack/kube/overlays/enduro/mysql-create-prep-database-job.yaml @@ -0,0 +1,32 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: mysql-create-prep-database +spec: + backoffLimit: 100 + template: + spec: + restartPolicy: OnFailure + containers: + - name: recreate-databases + image: mysql:8.0 + imagePullPolicy: IfNotPresent + env: + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: mysql-secret + key: root-password + - name: MYSQL_USER + valueFrom: + secretKeyRef: + name: mysql-secret + key: user + command: [ + "sh", + "-c", + 'mysql -h mysql.enduro-sdps -u root -p$MYSQL_ROOT_PASSWORD --execute " + CREATE DATABASE IF NOT EXISTS preprocessing_sfa; + GRANT ALL PRIVILEGES ON preprocessing_sfa.* TO ''$MYSQL_USER''@''%''; + "', + ] diff --git a/hack/kube/overlays/enduro/mysql-recreate-prep-database-job.yaml b/hack/kube/overlays/enduro/mysql-recreate-prep-database-job.yaml new file mode 100644 index 00000000..f72ad32a --- /dev/null +++ b/hack/kube/overlays/enduro/mysql-recreate-prep-database-job.yaml @@ -0,0 +1,33 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: mysql-recreate-prep-database +spec: + backoffLimit: 100 + template: + spec: + restartPolicy: OnFailure + containers: + - name: recreate-databases + image: mysql:8.0 + 
imagePullPolicy: IfNotPresent + env: + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: mysql-secret + key: root-password + - name: MYSQL_USER + valueFrom: + secretKeyRef: + name: mysql-secret + key: user + command: [ + "sh", + "-c", + 'mysql -h mysql.enduro-sdps -u root -p$MYSQL_ROOT_PASSWORD --execute " + DROP DATABASE IF EXISTS preprocessing_sfa; + CREATE DATABASE IF NOT EXISTS preprocessing_sfa; + GRANT ALL PRIVILEGES ON preprocessing_sfa.* TO ''$MYSQL_USER''@''%''; + "', + ] diff --git a/hack/kube/overlays/enduro/preprocessing-secret.yaml b/hack/kube/overlays/enduro/preprocessing-secret.yaml index b9cf9d85..b0491343 100644 --- a/hack/kube/overlays/enduro/preprocessing-secret.yaml +++ b/hack/kube/overlays/enduro/preprocessing-secret.yaml @@ -9,6 +9,12 @@ stringData: verbosity = 2 sharedPath = "/home/preprocessing/shared" + checkDuplicates = true + + [persistence] + dsn = "" + driver = "mysql" + migrate = true [temporal] address = "temporal.enduro-sdps:7233" diff --git a/hack/kube/overlays/enduro/preprocessing-worker.yaml b/hack/kube/overlays/enduro/preprocessing-worker.yaml index f90a6b35..451b52b4 100644 --- a/hack/kube/overlays/enduro/preprocessing-worker.yaml +++ b/hack/kube/overlays/enduro/preprocessing-worker.yaml @@ -20,6 +20,19 @@ spec: containers: - name: preprocessing-worker image: preprocessing-sfa-worker:dev + env: + - name: MYSQL_USER + valueFrom: + secretKeyRef: + name: mysql-secret + key: user + - name: MYSQL_PASSWORD + valueFrom: + secretKeyRef: + name: mysql-secret + key: password + - name: ENDURO_PREPROCESSING_PERSISTENCE_DSN + value: $(MYSQL_USER):$(MYSQL_PASSWORD)@tcp(mysql.enduro-sdps:3306)/preprocessing_sfa volumeMounts: - name: config mountPath: /home/preprocessing/.config diff --git a/hack/make/dep_ent.mk b/hack/make/dep_ent.mk new file mode 100644 index 00000000..bce548f8 --- /dev/null +++ b/hack/make/dep_ent.mk @@ -0,0 +1,20 @@ +$(call _assert_var,MAKEDIR) +$(call _conditional_include,$(MAKEDIR)/base.mk) +$(call 
_assert_var,UNAME_OS) +$(call _assert_var,UNAME_ARCH) +$(call _assert_var,CACHE_VERSIONS) +$(call _assert_var,CACHE_BIN) + +# Keep in sync with the ent version in go.mod. +# See https://entgo.io/docs/code-gen/#version-compatibility-between-entc-and-ent +ENT_VERSION ?= 0.14.1 + +ENT := $(CACHE_VERSIONS)/ent/$(ENT_VERSION) +$(ENT): + rm -f $(CACHE_BIN)/ent + mkdir -p $(CACHE_BIN) + env GOBIN=$(CACHE_BIN) go install entgo.io/ent/cmd/ent@v$(ENT_VERSION) + chmod +x $(CACHE_BIN)/ent + rm -rf $(dir $(ENT)) + mkdir -p $(dir $(ENT)) + touch $(ENT) diff --git a/internal/config/config.go b/internal/config/config.go index 93520dfe..e023e2fe 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -12,6 +12,7 @@ import ( "github.com/artefactual-sdps/preprocessing-sfa/internal/ais" "github.com/artefactual-sdps/preprocessing-sfa/internal/fvalidate" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence" ) type ConfigurationValidator interface { @@ -35,6 +36,13 @@ type Configuration struct { // Enduro and preservation processing. SharedPath string + // CheckDuplicates enables or disables a check for SIPs that have already + // been processed. When enabled, the persistence configuration below will + // be required, and a SIP that has already been processed will fail the + // preprocessing workflow. 
+ CheckDuplicates bool + Persistence persistence.Config + Temporal Temporal Worker WorkerConfig Bagit bagcreate.Config @@ -92,6 +100,15 @@ func (c Configuration) Validate() error { errs = errors.Join(errs, fmt.Errorf("Bagit.%v", err)) } + if c.CheckDuplicates { + if c.Persistence.DSN == "" { + errs = errors.Join(errs, errRequired("Persistence.DSN")) + } + if c.Persistence.Driver == "" { + errs = errors.Join(errs, errRequired("Persistence.Driver")) + } + } + return errs } diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 39a903ee..0320a087 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -8,12 +8,18 @@ import ( "gotest.tools/v3/fs" "github.com/artefactual-sdps/preprocessing-sfa/internal/config" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence" ) const testConfig = `# Config debug = true verbosity = 2 sharedPath = "/home/preprocessing/shared" +checkDuplicates = true +[persistence] +dsn = "file:/path/to/fake.db" +driver = "sqlite3" +migrate = true [temporal] address = "host:port" namespace = "default" @@ -45,9 +51,15 @@ func TestConfig(t *testing.T) { toml: testConfig, wantFound: true, wantCfg: config.Configuration{ - Debug: true, - Verbosity: 2, - SharedPath: "/home/preprocessing/shared", + Debug: true, + Verbosity: 2, + SharedPath: "/home/preprocessing/shared", + CheckDuplicates: true, + Persistence: persistence.Config{ + DSN: "file:/path/to/fake.db", + Driver: "sqlite3", + Migrate: true, + }, Temporal: config.Temporal{ Address: "host:port", Namespace: "default", @@ -100,6 +112,21 @@ checksumAlgorithm = "unknown" wantFound: true, wantErr: `invalid configuration: Bagit.ChecksumAlgorithm: invalid value "unknown", must be one of (md5, sha1, sha256, sha512)`, + }, + { + name: "Errors when persistence configuration is missing", + configFile: "preprocessing.toml", + toml: `# Config +sharedPath = "/home/preprocessing/shared" +checkDuplicates = true +[temporal] 
+taskQueue = "preprocessing" +workflowName = "preprocessing" +`, + wantFound: true, + wantErr: `invalid configuration: +Persistence.DSN: missing required value +Persistence.Driver: missing required value`, }, { name: "Errors when TOML is invalid", diff --git a/internal/persistence/config.go b/internal/persistence/config.go new file mode 100644 index 00000000..d2cc85a5 --- /dev/null +++ b/internal/persistence/config.go @@ -0,0 +1,7 @@ +package persistence + +type Config struct { + Driver string + DSN string + Migrate bool +} diff --git a/internal/persistence/db.go b/internal/persistence/db.go new file mode 100644 index 00000000..d59051f8 --- /dev/null +++ b/internal/persistence/db.go @@ -0,0 +1,75 @@ +package persistence + +import ( + "database/sql" + "fmt" + "runtime" + "time" + + "github.com/go-sql-driver/mysql" + _ "github.com/mattn/go-sqlite3" +) + +// Open a database driver. +func Open(driver, ds string) (db *sql.DB, err error) { + switch driver { + case "mysql": + db, err = OpenMySQL(ds) + case "sqlite3": + db, err = sql.Open(driver, ds) + if err != nil { + return nil, err + } + + conns := runtime.NumCPU() + db.SetMaxOpenConns(conns) + db.SetMaxIdleConns(conns) + db.SetConnMaxLifetime(0) + db.SetConnMaxIdleTime(0) + + pragmas := []string{ + "journa_mode=WAL", + "synchronous=OFF", + "foreign_keys=ON", + "tempo_store=MEMORY", + "busy_timeout=1000", // Used with "_txlock=immediate" or "BEGIN IMMEDIATE". + } + for _, pragma := range pragmas { + if _, err := db.Exec("PRAGMA " + pragma); err != nil { + return nil, err + } + } + + default: + return nil, fmt.Errorf("database driver %q not supported", driver) + } + + return db, err +} + +// OpenMySQL opens the MySQL database driver. 
+func OpenMySQL(ds string) (*sql.DB, error) { + config, err := mysql.ParseDSN(ds) + if err != nil { + return nil, fmt.Errorf("error parsing dsn: %w (%s)", err, ds) + } + config.Collation = "utf8mb4_unicode_ci" + config.Loc = time.UTC + config.ParseTime = true + config.MultiStatements = true + config.Params = map[string]string{ + "time_zone": "UTC", + } + + conn, err := mysql.NewConnector(config) + if err != nil { + return nil, fmt.Errorf("error creating connector: %w", err) + } + + sqlDB := sql.OpenDB(conn) + sqlDB.SetMaxOpenConns(10) + sqlDB.SetMaxIdleConns(100) + sqlDB.SetConnMaxLifetime(time.Hour) + + return sqlDB, nil +} diff --git a/internal/persistence/ent/client/client.go b/internal/persistence/ent/client/client.go new file mode 100644 index 00000000..dd4a9e4f --- /dev/null +++ b/internal/persistence/ent/client/client.go @@ -0,0 +1,43 @@ +package client + +import ( + "context" + "errors" + "fmt" + + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db" +) + +type client struct { + ent *db.Client +} + +var _ persistence.Service = (*client)(nil) + +// New returns a new ent client that implements the persistence service. +func New(ent *db.Client) persistence.Service { + return &client{ent: ent} +} + +func (c *client) CreateSIP(ctx context.Context, name, checksum string) error { + if name == "" { + return errors.New("CreateSIP: name field is required") + } + if checksum == "" { + return errors.New("CreateSIP: checksum field is required") + } + + err := c.ent.SIP.Create(). + SetName(name). + SetChecksum(checksum). 
+ Exec(ctx) + if err != nil { + if db.IsConstraintError(err) { + err = persistence.ErrDuplicatedSIP + } + return fmt.Errorf("CreateSIP: %w", err) + } + + return nil +} diff --git a/internal/persistence/ent/client/client_test.go b/internal/persistence/ent/client/client_test.go new file mode 100644 index 00000000..54340c0e --- /dev/null +++ b/internal/persistence/ent/client/client_test.go @@ -0,0 +1,95 @@ +package client_test + +import ( + "context" + "fmt" + "testing" + + _ "github.com/mattn/go-sqlite3" + "gotest.tools/v3/assert" + + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence" + entclient "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/client" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/enttest" +) + +func setUpClient(t *testing.T) (*db.Client, persistence.Service) { + t.Helper() + + dsn := fmt.Sprintf("file:%s?mode=memory&cache=shared&_fk=1", t.Name()) + entc := enttest.Open(t, "sqlite3", dsn) + ps := entclient.New(entc) + t.Cleanup(func() { + _ = entc.Close() + }) + + return entc, ps +} + +func TestCreateSIP(t *testing.T) { + t.Parallel() + + name := "test.zip" + checksum := "a58b0193fcd0b85b1c85ca07899e063d" + + type test struct { + name string + sipName string + sipChecksum string + initialData func(context.Context, *testing.T, *db.Client) + wantErr string + } + + for _, tt := range []test{ + { + name: "Creates a SIP", + sipName: name, + sipChecksum: checksum, + }, + { + name: "Fails to create a SIP (missing checksum)", + sipName: name, + wantErr: "CreateSIP: checksum field is required", + }, + { + name: "Fails to create a SIP (missing name)", + sipChecksum: checksum, + wantErr: "CreateSIP: name field is required", + }, + { + name: "Fails to create a SIP (duplicated checksum)", + sipName: name, + sipChecksum: checksum, + initialData: func(ctx context.Context, t 
*testing.T, entc *db.Client) { + err := entc.SIP.Create(). + SetName("another.zip"). + SetChecksum(checksum). + Exec(ctx) + if err != nil { + t.Fatalf("Couldn't create initial data: %v", err) + } + }, + wantErr: "CreateSIP: there is already a SIP with the same checksum", + }, + } { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + entc, ps := setUpClient(t) + + if tt.initialData != nil { + tt.initialData(ctx, t, entc) + } + + err := ps.CreateSIP(ctx, tt.sipName, tt.sipChecksum) + if tt.wantErr != "" { + assert.Error(t, err, tt.wantErr) + return + } + + assert.NilError(t, err) + }) + } +} diff --git a/internal/persistence/ent/db/client.go b/internal/persistence/ent/db/client.go new file mode 100644 index 00000000..f5c2734d --- /dev/null +++ b/internal/persistence/ent/db/client.go @@ -0,0 +1,340 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + "log" + "reflect" + + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/migrate" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/sip" +) + +// Client is the client that holds all ent builders. +type Client struct { + config + // Schema is the client for creating, migrating and dropping schema. + Schema *migrate.Schema + // SIP is the client for interacting with the SIP builders. + SIP *SIPClient +} + +// NewClient creates a new client configured with the given options. +func NewClient(opts ...Option) *Client { + client := &Client{config: newConfig(opts...)} + client.init() + return client +} + +func (c *Client) init() { + c.Schema = migrate.NewSchema(c.driver) + c.SIP = NewSIPClient(c.config) +} + +type ( + // config is the configuration for the client and its builder. + config struct { + // driver used for executing database requests. + driver dialect.Driver + // debug enable a debug logging. 
+ debug bool + // log used for logging on debug mode. + log func(...any) + // hooks to execute on mutations. + hooks *hooks + // interceptors to execute on queries. + inters *inters + } + // Option function to configure the client. + Option func(*config) +) + +// newConfig creates a new config for the client. +func newConfig(opts ...Option) config { + cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} + cfg.options(opts...) + return cfg +} + +// options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.debug { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Debug enables debug logging on the ent.Driver. +func Debug() Option { + return func(c *config) { + c.debug = true + } +} + +// Log sets the logging function for debug mode. +func Log(fn func(...any)) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. +func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} + +// Open opens a database/sql.DB specified by the driver name and +// the data source name, and returns a new client attached to it. +// Optional parameters can be added for configuring the client. +func Open(driverName, dataSourceName string, options ...Option) (*Client, error) { + switch driverName { + case dialect.MySQL, dialect.Postgres, dialect.SQLite: + drv, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return NewClient(append(options, Driver(drv))...), nil + default: + return nil, fmt.Errorf("unsupported driver: %q", driverName) + } +} + +// ErrTxStarted is returned when trying to start a new transaction from a transactional client. +var ErrTxStarted = errors.New("db: cannot start a transaction within a transaction") + +// Tx returns a new transactional client. The provided context +// is used until the transaction is committed or rolled back. 
+func (c *Client) Tx(ctx context.Context) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, ErrTxStarted + } + tx, err := newTx(ctx, c.driver) + if err != nil { + return nil, fmt.Errorf("db: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = tx + return &Tx{ + ctx: ctx, + config: cfg, + SIP: NewSIPClient(cfg), + }, nil +} + +// BeginTx returns a transactional client with specified options. +func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, errors.New("ent: cannot start a transaction within a transaction") + } + tx, err := c.driver.(interface { + BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error) + }).BeginTx(ctx, opts) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = &txDriver{tx: tx, drv: c.driver} + return &Tx{ + ctx: ctx, + config: cfg, + SIP: NewSIPClient(cfg), + }, nil +} + +// Debug returns a new debug-client. It's used to get verbose logging on specific operations. +// +// client.Debug(). +// SIP. +// Query(). +// Count(ctx) +func (c *Client) Debug() *Client { + if c.debug { + return c + } + cfg := c.config + cfg.driver = dialect.Debug(c.driver, c.log) + client := &Client{config: cfg} + client.init() + return client +} + +// Close closes the database connection and prevents new queries from starting. +func (c *Client) Close() error { + return c.driver.Close() +} + +// Use adds the mutation hooks to all the entity clients. +// In order to add hooks to a specific client, call: `client.Node.Use(...)`. +func (c *Client) Use(hooks ...Hook) { + c.SIP.Use(hooks...) +} + +// Intercept adds the query interceptors to all the entity clients. +// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. +func (c *Client) Intercept(interceptors ...Interceptor) { + c.SIP.Intercept(interceptors...) 
+} + +// Mutate implements the ent.Mutator interface. +func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { + switch m := m.(type) { + case *SIPMutation: + return c.SIP.mutate(ctx, m) + default: + return nil, fmt.Errorf("db: unknown mutation type %T", m) + } +} + +// SIPClient is a client for the SIP schema. +type SIPClient struct { + config +} + +// NewSIPClient returns a client for the SIP from the given config. +func NewSIPClient(c config) *SIPClient { + return &SIPClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `sip.Hooks(f(g(h())))`. +func (c *SIPClient) Use(hooks ...Hook) { + c.hooks.SIP = append(c.hooks.SIP, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `sip.Intercept(f(g(h())))`. +func (c *SIPClient) Intercept(interceptors ...Interceptor) { + c.inters.SIP = append(c.inters.SIP, interceptors...) +} + +// Create returns a builder for creating a SIP entity. +func (c *SIPClient) Create() *SIPCreate { + mutation := newSIPMutation(c.config, OpCreate) + return &SIPCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of SIP entities. +func (c *SIPClient) CreateBulk(builders ...*SIPCreate) *SIPCreateBulk { + return &SIPCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *SIPClient) MapCreateBulk(slice any, setFunc func(*SIPCreate, int)) *SIPCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &SIPCreateBulk{err: fmt.Errorf("calling to SIPClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*SIPCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &SIPCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for SIP. +func (c *SIPClient) Update() *SIPUpdate { + mutation := newSIPMutation(c.config, OpUpdate) + return &SIPUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *SIPClient) UpdateOne(s *SIP) *SIPUpdateOne { + mutation := newSIPMutation(c.config, OpUpdateOne, withSIP(s)) + return &SIPUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *SIPClient) UpdateOneID(id int) *SIPUpdateOne { + mutation := newSIPMutation(c.config, OpUpdateOne, withSIPID(id)) + return &SIPUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for SIP. +func (c *SIPClient) Delete() *SIPDelete { + mutation := newSIPMutation(c.config, OpDelete) + return &SIPDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *SIPClient) DeleteOne(s *SIP) *SIPDeleteOne { + return c.DeleteOneID(s.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *SIPClient) DeleteOneID(id int) *SIPDeleteOne { + builder := c.Delete().Where(sip.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &SIPDeleteOne{builder} +} + +// Query returns a query builder for SIP. 
+func (c *SIPClient) Query() *SIPQuery { + return &SIPQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeSIP}, + inters: c.Interceptors(), + } +} + +// Get returns a SIP entity by its id. +func (c *SIPClient) Get(ctx context.Context, id int) (*SIP, error) { + return c.Query().Where(sip.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *SIPClient) GetX(ctx context.Context, id int) *SIP { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *SIPClient) Hooks() []Hook { + return c.hooks.SIP +} + +// Interceptors returns the client interceptors. +func (c *SIPClient) Interceptors() []Interceptor { + return c.inters.SIP +} + +func (c *SIPClient) mutate(ctx context.Context, m *SIPMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&SIPCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&SIPUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&SIPUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&SIPDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("db: unknown SIP mutation op: %q", m.Op()) + } +} + +// hooks and interceptors per client, for fast access. +type ( + hooks struct { + SIP []ent.Hook + } + inters struct { + SIP []ent.Interceptor + } +) diff --git a/internal/persistence/ent/db/ent.go b/internal/persistence/ent/db/ent.go new file mode 100644 index 00000000..83ef1438 --- /dev/null +++ b/internal/persistence/ent/db/ent.go @@ -0,0 +1,608 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package db + +import ( + "context" + "errors" + "fmt" + "reflect" + "sync" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/sip" +) + +// ent aliases to avoid import conflicts in user's code. +type ( + Op = ent.Op + Hook = ent.Hook + Value = ent.Value + Query = ent.Query + QueryContext = ent.QueryContext + Querier = ent.Querier + QuerierFunc = ent.QuerierFunc + Interceptor = ent.Interceptor + InterceptFunc = ent.InterceptFunc + Traverser = ent.Traverser + TraverseFunc = ent.TraverseFunc + Policy = ent.Policy + Mutator = ent.Mutator + Mutation = ent.Mutation + MutateFunc = ent.MutateFunc +) + +type clientCtxKey struct{} + +// FromContext returns a Client stored inside a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Client { + c, _ := ctx.Value(clientCtxKey{}).(*Client) + return c +} + +// NewContext returns a new context with the given Client attached. +func NewContext(parent context.Context, c *Client) context.Context { + return context.WithValue(parent, clientCtxKey{}, c) +} + +type txCtxKey struct{} + +// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. +func TxFromContext(ctx context.Context) *Tx { + tx, _ := ctx.Value(txCtxKey{}).(*Tx) + return tx +} + +// NewTxContext returns a new context with the given Tx attached. +func NewTxContext(parent context.Context, tx *Tx) context.Context { + return context.WithValue(parent, txCtxKey{}, tx) +} + +// OrderFunc applies an ordering on the sql selector. +// Deprecated: Use Asc/Desc functions or the package builders instead. +type OrderFunc func(*sql.Selector) + +var ( + initCheck sync.Once + columnCheck sql.ColumnCheck +) + +// checkColumn checks if the column exists in the given table. 
+func checkColumn(table, column string) error { + initCheck.Do(func() { + columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ + sip.Table: sip.ValidColumn, + }) + }) + return columnCheck(table, column) +} + +// Asc applies the given fields in ASC order. +func Asc(fields ...string) func(*sql.Selector) { + return func(s *sql.Selector) { + for _, f := range fields { + if err := checkColumn(s.TableName(), f); err != nil { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("db: %w", err)}) + } + s.OrderBy(sql.Asc(s.C(f))) + } + } +} + +// Desc applies the given fields in DESC order. +func Desc(fields ...string) func(*sql.Selector) { + return func(s *sql.Selector) { + for _, f := range fields { + if err := checkColumn(s.TableName(), f); err != nil { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("db: %w", err)}) + } + s.OrderBy(sql.Desc(s.C(f))) + } + } +} + +// AggregateFunc applies an aggregation step on the group-by traversal/selector. +type AggregateFunc func(*sql.Selector) string + +// As is a pseudo aggregation function for renaming another other functions with custom names. For example: +// +// GroupBy(field1, field2). +// Aggregate(db.As(db.Sum(field1), "sum_field1"), (db.As(db.Sum(field2), "sum_field2")). +// Scan(ctx, &v) +func As(fn AggregateFunc, end string) AggregateFunc { + return func(s *sql.Selector) string { + return sql.As(fn(s), end) + } +} + +// Count applies the "count" aggregation function on each group. +func Count() AggregateFunc { + return func(s *sql.Selector) string { + return sql.Count("*") + } +} + +// Max applies the "max" aggregation function on the given field of each group. 
+func Max(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("db: %w", err)}) + return "" + } + return sql.Max(s.C(field)) + } +} + +// Mean applies the "mean" aggregation function on the given field of each group. +func Mean(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("db: %w", err)}) + return "" + } + return sql.Avg(s.C(field)) + } +} + +// Min applies the "min" aggregation function on the given field of each group. +func Min(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("db: %w", err)}) + return "" + } + return sql.Min(s.C(field)) + } +} + +// Sum applies the "sum" aggregation function on the given field of each group. +func Sum(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("db: %w", err)}) + return "" + } + return sql.Sum(s.C(field)) + } +} + +// ValidationError returns when validating a field or edge fails. +type ValidationError struct { + Name string // Field or edge name. + err error +} + +// Error implements the error interface. +func (e *ValidationError) Error() string { + return e.err.Error() +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ValidationError) Unwrap() error { + return e.err +} + +// IsValidationError returns a boolean indicating whether the error is a validation error. 
+func IsValidationError(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *ValidationError
+	return errors.As(err, &e)
+}
+
+// NotFoundError returns when trying to fetch a specific entity and it was not found in the database.
+type NotFoundError struct {
+	label string
+}
+
+// Error implements the error interface.
+func (e *NotFoundError) Error() string {
+	return "db: " + e.label + " not found"
+}
+
+// IsNotFound returns a boolean indicating whether the error is a not found error.
+func IsNotFound(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *NotFoundError
+	return errors.As(err, &e)
+}
+
+// MaskNotFound masks not found error.
+func MaskNotFound(err error) error {
+	if IsNotFound(err) {
+		return nil
+	}
+	return err
+}
+
+// NotSingularError returns when trying to fetch a singular entity and more than one was found in the database.
+type NotSingularError struct {
+	label string
+}
+
+// Error implements the error interface.
+func (e *NotSingularError) Error() string {
+	return "db: " + e.label + " not singular"
+}
+
+// IsNotSingular returns a boolean indicating whether the error is a not singular error.
+func IsNotSingular(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *NotSingularError
+	return errors.As(err, &e)
+}
+
+// NotLoadedError returns when trying to get a node that was not loaded by the query.
+type NotLoadedError struct {
+	edge string
+}
+
+// Error implements the error interface.
+func (e *NotLoadedError) Error() string {
+	return "db: " + e.edge + " edge was not loaded"
+}
+
+// IsNotLoaded returns a boolean indicating whether the error is a not loaded error.
+func IsNotLoaded(err error) bool {
+	if err == nil {
+		return false
+	}
+	var e *NotLoadedError
+	return errors.As(err, &e)
+}
+
+// ConstraintError returns when trying to create/update one or more entities and
+// one or more of their constraints failed. For example, violation of edge or
+// field uniqueness. 
+type ConstraintError struct { + msg string + wrap error +} + +// Error implements the error interface. +func (e ConstraintError) Error() string { + return "db: constraint failed: " + e.msg +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ConstraintError) Unwrap() error { + return e.wrap +} + +// IsConstraintError returns a boolean indicating whether the error is a constraint failure. +func IsConstraintError(err error) bool { + if err == nil { + return false + } + var e *ConstraintError + return errors.As(err, &e) +} + +// selector embedded by the different Select/GroupBy builders. +type selector struct { + label string + flds *[]string + fns []AggregateFunc + scan func(context.Context, any) error +} + +// ScanX is like Scan, but panics if an error occurs. +func (s *selector) ScanX(ctx context.Context, v any) { + if err := s.scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from a selector. It is only allowed when selecting one field. +func (s *selector) Strings(ctx context.Context) ([]string, error) { + if len(*s.flds) > 1 { + return nil, errors.New("db: Strings is not achievable when selecting more than 1 field") + } + var v []string + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (s *selector) StringsX(ctx context.Context) []string { + v, err := s.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from a selector. It is only allowed when selecting one field. +func (s *selector) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = s.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("db: Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. 
+func (s *selector) StringX(ctx context.Context) string { + v, err := s.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from a selector. It is only allowed when selecting one field. +func (s *selector) Ints(ctx context.Context) ([]int, error) { + if len(*s.flds) > 1 { + return nil, errors.New("db: Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (s *selector) IntsX(ctx context.Context) []int { + v, err := s.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from a selector. It is only allowed when selecting one field. +func (s *selector) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = s.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("db: Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (s *selector) IntX(ctx context.Context) int { + v, err := s.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. +func (s *selector) Float64s(ctx context.Context) ([]float64, error) { + if len(*s.flds) > 1 { + return nil, errors.New("db: Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (s *selector) Float64sX(ctx context.Context) []float64 { + v, err := s.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. 
+func (s *selector) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = s.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("db: Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (s *selector) Float64X(ctx context.Context) float64 { + v, err := s.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from a selector. It is only allowed when selecting one field. +func (s *selector) Bools(ctx context.Context) ([]bool, error) { + if len(*s.flds) > 1 { + return nil, errors.New("db: Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (s *selector) BoolsX(ctx context.Context) []bool { + v, err := s.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from a selector. It is only allowed when selecting one field. +func (s *selector) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = s.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("db: Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (s *selector) BoolX(ctx context.Context) bool { + v, err := s.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +// withHooks invokes the builder operation with the given hooks, if any. 
+func withHooks[V Value, M any, PM interface { + *M + Mutation +}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) { + if len(hooks) == 0 { + return exec(ctx) + } + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutationT, ok := any(m).(PM) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + // Set the mutation to the builder. + *mutation = *mutationT + return exec(ctx) + }) + for i := len(hooks) - 1; i >= 0; i-- { + if hooks[i] == nil { + return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = hooks[i](mut) + } + v, err := mut.Mutate(ctx, mutation) + if err != nil { + return value, err + } + nv, ok := v.(V) + if !ok { + return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation) + } + return nv, nil +} + +// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist. 
+func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context { + if ent.QueryFromContext(ctx) == nil { + qc.Op = op + ctx = ent.NewQueryContext(ctx, qc) + } + return ctx +} + +func querierAll[V Value, Q interface { + sqlAll(context.Context, ...queryHook) (V, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlAll(ctx) + }) +} + +func querierCount[Q interface { + sqlCount(context.Context) (int, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlCount(ctx) + }) +} + +func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) { + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + rv, err := qr.Query(ctx, q) + if err != nil { + return v, err + } + vt, ok := rv.(V) + if !ok { + return v, fmt.Errorf("unexpected type %T returned from %T. 
expected type: %T", vt, q, v) + } + return vt, nil +} + +func scanWithInterceptors[Q1 ent.Query, Q2 interface { + sqlScan(context.Context, Q1, any) error +}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error { + rv := reflect.ValueOf(v) + var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q1) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + if err := selectOrGroup.sqlScan(ctx, query, v); err != nil { + return nil, err + } + if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() { + return rv.Elem().Interface(), nil + } + return v, nil + }) + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + vv, err := qr.Query(ctx, rootQuery) + if err != nil { + return err + } + switch rv2 := reflect.ValueOf(vv); { + case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer: + case rv.Type() == rv2.Type(): + rv.Elem().Set(rv2.Elem()) + case rv.Elem().Type() == rv2.Type(): + rv.Elem().Set(rv2) + } + return nil +} + +// queryHook describes an internal hook for the different sqlAll methods. +type queryHook func(context.Context, *sqlgraph.QuerySpec) diff --git a/internal/persistence/ent/db/enttest/enttest.go b/internal/persistence/ent/db/enttest/enttest.go new file mode 100644 index 00000000..172ef5a9 --- /dev/null +++ b/internal/persistence/ent/db/enttest/enttest.go @@ -0,0 +1,84 @@ +// Code generated by ent, DO NOT EDIT. + +package enttest + +import ( + "context" + + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db" + // required by schema hooks. + _ "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/runtime" + + "entgo.io/ent/dialect/sql/schema" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/migrate" +) + +type ( + // TestingT is the interface that is shared between + // testing.T and testing.B and used by enttest. 
+ TestingT interface { + FailNow() + Error(...any) + } + + // Option configures client creation. + Option func(*options) + + options struct { + opts []db.Option + migrateOpts []schema.MigrateOption + } +) + +// WithOptions forwards options to client creation. +func WithOptions(opts ...db.Option) Option { + return func(o *options) { + o.opts = append(o.opts, opts...) + } +} + +// WithMigrateOptions forwards options to auto migration. +func WithMigrateOptions(opts ...schema.MigrateOption) Option { + return func(o *options) { + o.migrateOpts = append(o.migrateOpts, opts...) + } +} + +func newOptions(opts []Option) *options { + o := &options{} + for _, opt := range opts { + opt(o) + } + return o +} + +// Open calls db.Open and auto-run migration. +func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *db.Client { + o := newOptions(opts) + c, err := db.Open(driverName, dataSourceName, o.opts...) + if err != nil { + t.Error(err) + t.FailNow() + } + migrateSchema(t, c, o) + return c +} + +// NewClient calls db.NewClient and auto-run migration. +func NewClient(t TestingT, opts ...Option) *db.Client { + o := newOptions(opts) + c := db.NewClient(o.opts...) + migrateSchema(t, c, o) + return c +} +func migrateSchema(t TestingT, c *db.Client, o *options) { + tables, err := schema.CopyTables(migrate.Tables) + if err != nil { + t.Error(err) + t.FailNow() + } + if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil { + t.Error(err) + t.FailNow() + } +} diff --git a/internal/persistence/ent/db/hook/hook.go b/internal/persistence/ent/db/hook/hook.go new file mode 100644 index 00000000..e78f2fc4 --- /dev/null +++ b/internal/persistence/ent/db/hook/hook.go @@ -0,0 +1,199 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package hook + +import ( + "context" + "fmt" + + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db" +) + +// The SIPFunc type is an adapter to allow the use of ordinary +// function as SIP mutator. +type SIPFunc func(context.Context, *db.SIPMutation) (db.Value, error) + +// Mutate calls f(ctx, m). +func (f SIPFunc) Mutate(ctx context.Context, m db.Mutation) (db.Value, error) { + if mv, ok := m.(*db.SIPMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *db.SIPMutation", m) +} + +// Condition is a hook condition function. +type Condition func(context.Context, db.Mutation) bool + +// And groups conditions with the AND operator. +func And(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m db.Mutation) bool { + if !first(ctx, m) || !second(ctx, m) { + return false + } + for _, cond := range rest { + if !cond(ctx, m) { + return false + } + } + return true + } +} + +// Or groups conditions with the OR operator. +func Or(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m db.Mutation) bool { + if first(ctx, m) || second(ctx, m) { + return true + } + for _, cond := range rest { + if cond(ctx, m) { + return true + } + } + return false + } +} + +// Not negates a given condition. +func Not(cond Condition) Condition { + return func(ctx context.Context, m db.Mutation) bool { + return !cond(ctx, m) + } +} + +// HasOp is a condition testing mutation operation. +func HasOp(op db.Op) Condition { + return func(_ context.Context, m db.Mutation) bool { + return m.Op().Is(op) + } +} + +// HasAddedFields is a condition validating `.AddedField` on fields. 
+func HasAddedFields(field string, fields ...string) Condition { + return func(_ context.Context, m db.Mutation) bool { + if _, exists := m.AddedField(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.AddedField(field); !exists { + return false + } + } + return true + } +} + +// HasClearedFields is a condition validating `.FieldCleared` on fields. +func HasClearedFields(field string, fields ...string) Condition { + return func(_ context.Context, m db.Mutation) bool { + if exists := m.FieldCleared(field); !exists { + return false + } + for _, field := range fields { + if exists := m.FieldCleared(field); !exists { + return false + } + } + return true + } +} + +// HasFields is a condition validating `.Field` on fields. +func HasFields(field string, fields ...string) Condition { + return func(_ context.Context, m db.Mutation) bool { + if _, exists := m.Field(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.Field(field); !exists { + return false + } + } + return true + } +} + +// If executes the given hook under condition. +// +// hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...))) +func If(hk db.Hook, cond Condition) db.Hook { + return func(next db.Mutator) db.Mutator { + return db.MutateFunc(func(ctx context.Context, m db.Mutation) (db.Value, error) { + if cond(ctx, m) { + return hk(next).Mutate(ctx, m) + } + return next.Mutate(ctx, m) + }) + } +} + +// On executes the given hook only for the given operation. +// +// hook.On(Log, db.Delete|db.Create) +func On(hk db.Hook, op db.Op) db.Hook { + return If(hk, HasOp(op)) +} + +// Unless skips the given hook only for the given operation. +// +// hook.Unless(Log, db.Update|db.UpdateOne) +func Unless(hk db.Hook, op db.Op) db.Hook { + return If(hk, Not(HasOp(op))) +} + +// FixedError is a hook returning a fixed error. 
+func FixedError(err error) db.Hook { + return func(db.Mutator) db.Mutator { + return db.MutateFunc(func(context.Context, db.Mutation) (db.Value, error) { + return nil, err + }) + } +} + +// Reject returns a hook that rejects all operations that match op. +// +// func (T) Hooks() []db.Hook { +// return []db.Hook{ +// Reject(db.Delete|db.Update), +// } +// } +func Reject(op db.Op) db.Hook { + hk := FixedError(fmt.Errorf("%s operation is not allowed", op)) + return On(hk, op) +} + +// Chain acts as a list of hooks and is effectively immutable. +// Once created, it will always hold the same set of hooks in the same order. +type Chain struct { + hooks []db.Hook +} + +// NewChain creates a new chain of hooks. +func NewChain(hooks ...db.Hook) Chain { + return Chain{append([]db.Hook(nil), hooks...)} +} + +// Hook chains the list of hooks and returns the final hook. +func (c Chain) Hook() db.Hook { + return func(mutator db.Mutator) db.Mutator { + for i := len(c.hooks) - 1; i >= 0; i-- { + mutator = c.hooks[i](mutator) + } + return mutator + } +} + +// Append extends a chain, adding the specified hook +// as the last ones in the mutation flow. +func (c Chain) Append(hooks ...db.Hook) Chain { + newHooks := make([]db.Hook, 0, len(c.hooks)+len(hooks)) + newHooks = append(newHooks, c.hooks...) + newHooks = append(newHooks, hooks...) + return Chain{newHooks} +} + +// Extend extends a chain, adding the specified chain +// as the last ones in the mutation flow. +func (c Chain) Extend(chain Chain) Chain { + return c.Append(chain.hooks...) +} diff --git a/internal/persistence/ent/db/migrate/migrate.go b/internal/persistence/ent/db/migrate/migrate.go new file mode 100644 index 00000000..d8d3bcb8 --- /dev/null +++ b/internal/persistence/ent/db/migrate/migrate.go @@ -0,0 +1,96 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package migrate + +import ( + "context" + "fmt" + "io" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql/schema" +) + +var ( + // WithGlobalUniqueID sets the universal ids options to the migration. + // If this option is enabled, ent migration will allocate a 1<<32 range + // for the ids of each entity (table). + // Note that this option cannot be applied on tables that already exist. + WithGlobalUniqueID = schema.WithGlobalUniqueID + // WithDropColumn sets the drop column option to the migration. + // If this option is enabled, ent migration will drop old columns + // that were used for both fields and edges. This defaults to false. + WithDropColumn = schema.WithDropColumn + // WithDropIndex sets the drop index option to the migration. + // If this option is enabled, ent migration will drop old indexes + // that were defined in the schema. This defaults to false. + // Note that unique constraints are defined using `UNIQUE INDEX`, + // and therefore, it's recommended to enable this option to get more + // flexibility in the schema changes. + WithDropIndex = schema.WithDropIndex + // WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true. + WithForeignKeys = schema.WithForeignKeys +) + +// Schema is the API for creating, migrating and dropping a schema. +type Schema struct { + drv dialect.Driver +} + +// NewSchema creates a new schema client. +func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} } + +// Create creates all schema resources. +func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error { + return Create(ctx, s, Tables, opts...) +} + +// Create creates all table resources using the given schema driver. +func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error { + migrate, err := schema.NewMigrate(s.drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %w", err) + } + return migrate.Create(ctx, tables...) 
+} + +// Diff compares the state read from a database connection or migration directory with +// the state defined by the Ent schema. Changes will be written to new migration files. +func Diff(ctx context.Context, url string, opts ...schema.MigrateOption) error { + return NamedDiff(ctx, url, "changes", opts...) +} + +// NamedDiff compares the state read from a database connection or migration directory with +// the state defined by the Ent schema. Changes will be written to new named migration files. +func NamedDiff(ctx context.Context, url, name string, opts ...schema.MigrateOption) error { + return schema.Diff(ctx, url, name, Tables, opts...) +} + +// Diff creates a migration file containing the statements to resolve the diff +// between the Ent schema and the connected database. +func (s *Schema) Diff(ctx context.Context, opts ...schema.MigrateOption) error { + migrate, err := schema.NewMigrate(s.drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %w", err) + } + return migrate.Diff(ctx, Tables...) +} + +// NamedDiff creates a named migration file containing the statements to resolve the diff +// between the Ent schema and the connected database. +func (s *Schema) NamedDiff(ctx context.Context, name string, opts ...schema.MigrateOption) error { + migrate, err := schema.NewMigrate(s.drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %w", err) + } + return migrate.NamedDiff(ctx, name, Tables...) +} + +// WriteTo writes the schema changes to w instead of running them against the database. +// +// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil { +// log.Fatal(err) +// } +func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error { + return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...) 
+} diff --git a/internal/persistence/ent/db/migrate/schema.go b/internal/persistence/ent/db/migrate/schema.go new file mode 100644 index 00000000..6a538a9f --- /dev/null +++ b/internal/persistence/ent/db/migrate/schema.go @@ -0,0 +1,34 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/dialect/sql/schema" + "entgo.io/ent/schema/field" +) + +var ( + // SipColumns holds the columns for the "sip" table. + SipColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "name", Type: field.TypeString, Size: 1024}, + {Name: "checksum", Type: field.TypeString, Unique: true, Size: 64}, + } + // SipTable holds the schema information for the "sip" table. + SipTable = &schema.Table{ + Name: "sip", + Columns: SipColumns, + PrimaryKey: []*schema.Column{SipColumns[0]}, + } + // Tables holds all the tables in the schema. + Tables = []*schema.Table{ + SipTable, + } +) + +func init() { + SipTable.Annotation = &entsql.Annotation{ + Table: "sip", + } +} diff --git a/internal/persistence/ent/db/mutation.go b/internal/persistence/ent/db/mutation.go new file mode 100644 index 00000000..8c41c358 --- /dev/null +++ b/internal/persistence/ent/db/mutation.go @@ -0,0 +1,407 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + "errors" + "fmt" + "sync" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/predicate" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/sip" +) + +const ( + // Operation types. + OpCreate = ent.OpCreate + OpDelete = ent.OpDelete + OpDeleteOne = ent.OpDeleteOne + OpUpdate = ent.OpUpdate + OpUpdateOne = ent.OpUpdateOne + + // Node types. + TypeSIP = "SIP" +) + +// SIPMutation represents an operation that mutates the SIP nodes in the graph. 
+type SIPMutation struct { + config + op Op + typ string + id *int + name *string + checksum *string + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*SIP, error) + predicates []predicate.SIP +} + +var _ ent.Mutation = (*SIPMutation)(nil) + +// sipOption allows management of the mutation configuration using functional options. +type sipOption func(*SIPMutation) + +// newSIPMutation creates new mutation for the SIP entity. +func newSIPMutation(c config, op Op, opts ...sipOption) *SIPMutation { + m := &SIPMutation{ + config: c, + op: op, + typ: TypeSIP, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withSIPID sets the ID field of the mutation. +func withSIPID(id int) sipOption { + return func(m *SIPMutation) { + var ( + err error + once sync.Once + value *SIP + ) + m.oldValue = func(ctx context.Context) (*SIP, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().SIP.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withSIP sets the old SIP of the mutation. +func withSIP(node *SIP) sipOption { + return func(m *SIPMutation) { + m.oldValue = func(context.Context) (*SIP, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m SIPMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. 
+func (m SIPMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("db: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *SIPMutation) ID() (id int, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *SIPMutation) IDs(ctx context.Context) ([]int, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().SIP.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetName sets the "name" field.
+func (m *SIPMutation) SetName(s string) {
+	m.name = &s
+}
+
+// Name returns the value of the "name" field in the mutation.
+func (m *SIPMutation) Name() (r string, exists bool) {
+	v := m.name
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldName returns the old "name" field's value of the SIP entity.
+// If the SIP object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *SIPMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *SIPMutation) ResetName() { + m.name = nil +} + +// SetChecksum sets the "checksum" field. +func (m *SIPMutation) SetChecksum(s string) { + m.checksum = &s +} + +// Checksum returns the value of the "checksum" field in the mutation. +func (m *SIPMutation) Checksum() (r string, exists bool) { + v := m.checksum + if v == nil { + return + } + return *v, true +} + +// OldChecksum returns the old "checksum" field's value of the SIP entity. +// If the SIP object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *SIPMutation) OldChecksum(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldChecksum is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldChecksum requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldChecksum: %w", err) + } + return oldValue.Checksum, nil +} + +// ResetChecksum resets all changes to the "checksum" field. +func (m *SIPMutation) ResetChecksum() { + m.checksum = nil +} + +// Where appends a list predicates to the SIPMutation builder. +func (m *SIPMutation) Where(ps ...predicate.SIP) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the SIPMutation builder. 
Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *SIPMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.SIP, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *SIPMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *SIPMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (SIP). +func (m *SIPMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *SIPMutation) Fields() []string { + fields := make([]string, 0, 2) + if m.name != nil { + fields = append(fields, sip.FieldName) + } + if m.checksum != nil { + fields = append(fields, sip.FieldChecksum) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *SIPMutation) Field(name string) (ent.Value, bool) { + switch name { + case sip.FieldName: + return m.Name() + case sip.FieldChecksum: + return m.Checksum() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *SIPMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case sip.FieldName: + return m.OldName(ctx) + case sip.FieldChecksum: + return m.OldChecksum(ctx) + } + return nil, fmt.Errorf("unknown SIP field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *SIPMutation) SetField(name string, value ent.Value) error { + switch name { + case sip.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case sip.FieldChecksum: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetChecksum(v) + return nil + } + return fmt.Errorf("unknown SIP field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *SIPMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *SIPMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *SIPMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown SIP numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *SIPMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *SIPMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *SIPMutation) ClearField(name string) error { + return fmt.Errorf("unknown SIP nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. 
+// It returns an error if the field is not defined in the schema. +func (m *SIPMutation) ResetField(name string) error { + switch name { + case sip.FieldName: + m.ResetName() + return nil + case sip.FieldChecksum: + m.ResetChecksum() + return nil + } + return fmt.Errorf("unknown SIP field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *SIPMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *SIPMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *SIPMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *SIPMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *SIPMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *SIPMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *SIPMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown SIP unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. 
+func (m *SIPMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown SIP edge %s", name) +} diff --git a/internal/persistence/ent/db/predicate/predicate.go b/internal/persistence/ent/db/predicate/predicate.go new file mode 100644 index 00000000..74754950 --- /dev/null +++ b/internal/persistence/ent/db/predicate/predicate.go @@ -0,0 +1,10 @@ +// Code generated by ent, DO NOT EDIT. + +package predicate + +import ( + "entgo.io/ent/dialect/sql" +) + +// SIP is the predicate function for sip builders. +type SIP func(*sql.Selector) diff --git a/internal/persistence/ent/db/runtime.go b/internal/persistence/ent/db/runtime.go new file mode 100644 index 00000000..dc87aeaa --- /dev/null +++ b/internal/persistence/ent/db/runtime.go @@ -0,0 +1,9 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +// The init function reads all schema descriptors with runtime code +// (default values, validators, hooks and policies) and stitches it +// to their package variables. +func init() { +} diff --git a/internal/persistence/ent/db/runtime/runtime.go b/internal/persistence/ent/db/runtime/runtime.go new file mode 100644 index 00000000..b5d89dbf --- /dev/null +++ b/internal/persistence/ent/db/runtime/runtime.go @@ -0,0 +1,10 @@ +// Code generated by ent, DO NOT EDIT. + +package runtime + +// The schema-stitching logic is generated in github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/runtime.go + +const ( + Version = "v0.14.1" // Version of ent codegen. + Sum = "h1:fUERL506Pqr92EPHJqr8EYxbPioflJo6PudkrEA8a/s=" // Sum of ent codegen. +) diff --git a/internal/persistence/ent/db/sip.go b/internal/persistence/ent/db/sip.go new file mode 100644 index 00000000..4dae6424 --- /dev/null +++ b/internal/persistence/ent/db/sip.go @@ -0,0 +1,114 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package db + +import ( + "fmt" + "strings" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/sip" +) + +// SIP is the model entity for the SIP schema. +type SIP struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // Checksum holds the value of the "checksum" field. + Checksum string `json:"checksum,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*SIP) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case sip.FieldID: + values[i] = new(sql.NullInt64) + case sip.FieldName, sip.FieldChecksum: + values[i] = new(sql.NullString) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the SIP fields. 
+func (s *SIP) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case sip.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + s.ID = int(value.Int64) + case sip.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + s.Name = value.String + } + case sip.FieldChecksum: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field checksum", values[i]) + } else if value.Valid { + s.Checksum = value.String + } + default: + s.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the SIP. +// This includes values selected through modifiers, order, etc. +func (s *SIP) Value(name string) (ent.Value, error) { + return s.selectValues.Get(name) +} + +// Update returns a builder for updating this SIP. +// Note that you need to call SIP.Unwrap() before calling this method if this SIP +// was returned from a transaction, and the transaction was committed or rolled back. +func (s *SIP) Update() *SIPUpdateOne { + return NewSIPClient(s.config).UpdateOne(s) +} + +// Unwrap unwraps the SIP entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (s *SIP) Unwrap() *SIP { + _tx, ok := s.config.driver.(*txDriver) + if !ok { + panic("db: SIP is not a transactional entity") + } + s.config.driver = _tx.drv + return s +} + +// String implements the fmt.Stringer. 
+func (s *SIP) String() string { + var builder strings.Builder + builder.WriteString("SIP(") + builder.WriteString(fmt.Sprintf("id=%v, ", s.ID)) + builder.WriteString("name=") + builder.WriteString(s.Name) + builder.WriteString(", ") + builder.WriteString("checksum=") + builder.WriteString(s.Checksum) + builder.WriteByte(')') + return builder.String() +} + +// SIPs is a parsable slice of SIP. +type SIPs []*SIP diff --git a/internal/persistence/ent/db/sip/sip.go b/internal/persistence/ent/db/sip/sip.go new file mode 100644 index 00000000..456b2a20 --- /dev/null +++ b/internal/persistence/ent/db/sip/sip.go @@ -0,0 +1,55 @@ +// Code generated by ent, DO NOT EDIT. + +package sip + +import ( + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the sip type in the database. + Label = "sip" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldChecksum holds the string denoting the checksum field in the database. + FieldChecksum = "checksum" + // Table holds the table name of the sip in the database. + Table = "sip" +) + +// Columns holds all SQL columns for sip fields. +var Columns = []string{ + FieldID, + FieldName, + FieldChecksum, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// OrderOption defines the ordering options for the SIP queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByName orders the results by the name field. 
+func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByChecksum orders the results by the checksum field. +func ByChecksum(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldChecksum, opts...).ToFunc() +} diff --git a/internal/persistence/ent/db/sip/where.go b/internal/persistence/ent/db/sip/where.go new file mode 100644 index 00000000..a9c2c41c --- /dev/null +++ b/internal/persistence/ent/db/sip/where.go @@ -0,0 +1,208 @@ +// Code generated by ent, DO NOT EDIT. + +package sip + +import ( + "entgo.io/ent/dialect/sql" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.SIP { + return predicate.SIP(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.SIP { + return predicate.SIP(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.SIP { + return predicate.SIP(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.SIP { + return predicate.SIP(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.SIP { + return predicate.SIP(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.SIP { + return predicate.SIP(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. 
+func IDLTE(id int) predicate.SIP { + return predicate.SIP(sql.FieldLTE(FieldID, id)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldName, v)) +} + +// Checksum applies equality check predicate on the "checksum" field. It's identical to ChecksumEQ. +func Checksum(v string) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldChecksum, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.SIP { + return predicate.SIP(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.SIP { + return predicate.SIP(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.SIP { + return predicate.SIP(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.SIP { + return predicate.SIP(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.SIP { + return predicate.SIP(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.SIP { + return predicate.SIP(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.SIP { + return predicate.SIP(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.SIP { + return predicate.SIP(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. 
+func NameHasPrefix(v string) predicate.SIP { + return predicate.SIP(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.SIP { + return predicate.SIP(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.SIP { + return predicate.SIP(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.SIP { + return predicate.SIP(sql.FieldContainsFold(FieldName, v)) +} + +// ChecksumEQ applies the EQ predicate on the "checksum" field. +func ChecksumEQ(v string) predicate.SIP { + return predicate.SIP(sql.FieldEQ(FieldChecksum, v)) +} + +// ChecksumNEQ applies the NEQ predicate on the "checksum" field. +func ChecksumNEQ(v string) predicate.SIP { + return predicate.SIP(sql.FieldNEQ(FieldChecksum, v)) +} + +// ChecksumIn applies the In predicate on the "checksum" field. +func ChecksumIn(vs ...string) predicate.SIP { + return predicate.SIP(sql.FieldIn(FieldChecksum, vs...)) +} + +// ChecksumNotIn applies the NotIn predicate on the "checksum" field. +func ChecksumNotIn(vs ...string) predicate.SIP { + return predicate.SIP(sql.FieldNotIn(FieldChecksum, vs...)) +} + +// ChecksumGT applies the GT predicate on the "checksum" field. +func ChecksumGT(v string) predicate.SIP { + return predicate.SIP(sql.FieldGT(FieldChecksum, v)) +} + +// ChecksumGTE applies the GTE predicate on the "checksum" field. +func ChecksumGTE(v string) predicate.SIP { + return predicate.SIP(sql.FieldGTE(FieldChecksum, v)) +} + +// ChecksumLT applies the LT predicate on the "checksum" field. +func ChecksumLT(v string) predicate.SIP { + return predicate.SIP(sql.FieldLT(FieldChecksum, v)) +} + +// ChecksumLTE applies the LTE predicate on the "checksum" field. 
+func ChecksumLTE(v string) predicate.SIP { + return predicate.SIP(sql.FieldLTE(FieldChecksum, v)) +} + +// ChecksumContains applies the Contains predicate on the "checksum" field. +func ChecksumContains(v string) predicate.SIP { + return predicate.SIP(sql.FieldContains(FieldChecksum, v)) +} + +// ChecksumHasPrefix applies the HasPrefix predicate on the "checksum" field. +func ChecksumHasPrefix(v string) predicate.SIP { + return predicate.SIP(sql.FieldHasPrefix(FieldChecksum, v)) +} + +// ChecksumHasSuffix applies the HasSuffix predicate on the "checksum" field. +func ChecksumHasSuffix(v string) predicate.SIP { + return predicate.SIP(sql.FieldHasSuffix(FieldChecksum, v)) +} + +// ChecksumEqualFold applies the EqualFold predicate on the "checksum" field. +func ChecksumEqualFold(v string) predicate.SIP { + return predicate.SIP(sql.FieldEqualFold(FieldChecksum, v)) +} + +// ChecksumContainsFold applies the ContainsFold predicate on the "checksum" field. +func ChecksumContainsFold(v string) predicate.SIP { + return predicate.SIP(sql.FieldContainsFold(FieldChecksum, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.SIP) predicate.SIP { + return predicate.SIP(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.SIP) predicate.SIP { + return predicate.SIP(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.SIP) predicate.SIP { + return predicate.SIP(sql.NotPredicates(p)) +} diff --git a/internal/persistence/ent/db/sip_create.go b/internal/persistence/ent/db/sip_create.go new file mode 100644 index 00000000..f8a218ad --- /dev/null +++ b/internal/persistence/ent/db/sip_create.go @@ -0,0 +1,196 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package db + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/sip" +) + +// SIPCreate is the builder for creating a SIP entity. +type SIPCreate struct { + config + mutation *SIPMutation + hooks []Hook +} + +// SetName sets the "name" field. +func (sc *SIPCreate) SetName(s string) *SIPCreate { + sc.mutation.SetName(s) + return sc +} + +// SetChecksum sets the "checksum" field. +func (sc *SIPCreate) SetChecksum(s string) *SIPCreate { + sc.mutation.SetChecksum(s) + return sc +} + +// Mutation returns the SIPMutation object of the builder. +func (sc *SIPCreate) Mutation() *SIPMutation { + return sc.mutation +} + +// Save creates the SIP in the database. +func (sc *SIPCreate) Save(ctx context.Context) (*SIP, error) { + return withHooks(ctx, sc.sqlSave, sc.mutation, sc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (sc *SIPCreate) SaveX(ctx context.Context) *SIP { + v, err := sc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (sc *SIPCreate) Exec(ctx context.Context) error { + _, err := sc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (sc *SIPCreate) ExecX(ctx context.Context) { + if err := sc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (sc *SIPCreate) check() error { + if _, ok := sc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`db: missing required field "SIP.name"`)} + } + if _, ok := sc.mutation.Checksum(); !ok { + return &ValidationError{Name: "checksum", err: errors.New(`db: missing required field "SIP.checksum"`)} + } + return nil +} + +func (sc *SIPCreate) sqlSave(ctx context.Context) (*SIP, error) { + if err := sc.check(); err != nil { + return nil, err + } + _node, _spec := sc.createSpec() + if err := sqlgraph.CreateNode(ctx, sc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + sc.mutation.id = &_node.ID + sc.mutation.done = true + return _node, nil +} + +func (sc *SIPCreate) createSpec() (*SIP, *sqlgraph.CreateSpec) { + var ( + _node = &SIP{config: sc.config} + _spec = sqlgraph.NewCreateSpec(sip.Table, sqlgraph.NewFieldSpec(sip.FieldID, field.TypeInt)) + ) + if value, ok := sc.mutation.Name(); ok { + _spec.SetField(sip.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := sc.mutation.Checksum(); ok { + _spec.SetField(sip.FieldChecksum, field.TypeString, value) + _node.Checksum = value + } + return _node, _spec +} + +// SIPCreateBulk is the builder for creating many SIP entities in bulk. +type SIPCreateBulk struct { + config + err error + builders []*SIPCreate +} + +// Save creates the SIP entities in the database. 
+func (scb *SIPCreateBulk) Save(ctx context.Context) ([]*SIP, error) { + if scb.err != nil { + return nil, scb.err + } + specs := make([]*sqlgraph.CreateSpec, len(scb.builders)) + nodes := make([]*SIP, len(scb.builders)) + mutators := make([]Mutator, len(scb.builders)) + for i := range scb.builders { + func(i int, root context.Context) { + builder := scb.builders[i] + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*SIPMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, scb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, scb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, scb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (scb *SIPCreateBulk) SaveX(ctx context.Context) []*SIP { + v, err := scb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (scb *SIPCreateBulk) Exec(ctx context.Context) error { + _, err := scb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (scb *SIPCreateBulk) ExecX(ctx context.Context) { + if err := scb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/internal/persistence/ent/db/sip_delete.go b/internal/persistence/ent/db/sip_delete.go new file mode 100644 index 00000000..beb44bc3 --- /dev/null +++ b/internal/persistence/ent/db/sip_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package db + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/predicate" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/sip" +) + +// SIPDelete is the builder for deleting a SIP entity. +type SIPDelete struct { + config + hooks []Hook + mutation *SIPMutation +} + +// Where appends a list predicates to the SIPDelete builder. +func (sd *SIPDelete) Where(ps ...predicate.SIP) *SIPDelete { + sd.mutation.Where(ps...) + return sd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (sd *SIPDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, sd.sqlExec, sd.mutation, sd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (sd *SIPDelete) ExecX(ctx context.Context) int { + n, err := sd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (sd *SIPDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(sip.Table, sqlgraph.NewFieldSpec(sip.FieldID, field.TypeInt)) + if ps := sd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, sd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + sd.mutation.done = true + return affected, err +} + +// SIPDeleteOne is the builder for deleting a single SIP entity. +type SIPDeleteOne struct { + sd *SIPDelete +} + +// Where appends a list predicates to the SIPDelete builder. +func (sdo *SIPDeleteOne) Where(ps ...predicate.SIP) *SIPDeleteOne { + sdo.sd.mutation.Where(ps...) + return sdo +} + +// Exec executes the deletion query. +func (sdo *SIPDeleteOne) Exec(ctx context.Context) error { + n, err := sdo.sd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{sip.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (sdo *SIPDeleteOne) ExecX(ctx context.Context) { + if err := sdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/internal/persistence/ent/db/sip_query.go b/internal/persistence/ent/db/sip_query.go new file mode 100644 index 00000000..afc27d6a --- /dev/null +++ b/internal/persistence/ent/db/sip_query.go @@ -0,0 +1,527 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package db + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/predicate" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/sip" +) + +// SIPQuery is the builder for querying SIP entities. +type SIPQuery struct { + config + ctx *QueryContext + order []sip.OrderOption + inters []Interceptor + predicates []predicate.SIP + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the SIPQuery builder. +func (sq *SIPQuery) Where(ps ...predicate.SIP) *SIPQuery { + sq.predicates = append(sq.predicates, ps...) + return sq +} + +// Limit the number of records to be returned by this query. +func (sq *SIPQuery) Limit(limit int) *SIPQuery { + sq.ctx.Limit = &limit + return sq +} + +// Offset to start from. +func (sq *SIPQuery) Offset(offset int) *SIPQuery { + sq.ctx.Offset = &offset + return sq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (sq *SIPQuery) Unique(unique bool) *SIPQuery { + sq.ctx.Unique = &unique + return sq +} + +// Order specifies how the records should be ordered. +func (sq *SIPQuery) Order(o ...sip.OrderOption) *SIPQuery { + sq.order = append(sq.order, o...) + return sq +} + +// First returns the first SIP entity from the query. +// Returns a *NotFoundError when no SIP was found. +func (sq *SIPQuery) First(ctx context.Context) (*SIP, error) { + nodes, err := sq.Limit(1).All(setContextOp(ctx, sq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{sip.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (sq *SIPQuery) FirstX(ctx context.Context) *SIP { + node, err := sq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first SIP ID from the query. +// Returns a *NotFoundError when no SIP ID was found. +func (sq *SIPQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = sq.Limit(1).IDs(setContextOp(ctx, sq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{sip.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (sq *SIPQuery) FirstIDX(ctx context.Context) int { + id, err := sq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single SIP entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one SIP entity is found. +// Returns a *NotFoundError when no SIP entities are found. +func (sq *SIPQuery) Only(ctx context.Context) (*SIP, error) { + nodes, err := sq.Limit(2).All(setContextOp(ctx, sq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{sip.Label} + default: + return nil, &NotSingularError{sip.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (sq *SIPQuery) OnlyX(ctx context.Context) *SIP { + node, err := sq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only SIP ID in the query. +// Returns a *NotSingularError when more than one SIP ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (sq *SIPQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = sq.Limit(2).IDs(setContextOp(ctx, sq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{sip.Label} + default: + err = &NotSingularError{sip.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (sq *SIPQuery) OnlyIDX(ctx context.Context) int { + id, err := sq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of SIPs. +func (sq *SIPQuery) All(ctx context.Context) ([]*SIP, error) { + ctx = setContextOp(ctx, sq.ctx, ent.OpQueryAll) + if err := sq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*SIP, *SIPQuery]() + return withInterceptors[[]*SIP](ctx, sq, qr, sq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (sq *SIPQuery) AllX(ctx context.Context) []*SIP { + nodes, err := sq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of SIP IDs. +func (sq *SIPQuery) IDs(ctx context.Context) (ids []int, err error) { + if sq.ctx.Unique == nil && sq.path != nil { + sq.Unique(true) + } + ctx = setContextOp(ctx, sq.ctx, ent.OpQueryIDs) + if err = sq.Select(sip.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (sq *SIPQuery) IDsX(ctx context.Context) []int { + ids, err := sq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (sq *SIPQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, sq.ctx, ent.OpQueryCount) + if err := sq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, sq, querierCount[*SIPQuery](), sq.inters) +} + +// CountX is like Count, but panics if an error occurs. 
+func (sq *SIPQuery) CountX(ctx context.Context) int { + count, err := sq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (sq *SIPQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, sq.ctx, ent.OpQueryExist) + switch _, err := sq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("db: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (sq *SIPQuery) ExistX(ctx context.Context) bool { + exist, err := sq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the SIPQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (sq *SIPQuery) Clone() *SIPQuery { + if sq == nil { + return nil + } + return &SIPQuery{ + config: sq.config, + ctx: sq.ctx.Clone(), + order: append([]sip.OrderOption{}, sq.order...), + inters: append([]Interceptor{}, sq.inters...), + predicates: append([]predicate.SIP{}, sq.predicates...), + // clone intermediate query. + sql: sq.sql.Clone(), + path: sq.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Name string `json:"name,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.SIP.Query(). +// GroupBy(sip.FieldName). +// Aggregate(db.Count()). +// Scan(ctx, &v) +func (sq *SIPQuery) GroupBy(field string, fields ...string) *SIPGroupBy { + sq.ctx.Fields = append([]string{field}, fields...) 
+ grbuild := &SIPGroupBy{build: sq} + grbuild.flds = &sq.ctx.Fields + grbuild.label = sip.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Name string `json:"name,omitempty"` +// } +// +// client.SIP.Query(). +// Select(sip.FieldName). +// Scan(ctx, &v) +func (sq *SIPQuery) Select(fields ...string) *SIPSelect { + sq.ctx.Fields = append(sq.ctx.Fields, fields...) + sbuild := &SIPSelect{SIPQuery: sq} + sbuild.label = sip.Label + sbuild.flds, sbuild.scan = &sq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a SIPSelect configured with the given aggregations. +func (sq *SIPQuery) Aggregate(fns ...AggregateFunc) *SIPSelect { + return sq.Select().Aggregate(fns...) +} + +func (sq *SIPQuery) prepareQuery(ctx context.Context) error { + for _, inter := range sq.inters { + if inter == nil { + return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, sq); err != nil { + return err + } + } + } + for _, f := range sq.ctx.Fields { + if !sip.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + } + if sq.path != nil { + prev, err := sq.path(ctx) + if err != nil { + return err + } + sq.sql = prev + } + return nil +} + +func (sq *SIPQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*SIP, error) { + var ( + nodes = []*SIP{} + _spec = sq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*SIP).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &SIP{config: sq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, 
sq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (sq *SIPQuery) sqlCount(ctx context.Context) (int, error) { + _spec := sq.querySpec() + _spec.Node.Columns = sq.ctx.Fields + if len(sq.ctx.Fields) > 0 { + _spec.Unique = sq.ctx.Unique != nil && *sq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, sq.driver, _spec) +} + +func (sq *SIPQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(sip.Table, sip.Columns, sqlgraph.NewFieldSpec(sip.FieldID, field.TypeInt)) + _spec.From = sq.sql + if unique := sq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if sq.path != nil { + _spec.Unique = true + } + if fields := sq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, sip.FieldID) + for i := range fields { + if fields[i] != sip.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := sq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := sq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := sq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := sq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (sq *SIPQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(sq.driver.Dialect()) + t1 := builder.Table(sip.Table) + columns := sq.ctx.Fields + if len(columns) == 0 { + columns = sip.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if sq.sql != nil { + selector = sq.sql + selector.Select(selector.Columns(columns...)...) 
+ } + if sq.ctx.Unique != nil && *sq.ctx.Unique { + selector.Distinct() + } + for _, p := range sq.predicates { + p(selector) + } + for _, p := range sq.order { + p(selector) + } + if offset := sq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := sq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// SIPGroupBy is the group-by builder for SIP entities. +type SIPGroupBy struct { + selector + build *SIPQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (sgb *SIPGroupBy) Aggregate(fns ...AggregateFunc) *SIPGroupBy { + sgb.fns = append(sgb.fns, fns...) + return sgb +} + +// Scan applies the selector query and scans the result into the given value. +func (sgb *SIPGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, sgb.build.ctx, ent.OpQueryGroupBy) + if err := sgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*SIPQuery, *SIPGroupBy](ctx, sgb.build, sgb, sgb.build.inters, v) +} + +func (sgb *SIPGroupBy) sqlScan(ctx context.Context, root *SIPQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(sgb.fns)) + for _, fn := range sgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*sgb.flds)+len(sgb.fns)) + for _, f := range *sgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*sgb.flds...)...) 
+ if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := sgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// SIPSelect is the builder for selecting fields of SIP entities. +type SIPSelect struct { + *SIPQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ss *SIPSelect) Aggregate(fns ...AggregateFunc) *SIPSelect { + ss.fns = append(ss.fns, fns...) + return ss +} + +// Scan applies the selector query and scans the result into the given value. +func (ss *SIPSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ss.ctx, ent.OpQuerySelect) + if err := ss.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*SIPQuery, *SIPSelect](ctx, ss.SIPQuery, ss, ss.inters, v) +} + +func (ss *SIPSelect) sqlScan(ctx context.Context, root *SIPQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ss.fns)) + for _, fn := range ss.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ss.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ss.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/internal/persistence/ent/db/sip_update.go b/internal/persistence/ent/db/sip_update.go new file mode 100644 index 00000000..cdadf6d1 --- /dev/null +++ b/internal/persistence/ent/db/sip_update.go @@ -0,0 +1,243 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package db + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/predicate" + "github.com/artefactual-sdps/preprocessing-sfa/internal/persistence/ent/db/sip" +) + +// SIPUpdate is the builder for updating SIP entities. +type SIPUpdate struct { + config + hooks []Hook + mutation *SIPMutation +} + +// Where appends a list predicates to the SIPUpdate builder. +func (su *SIPUpdate) Where(ps ...predicate.SIP) *SIPUpdate { + su.mutation.Where(ps...) + return su +} + +// SetName sets the "name" field. +func (su *SIPUpdate) SetName(s string) *SIPUpdate { + su.mutation.SetName(s) + return su +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (su *SIPUpdate) SetNillableName(s *string) *SIPUpdate { + if s != nil { + su.SetName(*s) + } + return su +} + +// SetChecksum sets the "checksum" field. +func (su *SIPUpdate) SetChecksum(s string) *SIPUpdate { + su.mutation.SetChecksum(s) + return su +} + +// SetNillableChecksum sets the "checksum" field if the given value is not nil. +func (su *SIPUpdate) SetNillableChecksum(s *string) *SIPUpdate { + if s != nil { + su.SetChecksum(*s) + } + return su +} + +// Mutation returns the SIPMutation object of the builder. +func (su *SIPUpdate) Mutation() *SIPMutation { + return su.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (su *SIPUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, su.sqlSave, su.mutation, su.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (su *SIPUpdate) SaveX(ctx context.Context) int { + affected, err := su.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. 
+func (su *SIPUpdate) Exec(ctx context.Context) error { + _, err := su.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (su *SIPUpdate) ExecX(ctx context.Context) { + if err := su.Exec(ctx); err != nil { + panic(err) + } +} + +func (su *SIPUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(sip.Table, sip.Columns, sqlgraph.NewFieldSpec(sip.FieldID, field.TypeInt)) + if ps := su.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := su.mutation.Name(); ok { + _spec.SetField(sip.FieldName, field.TypeString, value) + } + if value, ok := su.mutation.Checksum(); ok { + _spec.SetField(sip.FieldChecksum, field.TypeString, value) + } + if n, err = sqlgraph.UpdateNodes(ctx, su.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{sip.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + su.mutation.done = true + return n, nil +} + +// SIPUpdateOne is the builder for updating a single SIP entity. +type SIPUpdateOne struct { + config + fields []string + hooks []Hook + mutation *SIPMutation +} + +// SetName sets the "name" field. +func (suo *SIPUpdateOne) SetName(s string) *SIPUpdateOne { + suo.mutation.SetName(s) + return suo +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (suo *SIPUpdateOne) SetNillableName(s *string) *SIPUpdateOne { + if s != nil { + suo.SetName(*s) + } + return suo +} + +// SetChecksum sets the "checksum" field. +func (suo *SIPUpdateOne) SetChecksum(s string) *SIPUpdateOne { + suo.mutation.SetChecksum(s) + return suo +} + +// SetNillableChecksum sets the "checksum" field if the given value is not nil. 
+func (suo *SIPUpdateOne) SetNillableChecksum(s *string) *SIPUpdateOne { + if s != nil { + suo.SetChecksum(*s) + } + return suo +} + +// Mutation returns the SIPMutation object of the builder. +func (suo *SIPUpdateOne) Mutation() *SIPMutation { + return suo.mutation +} + +// Where appends a list predicates to the SIPUpdate builder. +func (suo *SIPUpdateOne) Where(ps ...predicate.SIP) *SIPUpdateOne { + suo.mutation.Where(ps...) + return suo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (suo *SIPUpdateOne) Select(field string, fields ...string) *SIPUpdateOne { + suo.fields = append([]string{field}, fields...) + return suo +} + +// Save executes the query and returns the updated SIP entity. +func (suo *SIPUpdateOne) Save(ctx context.Context) (*SIP, error) { + return withHooks(ctx, suo.sqlSave, suo.mutation, suo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (suo *SIPUpdateOne) SaveX(ctx context.Context) *SIP { + node, err := suo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (suo *SIPUpdateOne) Exec(ctx context.Context) error { + _, err := suo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (suo *SIPUpdateOne) ExecX(ctx context.Context) { + if err := suo.Exec(ctx); err != nil { + panic(err) + } +} + +func (suo *SIPUpdateOne) sqlSave(ctx context.Context) (_node *SIP, err error) { + _spec := sqlgraph.NewUpdateSpec(sip.Table, sip.Columns, sqlgraph.NewFieldSpec(sip.FieldID, field.TypeInt)) + id, ok := suo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`db: missing "SIP.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := suo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, sip.FieldID) + for _, f := range fields { + if !sip.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)} + } + if f != sip.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := suo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := suo.mutation.Name(); ok { + _spec.SetField(sip.FieldName, field.TypeString, value) + } + if value, ok := suo.mutation.Checksum(); ok { + _spec.SetField(sip.FieldChecksum, field.TypeString, value) + } + _node = &SIP{config: suo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, suo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{sip.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + suo.mutation.done = true + return _node, nil +} diff --git a/internal/persistence/ent/db/tx.go b/internal/persistence/ent/db/tx.go new file mode 100644 index 00000000..1f738185 --- /dev/null +++ b/internal/persistence/ent/db/tx.go @@ -0,0 +1,210 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package db + +import ( + "context" + "sync" + + "entgo.io/ent/dialect" +) + +// Tx is a transactional client that is created by calling Client.Tx(). +type Tx struct { + config + // SIP is the client for interacting with the SIP builders. + SIP *SIPClient + + // lazily loaded. + client *Client + clientOnce sync.Once + // ctx lives for the life of the transaction. It is + // the same context used by the underlying connection. + ctx context.Context +} + +type ( + // Committer is the interface that wraps the Commit method. + Committer interface { + Commit(context.Context, *Tx) error + } + + // The CommitFunc type is an adapter to allow the use of ordinary + // function as a Committer. If f is a function with the appropriate + // signature, CommitFunc(f) is a Committer that calls f. + CommitFunc func(context.Context, *Tx) error + + // CommitHook defines the "commit middleware". A function that gets a Committer + // and returns a Committer. For example: + // + // hook := func(next ent.Committer) ent.Committer { + // return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Commit(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + CommitHook func(Committer) Committer +) + +// Commit calls f(ctx, m). +func (f CommitFunc) Commit(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Commit commits the transaction. +func (tx *Tx) Commit() error { + txDriver := tx.config.driver.(*txDriver) + var fn Committer = CommitFunc(func(context.Context, *Tx) error { + return txDriver.tx.Commit() + }) + txDriver.mu.Lock() + hooks := append([]CommitHook(nil), txDriver.onCommit...) + txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Commit(tx.ctx, tx) +} + +// OnCommit adds a hook to call on commit. 
+func (tx *Tx) OnCommit(f CommitHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onCommit = append(txDriver.onCommit, f) + txDriver.mu.Unlock() +} + +type ( + // Rollbacker is the interface that wraps the Rollback method. + Rollbacker interface { + Rollback(context.Context, *Tx) error + } + + // The RollbackFunc type is an adapter to allow the use of ordinary + // function as a Rollbacker. If f is a function with the appropriate + // signature, RollbackFunc(f) is a Rollbacker that calls f. + RollbackFunc func(context.Context, *Tx) error + + // RollbackHook defines the "rollback middleware". A function that gets a Rollbacker + // and returns a Rollbacker. For example: + // + // hook := func(next ent.Rollbacker) ent.Rollbacker { + // return ent.RollbackFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Rollback(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + RollbackHook func(Rollbacker) Rollbacker +) + +// Rollback calls f(ctx, m). +func (f RollbackFunc) Rollback(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Rollback rollbacks the transaction. +func (tx *Tx) Rollback() error { + txDriver := tx.config.driver.(*txDriver) + var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error { + return txDriver.tx.Rollback() + }) + txDriver.mu.Lock() + hooks := append([]RollbackHook(nil), txDriver.onRollback...) + txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Rollback(tx.ctx, tx) +} + +// OnRollback adds a hook to call on rollback. +func (tx *Tx) OnRollback(f RollbackHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onRollback = append(txDriver.onRollback, f) + txDriver.mu.Unlock() +} + +// Client returns a Client that binds to current transaction. 
+func (tx *Tx) Client() *Client { + tx.clientOnce.Do(func() { + tx.client = &Client{config: tx.config} + tx.client.init() + }) + return tx.client +} + +func (tx *Tx) init() { + tx.SIP = NewSIPClient(tx.config) +} + +// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. +// The idea is to support transactions without adding any extra code to the builders. +// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance. +// Commit and Rollback are nop for the internal builders and the user must call one +// of them in order to commit or rollback the transaction. +// +// If a closed transaction is embedded in one of the generated entities, and the entity +// applies a query, for example: SIP.QueryXXX(), the query will be executed +// through the driver which created this transaction. +// +// Note that txDriver is not goroutine safe. +type txDriver struct { + // the driver we started the transaction from. + drv dialect.Driver + // tx is the underlying transaction. + tx dialect.Tx + // completion hooks. + mu sync.Mutex + onCommit []CommitHook + onRollback []RollbackHook +} + +// newTx creates a new transactional driver. +func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) { + tx, err := drv.Tx(ctx) + if err != nil { + return nil, err + } + return &txDriver{tx: tx, drv: drv}, nil +} + +// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls +// from the internal builders. Should be called only by the internal builders. +func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil } + +// Dialect returns the dialect of the driver we started the transaction from. +func (tx *txDriver) Dialect() string { return tx.drv.Dialect() } + +// Close is a nop close. +func (*txDriver) Close() error { return nil } + +// Commit is a nop commit for the internal builders. +// User must call `Tx.Commit` in order to commit the transaction. 
+func (*txDriver) Commit() error { return nil } + +// Rollback is a nop rollback for the internal builders. +// User must call `Tx.Rollback` in order to rollback the transaction. +func (*txDriver) Rollback() error { return nil } + +// Exec calls tx.Exec. +func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error { + return tx.tx.Exec(ctx, query, args, v) +} + +// Query calls tx.Query. +func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error { + return tx.tx.Query(ctx, query, args, v) +} + +var _ dialect.Driver = (*txDriver)(nil) diff --git a/internal/persistence/ent/schema/sip.go b/internal/persistence/ent/schema/sip.go new file mode 100644 index 00000000..07de26a9 --- /dev/null +++ b/internal/persistence/ent/schema/sip.go @@ -0,0 +1,35 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/field" +) + +// SIP holds the schema definition for the SIP entity. +type SIP struct { + ent.Schema +} + +// Annotations of the SIP. +func (SIP) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "sip"}, + } +} + +// Fields of the SIP. +func (SIP) Fields() []ent.Field { + return []ent.Field{ + field.String("name"). + Annotations(entsql.Annotation{ + Size: 1024, + }), + field.String("checksum"). + Annotations(entsql.Annotation{ + Size: 64, + }). + Unique(), + } +} diff --git a/internal/persistence/persistence.go b/internal/persistence/persistence.go new file mode 100644 index 00000000..b2c90682 --- /dev/null +++ b/internal/persistence/persistence.go @@ -0,0 +1,12 @@ +package persistence + +import ( + "context" + "errors" +) + +var ErrDuplicatedSIP = errors.New("there is already a SIP with the same checksum") + +type Service interface { + CreateSIP(context.Context, string, string) error +}